# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Representation of the standard COCO json dataset format.
When working with a new dataset, we strongly suggest to convert the dataset into
the COCO json format and use the existing code; it is not recommended to write
code to support new dataset formats.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
try:
    import cPickle as pickle  # Python 2
except ImportError:  # Python 3
    import pickle
import logging
import numpy as np
import os
import scipy.sparse
from PIL import Image
import cv2
# Must happen before importing COCO API (which imports matplotlib)
import detectron.utils.env as envu
envu.set_up_matplotlib()
# COCO API
from pycocotools import mask as COCOmask
from pycocotools.coco import COCO
from detectron.core.config import cfg
from detectron.utils.timer import Timer
import detectron.datasets.dataset_catalog as dataset_catalog
import detectron.utils.boxes as box_utils
import detectron.utils.segms as segm_utils
from detectron.datasets.dataset_catalog_LIP import ANN_FN
from detectron.datasets.dataset_catalog_LIP import DATASETS
from detectron.datasets.dataset_catalog_LIP import IM_DIR
from detectron.datasets.dataset_catalog_LIP import IM_IDS
from detectron.datasets.dataset_catalog_LIP import IM_PREFIX
logger = logging.getLogger(__name__)
class JsonDataset(object):
"""A class representing a COCO json dataset."""
def __init__(self, name):
assert name in DATASETS.keys(), \
'Unknown dataset name: {}'.format(name)
assert os.path.exists(DATASETS[name][IM_DIR]), \
'Image directory \'{}\' not found'.format(DATASETS[name][IM_DIR])
if 'train' in name:
assert os.path.exists(DATASETS[name][ANN_FN]), \
'Annotation file \'{}\' not found'.format(DATASETS[name][ANN_FN])
assert os.path.exists(DATASETS[name][IM_IDS]), \
'im_ids file \'{}\' not found'.format(DATASETS[name][IM_IDS])
logger.debug('Creating: {}'.format(name))
self.name = name
self.dataset = name.split('_')[-1] # 'train' or 'val'
self.image_directory = DATASETS[name][IM_DIR]
self.image_prefix = (
'' if IM_PREFIX not in DATASETS[name] else DATASETS[name][IM_PREFIX]
)
#self.COCO = COCO(DATASETS[name][ANN_FN])
self.debug_timer = Timer()
# Set up dataset classes
#category_ids = self.COCO.getCatIds()
if 'ATR' in self.name:
category_ids = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
categories = ['background', 'hat', 'hair', 'sunglasses', 'upperclothes',
'skirt', 'pants', 'dress', 'belt', 'leftShoes', 'right-shoe', 'face',
'left-leg', 'right-leg', 'left-arm', 'right-arm', 'bag', 'scarf']
else:
category_ids = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]
#categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
categories = ['background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt',
'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe','rightShoe']
if cfg.Ignore_left: # 14,15, 16,17, 18,19
if 'ATR' in self.name:
categories = ['background', 'hat', 'hair', 'sunglasses', 'upperclothes',
'skirt', 'pants', 'dress', 'belt', 'shoe', 'face', 'leg', 'arm', 'bag', 'scarf']
category_ids = range(len(categories))
self.category_id_to_Ignore_left_id = {
v: i
for i, v in enumerate(range(18))
}
self.category_id_to_Ignore_left_id[10] = 9
self.category_id_to_Ignore_left_id[11] = 10
self.category_id_to_Ignore_left_id[12] = 11
self.category_id_to_Ignore_left_id[13] = 11
self.category_id_to_Ignore_left_id[14] = 12
self.category_id_to_Ignore_left_id[15] = 12
self.category_id_to_Ignore_left_id[16] = 13
self.category_id_to_Ignore_left_id[17] = 14
else:
categories = ['background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt',
'face', 'Arm', 'Leg', 'Shoe']
category_ids = range(len(categories))
self.category_id_to_Ignore_left_id = {
v: i
for i, v in enumerate(range(20))
}
self.category_id_to_Ignore_left_id[15] = 14
self.category_id_to_Ignore_left_id[16] = 15
self.category_id_to_Ignore_left_id[17] = 15
self.category_id_to_Ignore_left_id[18] = 16
self.category_id_to_Ignore_left_id[19] = 16
self.category_to_id_map = dict(zip(categories, category_ids))
self.classes = categories
self.num_classes = len(self.classes)
logger.info('classes: {}'.format(self.classes))
logger.info('num_classes: {}'.format(self.num_classes))
self.json_category_id_to_contiguous_id = {
v: i
for i, v in enumerate(category_ids)
}
self.contiguous_category_id_to_json_id = {
v: k
for k, v in self.json_category_id_to_contiguous_id.items()
}
self._init_keypoints()
def get_roidb(
self,
gt=False,
proposal_file=None,
min_proposal_size=2,
proposal_limit=-1,
crowd_filter_thresh=0
):
"""Return an roidb corresponding to the json dataset. Optionally:
- include ground truth boxes in the roidb
- add proposals specified in a proposals file
- filter proposals based on a minimum side length
- filter proposals that intersect with crowd regions
"""
assert gt is True or crowd_filter_thresh == 0, \
'Crowd filter threshold must be 0 if ground-truth annotations ' \
'are not included.'
roidb = self._load_lip() # load data when train or test
if gt:
# include gt object annotations
self.debug_timer.tic()
self.load_lip_annotations(roidb)
logger.debug(
'load_lip_annotations took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
#############################################
if proposal_file is not None:
# Include proposals from a file
self.debug_timer.tic()
self._add_proposals_from_file(
roidb, proposal_file, min_proposal_size, proposal_limit,
crowd_filter_thresh
)
logger.debug(
'_add_proposals_from_file took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
_add_class_assignments(roidb)
return roidb
def _load_lip(self):
""" gao: load train or test dadaset of LIP"""
imglist_file = DATASETS[self.name][IM_IDS]
assert os.path.exists(imglist_file), 'path does not exist: {}'.format(imglist_file)
imgids_list = []
with open(imglist_file) as f:
for line in f.readlines():
if len(line)>1:
imgids_list.append(line.strip())
# mistake label id
if 'LIP_train' in self.name:
mistakelist_file = os.path.join(os.path.dirname(imglist_file), 'train_mistake_id.txt')
assert os.path.exists(mistakelist_file), 'path does not exist: {}'.format(mistakelist_file)
im_mistake_ids = []
with open(mistakelist_file) as f:
for line in f.readlines():
if len(line)>1:
im_mistake_ids.append(line.strip())
roidb = []
for i in range(len(imgids_list)):
if 'LIP_train' in self.name:
if imgids_list[i] in im_mistake_ids:
continue
roi_entry = dict()
roi_entry['dataset'] = self
roi_entry['id'] = imgids_list[i]
roi_entry['image'] = os.path.join(DATASETS[self.name][IM_DIR], imgids_list[i] + '.jpg')
assert os.path.exists(roi_entry['image']), 'image path does not exist: {}'.format(roi_entry['image'])
img = cv2.imread(roi_entry['image'])
size = img.shape
roi_entry['height'] = size[0]
roi_entry['width'] = size[1]
roi_entry['flipped'] = False
roi_entry['has_visible_keypoints'] = False
roi_entry['boxes'] = np.empty((0,4), dtype=np.float32)
roi_entry['gt_classes'] = np.empty((0), dtype=np.int32)
roi_entry['box_to_gt_ind_map'] = np.empty((0), dtype=np.int32)
roi_entry['gt_overlaps'] = scipy.sparse.csr_matrix(
np.empty((0,self.num_classes), dtype=np.float32)
)
roi_entry['is_crowd'] = np.empty((0), dtype=bool)
roi_entry['seg_areas'] = np.empty((0), dtype=np.float32)
roidb.append(roi_entry)
return roidb
def load_lip_annotations(self,roidb):
# load from label of png
for i in range(len(roidb)):
roi_entry = roidb[i]
if roi_entry['id'] in ['27969_199668']:
continue
#print(i, roi_entry['id'])
boxes, gt_classes, ins_id, label_path, gt_overlaps = self.load_from_seg(roi_entry['id'])
if boxes.size == 0:
total_num_objs = 0
boxes = np.zeros((total_num_objs, 4), dtype=np.uint16)
gt_overlaps = np.zeros((total_num_objs, self.num_classes), dtype=np.float32)
gt_classes = np.zeros((total_num_objs, ), dtype=np.int32)
roi_entry['boxes'] = boxes
roi_entry['gt_classes'] = gt_classes
roi_entry['box_to_gt_ind_map'] = ins_id
roi_entry['ins_seg'] = label_path # full path of label png
# im_label = Image.open(label_path)
# pixel = list(im_label.getdata())
# im_label = np.array(pixel).reshape([im_label.size[1], im_label.size[0]])
# roi_entry['ins_seg'] = im_label
roi_entry['gt_overlaps'] = gt_overlaps
roi_entry['gt_overlaps'] = scipy.sparse.csr_matrix(roi_entry['gt_overlaps'])
#roi_entry['max_overlaps'] = gt_overlaps.max(axis=1)
#roi_entry['max_class'] = gt_overlaps.argmax(axis=1)
roi_entry['is_crowd'] = np.zeros((boxes.shape[0]), dtype=bool)
#roi_entry['has_visible_keypoints'] = False
roi_entry['seg_areas'] = np.zeros((boxes.shape[0]), dtype=np.float32)
roi_entry['seg_areas'][:] = 50
#roi_entry['gt_boxes'] = boxes
#roidb.append(roi_entry)
#return roidb
def load_from_seg(self,seg_gt_id):
""" gao: load from seg label png """
seg_gt = os.path.join(DATASETS[self.name][ANN_FN], seg_gt_id + '.png')
assert os.path.exists(seg_gt), 'path does not exist: {}'.format(seg_gt)
im = Image.open(seg_gt)
pixel = list(im.getdata())
pixel = np.array(pixel).reshape([im.size[1], im.size[0]])
gt_classes = []
boxes = []
box_to_gt_ind_map = []
gt_overlaps = []
ins_id = 0
for c in range(1,self.num_classes):
px = np.where(pixel == c)
if len(px[0])==0:
continue
x_min = np.min(px[1])
y_min = np.min(px[0])
x_max = np.max(px[1])
y_max = np.max(px[0])
if x_max - x_min <= 1 or y_max - y_min <= 1:
continue
if cfg.Ignore_left:
c = self.category_id_to_Ignore_left_id[c]
# gt_classes.append(c)
# boxes.append([x_min, y_min, x_max, y_max])
# box_to_gt_ind_map.append(ins_id)
# ins_id += 1
# overlaps = np.zeros(self.num_classes,dtype=np.float32)
# overlaps[c] = 1
# gt_overlaps.append(overlaps)
if (c==3 or c==8) and 'LIP' in cfg.TRAIN.DATASETS[0]: # has gloves or socks
box,gt_class,box_to_gt_ind,gt_overlap,ins_id = _get_socks_glove(pixel,c,ins_id,self.num_classes)
for i in range(len(box)):
boxes.append(box[i])
gt_classes.append(gt_class[i])
box_to_gt_ind_map.append(box_to_gt_ind[i])
gt_overlaps.append(gt_overlap[i])
else:
gt_classes.append(c)
boxes.append([x_min, y_min, x_max, y_max])
box_to_gt_ind_map.append(ins_id)
ins_id += 1
overlaps = np.zeros(self.num_classes)
overlaps[c] = 1
gt_overlaps.append(overlaps)
# returns boxes, classes, box->gt index map, label png path, and overlaps,
# matching the unpacking in load_lip_annotations above
return np.asarray(boxes, dtype=np.float32), \
np.asarray(gt_classes, dtype=np.int32), \
np.asarray(box_to_gt_ind_map, dtype=np.int32), \
seg_gt, \
np.asarray(gt_overlaps, dtype=np.float32)
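# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original Detectron file): a minimal,
# self-contained example of the core idea in load_from_seg above, deriving one
# bounding box per semantic class from a label image. The helper name and the
# toy label array are assumptions made purely for illustration.
import numpy as np

def boxes_from_label_map(label_map, num_classes):
    """Return {class_id: [x_min, y_min, x_max, y_max]} for each class present."""
    boxes = {}
    for c in range(1, num_classes):  # class 0 is background
        ys, xs = np.where(label_map == c)
        if xs.size == 0:
            continue
        boxes[c] = [int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max())]
    return boxes

# toy 4x5 label map containing classes 1 and 2
toy = np.array([[0, 1, 1, 0, 0],
                [0, 1, 1, 0, 2],
                [0, 0, 0, 0, 2],
                [0, 0, 0, 0, 2]])
print(boxes_from_label_map(toy, num_classes=3))  # {1: [1, 0, 2, 1], 2: [4, 1, 4, 3]}
# ---------------------------------------------------------------------------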
import scanpy as sc
import pandas as pd
import numpy as np
import math
from scipy.sparse import csr_matrix, find
import matplotlib.pyplot as plt
import time
from scipy.stats import norm
from joblib import Parallel, delayed
def which(x):
n = len(x)
m = sum(x)
y = np.zeros(m)
j = -1
for i in range(0, n):
if x[i]:
j = j+1
y[j] = i
y = y.astype(int)
return y
def sparsetoid(sp_mtx, x = 0, y = 0):
(id_x, id_y) = sp_mtx.nonzero()
if len(id_x)*len(id_y) == 0:
v = np.array([])
else:
v = np.asarray(sp_mtx[id_x, id_y])[0]
d = {'x_id': id_x+x, 'y_id': id_y+y, 'value': v}
df = pd.DataFrame(data=d)
return df
def id_concat(pair):
frames = [pair[0], pair[1]]
result = pd.concat(frames)
return result
def idtosparse(df, G1=None, G2=None):
if G1 is None:
G1 = max(df.x_id) + 1
if G2 is None:
G2 = max(df.y_id) + 1
sp_mtx = csr_matrix((df.value, (df.x_id, df.y_id)), shape=(G1, G2))
return sp_mtx
def csntoflat(csn_mat):
if type(csn_mat) is list:
n2 = len(csn_mat)
n1 = csn_mat[0].shape[0]
csn_flat = np.zeros((int(n1*(n1-1)/2), n2))
k = 0
for i in range(0, n1-1):
for j in range(i+1, n1):
csn_flat[k, :] = np.asarray([item[i,j] for item in csn_mat])
k = k + 1
else:
(n1, n11, n2) = csn_mat.shape
if n1 != n11:
print('dimensions do not match!')
return
csn_flat = np.zeros((int(n1*(n1-1)/2), n2))
k = 0
for i in range(0, n1-1):
for j in range(i+1, n1):
csn_flat[k, :] = csn_mat[i, j, :]
k = k + 1
return csn_flat
def csn(data_full, g_mtx = None, wd_q = 0.1, dev = True, md = 1, iteration = False, fuzzy = False, ncore = 4):
(n1, n2) = data_full.shape
eps = np.finfo(float).eps
if g_mtx is None:
g_mtx = np.ones((n1, n1)) - np.tri(n1)
zero_id = np.where(data_full.sum(axis = 1)==0)
g_mtx[zero_id, :] = 0
g_mtx[:, zero_id] = 0
#csn = [[[0 for col in range(n1)]for row in range(n1)] for x in range(n2)]
#csn = np.zeros((n1, n1, n2))
(I, J, S) = find(g_mtx)
L = len(I)
print(L, 'pairs need calculation')
csn_mat = np.zeros((L, n2))
def valuetosparse(value, I, J, G1 = None, G2 = None):
if G1 is None:
G1 = max(I) + 1
if G2 is None:
G2 = max(J) + 1
I = I[value != 0]
J = J[value != 0]
value = value[value != 0]
sp_mtx = csr_matrix((value, (I, J)), shape = (G1, G2))
return sp_mtx
if dev:
if fuzzy:
selected_gene = np.unique([I, J])
grid_gene = np.zeros((n1, n2))
gene_first_zero = np.zeros(n1)
for s in selected_gene:
gene_temp = data_full[s, :]
max_gene = max(gene_temp[gene_temp!=0])
min_gene = min(gene_temp[gene_temp!=0])
range_temp = max_gene - min_gene
if range_temp == 0:
gene_cut = np.array([min_gene])
else:
gene_cut = np.arange(min_gene, max_gene, range_temp/20)
if sum(gene_temp == 0) > 1:
gene_cut = np.insert(gene_cut, 0, 0)
gene_first_zero[s] = 1
grid_gene[s,:] = np.digitize(gene_temp, gene_cut)
def inner_fuzzy_fun(m):
i = I[m]
j = J[m]
gene1 = data_full[i, :]
gene2 = data_full[j, :]
data = data_full[[i, j], :]
grid1 = grid_gene[i, :]
grid2 = grid_gene[j, :]
grid_mix = grid1*30 + grid2
u_grid = np.unique(grid_mix)
n_grid = np.zeros(len(u_grid))
for s in range(0, len(u_grid)):
n_grid[s] = sum(grid_mix == u_grid[s])
u_grid_mix = np.vstack((np.floor((u_grid - 1)/30), (u_grid - 1)%30 + 1, u_grid, n_grid))
if gene_first_zero[i] == 1:
u_grid_mix = u_grid_mix[:, u_grid_mix[0, :]!=1]
if gene_first_zero[j] == 1:
u_grid_mix = u_grid_mix[:, u_grid_mix[1, :]!=1]
cell_id = np.zeros(u_grid_mix.shape[1])
cell_id_full = []
for t in range(0, u_grid_mix.shape[1]):
cell_id_full.append(np.where(grid_mix == u_grid_mix[2, t])[0])
cell_id[t] = np.random.choice(cell_id_full[t], 1)
cell_id = cell_id.astype(int)
(upper, lower) = upperlower_dev(gene1, gene2, wd_q, md, iteration, cell_id)
csn_temp = np.zeros(n2)
for t in range(0, len(cell_id)):
k = cell_id[t]
B = np.zeros((2, n2))
for l in range(0, n2):
B[:, l] = (data[:,l] <= upper[:, k]) & (data[:, l] >= lower[:, k]) & (data[:, k] > 0)
a = B.sum(axis = 1)
a = np.reshape(a, (2, 1))
temp = (B@B.T*n2 - a@a.T)/np.sqrt((a@a.T)*((n2-a)@(n2-a).T)/(n2-1)+eps)
csn_temp[cell_id_full[t]] = temp[0, 1]
return csn_temp
csn_arr_temp = np.asarray(Parallel(n_jobs = ncore)(delayed(inner_fuzzy_fun)(m) for m in range(0, L)))
csn = [valuetosparse(v, I, J, n1, n1) for v in list(csn_arr_temp.T)]
else:
def inner_fun(m):
i = I[m]
j = J[m]
gene1 = data_full[i,:]
gene2 = data_full[j,:]
data = data_full[[i,j],:]
csn_temp = np.zeros(n2)
(upper, lower) = upperlower_dev(gene1, gene2, boxsize = wd_q, md = md, iteration = iteration)
for k in range(0, n2):
if gene1[k]*gene2[k] > 0:
B = np.zeros((2, n2))
for l in range(0, n2):
B[:, l] = (data[:,l] <= upper[:, k]) & (data[:, l] >= lower[:, k]) & (data[:, k] > 0)
a = B.sum(axis = 1)
a = np.reshape(a, (2, 1))
temp = (B@B.T*n2 - a@a.T)/np.sqrt((a@a.T)*((n2-a)@(n2-a).T)/(n2-1)+eps)
csn_temp[k] = temp[0, 1]
return csn_temp
csn_arr_temp = np.asarray(Parallel(n_jobs = ncore)(delayed(inner_fun)(m) for m in range(0, L)))
csn = [valuetosparse(v, I, J, n1, n1) for v in list(csn_arr_temp.T)]
else:
(upper, lower) = upperlower(data_full, boxsize = wd_q)
csn = []
for k in range(0, n2):
B = np.zeros((n1, n2))
for j in range(0, n2):
B[:, j] = (data_full[:, j] <= upper[:, k]) & (data_full[:, j] >= lower[:, k]) & (data_full[:, k] > 0)
a = B.sum(axis = 1)
a = np.reshape(a, (n1, 1))
temp = (B@B.T*n2 - a@a.T)/np.sqrt((a@a.T)*((n2-a)@(n2-a).T)/(n2-1)+eps)
np.fill_diagonal(temp, 0)
csn.append(csr_matrix(temp))
return csn
def upperlower_dev(gene1, gene2, boxsize = 0.1, md = 1, iteration = False, cell_id = None):
if len(gene1) != len(gene2):
return
n1 = 2
n2 = len(gene1)
data = np.append([gene1], [gene2], axis= 0)
if cell_id is None:
cell_id = range(0, n2)
(up_q, low_q) = upperlower(data, boxsize)
upper = np.zeros((n1, n2))
lower = np.zeros((n1, n2))
if iteration:
maxiter = 10000
for k in cell_id:
if gene1[k] * gene2[k] > 0:
d2_0 = md * gene2[(gene1 <= up_q[0,k]) & (gene1 >= low_q[0, k])].std()
d1_0 = md * gene1[(gene2 <= up_q[1,k]) & (gene2 >= low_q[1, k])].std()
d2_1 = md * gene2[(gene1 <= gene1[k] + d1_0) & (gene1 >= gene1[k] - d1_0)].std()
d1_1 = md * gene1[(gene2 <= gene2[k] + d2_0) & (gene2 >= gene2[k] - d2_0)].std()
count = 0
while (math.sqrt((d2_0 - d2_1)**2 + (d1_0 - d1_1)**2) > 1e-5) and (count < maxiter):
d2_0 = d2_1
d1_0 = d1_1
d2_1 = md * gene2[(gene1 <= gene1[k] + d1_0) & (gene1 >= gene1[k] - d1_0)].std()
d1_1 = md * gene1[(gene2 <= gene2[k] + d2_0) & (gene2 >= gene2[k] - d2_0)].std()
count = count + 1
if count >= maxiter:
print('Iteration at cell', k, 'exceeds', maxiter)
return
upper[0, k] = gene1[k] + d1_1
upper[1, k] = gene2[k] + d2_1
lower[0, k] = gene1[k] - d1_1
lower[1, k] = gene2[k] - d2_1
else:
for k in cell_id:
if gene1[k] * gene2[k] > 0:
d2 = md * gene2[(gene1 <= up_q[0,k]) & (gene1 >= low_q[0, k])].std()
d1 = md * gene1[(gene2 <= up_q[1,k]) & (gene2 >= low_q[1, k])].std()
upper[0, k] = gene1[k] + d1
upper[1, k] = gene2[k] + d2
lower[0, k] = gene1[k] - d1
lower[1, k] = gene2[k] - d2
return (upper, lower)
def upperlower(data, boxsize = 0.1):
(n1, n2) = data.shape # n1 gene; n2 cells
upper = np.zeros((n1, n2))
lower = np.zeros((n1, n2))
for i in range(0, n1):
s1 = sorted(data[i,:])
s2 = data[i,:].argsort()
#s1.append(0)
h = round(boxsize/2 * n2)
k = 0
while k < n2:
s = 0
#while (k + s + 1 < n2) & (s1[k + s + 1] == s1[k]):
while k+s+1 < n2:
if s1[k+s+1] == s1[k]:
s = s + 1
else:
break
if s >= h:
upper[i, s2[k:k + s + 1]] = data[i, s2[k]]
lower[i, s2[k:k + s + 1]] = data[i, s2[k]]
else:
upper[i, s2[k:k + s + 1]] = data[i, s2[min(n2 - 1, k + s + h)]]
lower[i, s2[k:k + s + 1]] = data[i, s2[max(0, k - h)]]
k = k + s + 1
return (upper, lower)
def upperlower_soft(data_full, soft_c, wd_q = 0.1):
(n2, K) = soft_c.shape
(n1, n2) = data_full.shape
F_c = soft_c/sum(soft_c)
upper = [np.zeros((n1, n2)), np.zeros((n1, n2))]
lower = [np.zeros((n1, n2)), np.zeros((n1, n2))]
for cl in range(0, K):
fc = F_c[:, cl]
for i in range(0, n1):
s1 = sorted(data_full[i,:])
s2 = data_full[i,:].argsort()
n3 = s1.count(0)
k = 0
while k < n2:
s = 0
while k+s+1 < n2:
if s1[k+s+1] == s1[k]:
s = s + 1
else:
break
if sum(fc[s2[k:k+s+1]]) >= wd_q/2:
upper[cl][i, s2[k:k+s+1]] = data_full[i, s2[k]]
lower[cl][i, s2[k:k+s+1]] = data_full[i, s2[k]]
else:
h = 1
while (h+k+s < n2) & (sum(fc[s2[k:k+s+h+1]]) < wd_q/2):
h = h+1
upper[cl][i, s2[k:k+s+1]] = data_full[i, s2[min(n2-1, k+h+s)]]
h = 1
while (k-h>=0) & (sum(fc[s2[k-h:k+1]]) < wd_q/2):
h = h+1
lower[cl][i, s2[k:k+s+1]] = data_full[i, s2[max(n3*(n3>h), k-h)]]
k = k + s + 1
print('soft cluster', cl+1, 'gene', i , 'is done!')
return(upper, lower)
def csn_soft_dev(data_full, soft_c, upper = None, lower = None, wd_q = 0.1, md = 1, iteration = False, maxiter = 10000):
if upper is None or lower is None:
(upper, lower) = upperlower_soft(data_full, soft_c, wd_q)
K = soft_c.shape[1]
(n1, n2) = data_full.shape
csn = [np.zeros((n1, n1, n2)), np.zeros((n1, n1, n2))]
for cl in range(0, K):
n_cl = sum(soft_c[:, cl])
soft_cl = soft_c[:, cl]
for i in range(0, n1):
for j in range(i+1, n1):
nz_index = which(data_full[i,:]*data_full[j,:]*soft_cl > 0)
for k in nz_index:
btw_i = (data_full[i,:] <= upper[cl][i, k]) & (data_full[i, :] >= lower[cl][i, k])
btw_j = (data_full[j,:] <= upper[cl][j, k]) & (data_full[j, :] >= lower[cl][j, k])
sdj_0 = md*np.sqrt(np.cov(data_full[j,btw_i], aweights=soft_cl[btw_i]))
sdi_0 = md*np.sqrt(np.cov(data_full[i,btw_j], aweights=soft_cl[btw_j]))
if iteration:
btw_i = (data_full[i,:] <= data_full[i, k]+sdi_0) & (data_full[i, :] >= data_full[i, k]-sdi_0)
btw_j = (data_full[j,:] <= data_full[j, k]+sdj_0) & (data_full[j, :] >= data_full[j, k]-sdj_0)
sdj_1 = md*np.sqrt(np.cov(data_full[j,btw_i], aweights=soft_cl[btw_i]))
sdi_1 = md*np.sqrt(np.cov(data_full[i,btw_j], aweights=soft_cl[btw_j]))
count = 0
while ((sdi_0-sdi_1)**2 + (sdj_0-sdj_1)**2 > pow(10, -12)) & (count < maxiter) & (sdi_1*sdj_1 >0):
sdi_0 = sdi_1
sdj_0 = sdj_1
btw_i = (data_full[i,:] <= data_full[i, k]+sdi_0) & (data_full[i, :] >= data_full[i, k]-sdi_0)
btw_j = (data_full[j,:] <= data_full[j, k]+sdj_0) & (data_full[j, :] >= data_full[j, k]-sdj_0)
sdj_1 = md*np.sqrt(np.cov(data_full[j,btw_i], aweights=soft_cl[btw_i]))
sdi_1 = md*np.sqrt(np.cov(data_full[i,btw_j], aweights=soft_cl[btw_j]))
count = count + 1
if count >= maxiter:
print('Iteration of cluster', cl+1, 'at gene', i+1, 'and gene', j+1, 'at cell', k+1, 'has exceeded', maxiter)
return
sdj = sdj_1
sdi = sdi_1
else:
sdj = sdj_0
sdi = sdi_0
nx = soft_cl[(data_full[i, :]<= data_full[i,k]+sdi) & (data_full[i, :] >= data_full[i,k]-sdi)].sum()
ny = soft_cl[(data_full[j, :]<= data_full[j,k]+sdj) & (data_full[j, :] >= data_full[j,k]-sdj)].sum()
nxy = soft_cl[(data_full[i, :]<= data_full[i,k]+sdi) & (data_full[i, :] >= data_full[i,k]-sdi) & (data_full[j, :]<= data_full[j,k]+sdj) & (data_full[j, :] >= data_full[j,k]-sdj)].sum()
rho_xy = nxy/n_cl - (nx/n_cl)*(ny/n_cl)
sigma_xy = nx*ny*(n_cl-nx)*(n_cl-ny)/(n_cl**4*(n_cl-1))
csn[cl][i, j, k] = rho_xy/np.sqrt(sigma_xy)
csn[cl][j, i, k] = rho_xy/np.sqrt(sigma_xy)
print('soft cluster', cl+1)
return csn
def csn_comb_cluster(csn, soft_c):
(n2, K) = soft_c.shape
scale = np.sqrt(soft_c[:, 1]**2 + soft_c[:, 0]**2)
n1 = csn[0].shape[0]
csn_comb = np.zeros((n1, n1, n2))
for k in range(0, K):
for i in range(0, n2):
csn_comb[:, :, i] = (csn_comb[:, :, i] + csn[k][:, :, i]*soft_c[i, k])/scale[i]
return csn_comb
def csn_rec(data1, data2, g_mtx = None, wd_q = 0.1, dev = True, md = 1, iteration = False, fuzzy = False, ncore = 4):
(G1, N) = data1.shape
G2 = data2.shape[0]
eps = np.finfo(float).eps
data = [data1, data2]
if g_mtx is None:
g_mtx = np.ones((G1, G2))
zero_id1 = np.where(data1.sum(axis = 1)==0)
g_mtx[zero_id1, :] = 0
zero_id2 = np.where(data2.sum(axis = 1)==0)
g_mtx[:, zero_id2] = 0
#csn = np.zeros((G1, G2, N))
(I, J, S) = find(g_mtx)
L = len(I)
print(L, 'pairs need calculation')
csn_mat = np.zeros((L, N))
if dev:
if fuzzy:
selected_gene = [np.unique(I), np.unique(J)]
grid_gene = [np.zeros((G1, N)), np.zeros((G2, N))]
gene_first_zero = [np.zeros(G1), np.zeros(G2)]
for k in range(0, 2):  # grid both gene sets (data1 and data2)
for s in selected_gene[k]:
gene_temp = data[k][s, :]
max_gene = max(gene_temp[gene_temp!=0])
min_gene = min(gene_temp[gene_temp!=0])
range_temp = max_gene - min_gene
if range_temp == 0:
gene_cut = np.array([min_gene])
else:
gene_cut = np.arange(min_gene, max_gene, range_temp/20)
if sum(gene_temp == 0) > 1:
gene_cut = np.insert(gene_cut, 0, 0)
gene_first_zero[k][s] = 1
grid_gene[k][s,:] = np.digitize(gene_temp, gene_cut)
def inner_fuzzy_fun(m):
i = I[m]
j = J[m]
gene1 = data1[i, :]
gene2 = data2[j, :]
data = np.append([gene1], [gene2], axis= 0)
grid1 = grid_gene[0][i, :]
grid2 = grid_gene[1][j, :]
grid_mix = grid1*30 + grid2
u_grid = np.unique(grid_mix)
n_grid = np.zeros(len(u_grid))
for s in range(0, len(u_grid)):
n_grid[s] = sum(grid_mix == u_grid[s])
u_grid_mix = np.vstack((np.floor((u_grid - 1)/30), (u_grid - 1)%30 + 1, u_grid, n_grid))
if gene_first_zero[0][i] == 1:
u_grid_mix = u_grid_mix[:, u_grid_mix[0, :]!=1]
if gene_first_zero[1][j] == 1:
u_grid_mix = u_grid_mix[:, u_grid_mix[1, :]!=1]
cell_id = np.zeros(u_grid_mix.shape[1])
cell_id_full = []
for t in range(0, u_grid_mix.shape[1]):
cell_id_full.append(np.where(grid_mix == u_grid_mix[2, t])[0])
cell_id[t] = np.random.choice(cell_id_full[t], 1)
cell_id = cell_id.astype(int)
(upper, lower) = upperlower_dev(gene1, gene2, wd_q, md, iteration, cell_id)
csn_temp = np.zeros(N)
for t in range(0, len(cell_id)):
k = cell_id[t]
B = np.zeros((2, N))
for l in range(0, N):
B[:, l] = (data[:,l] <= upper[:, k]) & (data[:, l] >= lower[:, k]) & (data[:, k] > 0)
a = B.sum(axis = 1)
a = np.reshape(a, (2, 1))
temp = (B@B.T*N - a@a.T)/np.sqrt((a@a.T)*((N-a)@(N-a).T)/(N-1)+eps)
csn_temp[cell_id_full[t]] = temp[0, 1]
return csn_temp
csn_arr_temp = np.asarray(Parallel(n_jobs = ncore)(delayed(inner_fuzzy_fun)(m) for m in range(0, L)))
#for k in range(0, n2):
# csn[I, J, k] = csn_mat[:, k]
# csn[J, I, k] = csn_mat[:, k]
else:
def inner_fun(m):
i = I[m]
j = J[m]
gene1 = data1[i, :]
gene2 = data2[j, :]
data = np.append([gene1], [gene2], axis= 0)
csn_temp = np.zeros(N)
(upper, lower) = upperlower_dev(gene1, gene2, boxsize = wd_q, md = md, iteration = iteration)
for k in range(0, N):
if gene1[k]*gene2[k] > 0:
B = np.zeros((2, N))
for l in range(0, N):
B[:, l] = (data[:,l] <= upper[:, k]) & (data[:, l] >= lower[:, k]) & (data[:, k] > 0)
a = B.sum(axis = 1)
a = np.reshape(a, (2, 1))
temp = (B@B.T*N - a@a.T)/np.sqrt((a@a.T)*((N-a)@(N-a).T)/(N-1)+eps)
csn_temp[k] = temp[0, 1]
return csn_temp
csn_arr_temp = np.asarray(Parallel(n_jobs = ncore)(delayed(inner_fun)(m) for m in range(0, L)))
def valuetosparse(value, I, J, G1 = None, G2 = None):
if G1 is None:
G1 = max(I) + 1
if G2 is None:
G2 = max(J) + 1
I = I[value != 0]
J = J[value != 0]
value = value[value != 0]
sp_mtx = csr_matrix((value, (I, J)), shape = (G1, G2))
return sp_mtx
csn = [valuetosparse(v, I, J, G1, G2) for v in list(csn_arr_temp.T)]
else:
print('Please use csn directly')
return
return csn
def csn_block(data, M = 100, g_mtx = None, wd_q = 0.1, dev = True, md = 1, iteration = False, fuzzy = False, ncore = 4):
(G, K) = data.shape
n = math.ceil(G/M)
group_n = np.zeros(G)
for i in range(0, n):
group_n[i*M:min((i+1)*M, G)] = i
if g_mtx is None:
g_mtx = np.ones((G, G)) - np.tri(G)
zero_id = np.where(data.sum(axis = 1)==0)
g_mtx[zero_id, :] = 0
g_mtx[:, zero_id] = 0
csn_mtx_id = []
for k in range(0, K):
csn_mtx_id.append(pd.DataFrame(data={'x_id': np.array([], dtype = int), 'y_id': np.array([], dtype = int), 'value': np.array([])}))
for i in range(0, n):
data_i = data[group_n == i, :]
for j in range(i, n):
if i == j:
g_mtx_temp = g_mtx[group_n == i,:][:, group_n == i]
csn_temp = csn(data_i, g_mtx_temp, wd_q, dev, md, iteration, fuzzy, ncore)
csn_id_temp = [sparsetoid(item, i*M, j*M) for item in csn_temp]
csn_mtx_id = [id_concat(pair) for pair in zip(csn_mtx_id, csn_id_temp)]
else:
data_j = data[group_n == j,:]
g_mtx_temp = g_mtx[group_n == i,:][:, group_n == j]
csn_temp = csn_rec(data_i, data_j, g_mtx_temp, wd_q, dev, md, iteration, fuzzy, ncore)
csn_id_temp = [sparsetoid(item, i*M, j*M) for item in csn_temp]
csn_mtx_id = [id_concat(pair) for pair in zip(csn_mtx_id, csn_id_temp)]
csn_id_temp = [item[item.columns[[1, 0, 2]]].rename(columns={'y_id': 'x_id', 'x_id': 'y_id'}) for item in csn_id_temp]
csn_mtx_id = [id_concat(pair) for pair in zip(csn_mtx_id, csn_id_temp)]
print('block [', i , ',', j, '] finished!')
csn_mtx = [idtosparse(item, G, G) for item in csn_mtx_id]
return csn_mtx
def csn_loc(data_full, knn_index, wd_q = 0.1, dev = True, md = 1, iteration = False, ncore = 4):
(n1, n2) = data_full.shape
(nk, nc) = knn_index.shape
eps = np.finfo(float).eps
if nc != n2:
print('dimensions of data and knn_index do not match!')
return
#csn_mat = np.zeros((n1, n1, n2))
def inner_fun(k):
#csn_temp = np.zeros((n1, n1))
index_temp = knn_index[:, k]
data_sub = data_full[:, index_temp-1]
g_index = which(data_sub[:, 0] > 0).astype(int)
L_temp = len(g_index)
I = np.zeros(L_temp*(L_temp-1))
J = np.zeros(L_temp*(L_temp-1))
S = np.zeros(L_temp*(L_temp-1))
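# ---------------------------------------------------------------------------
# Illustrative sketch (assumption: the csn() function defined above is in scope).
# It builds cell-specific networks for a tiny random expression matrix
# (genes x cells); the sizes, seed, and parameter values are arbitrary choices
# made only for illustration.
import numpy as np

rng = np.random.default_rng(0)
toy_expr = rng.poisson(1.0, size=(5, 8)).astype(float)   # 5 genes, 8 cells
networks = csn(toy_expr, wd_q=0.1, dev=True, ncore=1)    # one sparse gene-gene matrix per cell
print(len(networks), networks[0].shape)                  # expected: 8 (5, 5)
# ---------------------------------------------------------------------------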
"""Basic tests for bq utils."""
import numpy as np
import pytest
from probnum.quad._utils import as_domain
# fmt: off
@pytest.mark.parametrize(
"dom, in_dim",
[
((0, 1), -2), # negative dimension
((np.zeros(2), np.ones(2)), 3), # length of bounds does not match dimension
((np.zeros(2), np.ones(3)), None), # lower and upper bounds not equal lengths
((np.array([0, 0]), np.array([1, 0])), None), # integration domain is empty
((np.zeros([2, 1]), np.ones([2, 1])), None), # bounds have too many dimensions
((np.zeros([2, 1]), np.ones([2, 1])), 2), # bounds have too many dimensions
]
)
def test_as_domain_wrong_input(dom, in_dim):
with pytest.raises(ValueError):
as_domain(dom, in_dim)
@pytest.mark.parametrize(
"dom, in_dim",
[
((0, 1), 1), # convert bounds to 1D array
((0, 1), 3), # expand bounds to 3D array
((np.zeros(3), np.ones(3)),
# Copyright (c) 2013-2017 LSST Dark Energy Science Collaboration (DESC)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import numpy as np
import math
import datetime
import warnings
from .angle import Angle, _Angle
from .angleunit import radians, degrees, hours, arcsec
from . import util
class CelestialCoord(object):
"""This class defines a position on the celestial sphere, normally given by two angles,
``ra`` and ``dec``.
This class can be used to perform various calculations in spherical coordinates, such
as finding the angular distance between two points in the sky, calculating the angles in
spherical triangles, projecting from sky coordinates onto a Euclidean tangent plane, etc.
**Initialization:**
A `CelestialCoord` object is constructed from the right ascension and declination:
:meth:`coord.CelestialCoord.__init__`
>>> c = CelestialCoord(ra=12*hours, dec=31*degrees)
>>> print(c)
coord.CelestialCoord(3.141592653589793 radians, 0.5410520681182421 radians)
**Attributes:**
A CelestialCoord has the following (read-only) attributes:
:ra: The right ascension (an Angle instance)
:dec: The declination (an Angle instance)
>>> print(c.ra / degrees, c.dec / degrees)
180.0 31.0
In addition there is a convenience access property that returns ra and dec in radians.
:rad: A tuple (ra.rad, dec.rad)
>>> print(c.rad)
(3.141592653589793, 0.5410520681182421)
**Spherical Geometry:**
The basic spherical geometry operations are available to work with spherical triangles
For three coordinates cA, cB, cC making a spherical triangle, one can calculate the
sides and angles via:
| :meth:`coord.CelestialCoord.distanceTo`
| :meth:`coord.CelestialCoord.angleBetween`
>>> cA = CelestialCoord(0 * degrees, 0 * degrees)
>>> cB = CelestialCoord(0 * degrees, 10 * degrees)
>>> cC = CelestialCoord(10 * degrees, 0 * degrees)
>>> a = cB.distanceTo(cC)
>>> b = cC.distanceTo(cA)
>>> c = cA.distanceTo(cB)
>>> print(a.deg, b.deg, c.deg)
14.106044260566366 10.0 10.0
>>> A = cA.angleBetween(cB, cC)
>>> B = cB.angleBetween(cC, cA)
>>> C = cC.angleBetween(cA, cB)
>>> print(A.deg, B.deg, C.deg)
90.0 45.43854858674231 45.43854858674231
**Projections:**
Local tangent plane projections of an area of the sky can be performed using the project
method:
:meth:`coord.CelestialCoord.project`
>>> center = CelestialCoord(ra=10*hours, dec=30*degrees)
>>> sky_coord = CelestialCoord(ra=10.5*hours, dec=31*degrees)
>>> print(sky_coord)
coord.CelestialCoord(2.748893571891069 radians, 0.5410520681182421 radians)
>>> u, v = center.project(sky_coord)
>>> print(u.deg, v.deg)
-6.452371275343261 1.21794987288635
and back:
:meth:`coord.CelestialCoord.deproject`
>>> sky_coord = center.deproject(u,v)
>>> print(sky_coord)
coord.CelestialCoord(2.748893571891069 radians, 0.5410520681182421 radians)
where u and v are Angles and center and sky_coord are CelestialCoords.
"""
def __init__(self, ra, dec=None):
"""
:param ra: The right ascension. Must be an Angle instance.
:param dec: The declination. Must be an Angle instance.
"""
if isinstance(ra, CelestialCoord) and dec is None:
# Copy constructor
self._ra = ra._ra
self._dec = ra._dec
self._x = None
elif ra is None or dec is None:
raise TypeError("ra and dec are both required")
elif not isinstance(ra, Angle):
raise TypeError("ra must be a coord.Angle")
elif not isinstance(dec, Angle):
raise TypeError("dec must be a coord.Angle")
elif dec/degrees > 90. or dec/degrees < -90.:
raise ValueError("dec must be between -90 deg and +90 deg.")
else:
# Normal case
self._ra = ra
self._dec = dec
self._x = None # Indicate that x,y,z are not set yet.
@property
def ra(self):
"""A read-only attribute, giving the Right Ascension as an Angle"""
return self._ra
@property
def dec(self):
"""A read-only attribute, giving the Declination as an Angle"""
return self._dec
@property
def rad(self):
"""A convenience property, giving a tuple (ra.rad, dec.rad)
"""
return (self._ra.rad, self._dec.rad)
def _set_aux(self):
if self._x is None:
self._sindec, self._cosdec = self._dec.sincos()
self._sinra, self._cosra = self._ra.sincos()
self._x = self._cosdec * self._cosra
self._y = self._cosdec * self._sinra
self._z = self._sindec
def get_xyz(self):
"""Get the (x,y,z) coordinates on the unit sphere corresponding to this (RA, Dec).
.. math::
x &= \\cos(dec) \\cos(ra) \\\\
y &= \\cos(dec) \\sin(ra) \\\\
z &= \\sin(dec)
:returns: a tuple (x,y,z)
"""
self._set_aux()
return self._x, self._y, self._z
@staticmethod
def from_xyz(x, y, z):
"""Construct a CelestialCoord from a given (x,y,z) position in three dimensions.
The 3D (x,y,z) position does not need to fall on the unit sphere. The RA, Dec will
be inferred from the relations:
.. math::
x &= r \\cos(dec) \\cos(ra) \\\\
y &= r \\cos(dec) \\sin(ra) \\\\
z &= r \\sin(dec)
where :math:`r` is arbitrary.
:param x: The x position in 3 dimensions. Corresponds to r cos(dec) cos(ra)
:param y: The y position in 3 dimensions. Corresponds to r cos(dec) sin(ra)
:param z: The z position in 3 dimensions. Corresponds to r sin(dec)
:returns: a CelestialCoord instance
"""
norm = np.sqrt(x*x + y*y + z*z)
if norm == 0.:
raise ValueError("CelestialCoord for position (0,0,0) is undefined.")
ret = CelestialCoord.__new__(CelestialCoord)
ret._x = x / norm
ret._y = y / norm
ret._z = z / norm
ret._sindec = ret._z
ret._cosdec = np.sqrt(ret._x*ret._x + ret._y*ret._y)
if ret._cosdec == 0.:
ret._sinra = 0.
ret._cosra = 1.
else:
ret._sinra = ret._y / ret._cosdec
ret._cosra = ret._x / ret._cosdec
ret._ra = (np.arctan2(ret._sinra, ret._cosra) * radians).wrap(_Angle(math.pi))
ret._dec = np.arctan2(ret._sindec, ret._cosdec) * radians
return ret
@staticmethod
def radec_to_xyz(ra, dec, r=1.):
"""Convert ra, dec (in radians) to 3D x,y,z coordinates on the unit sphere.
The connection between (ra,dec) and (x,y,z) are given by the following formulae:
.. math::
x &= r \\cos(dec) \\cos(ra) \\\\
y &= r \\cos(dec) \\sin(ra) \\\\
z &= r \\sin(dec)
For a single ra,dec pair, the following are essentially equivalent:
>>> ra = 12*hours/radians # May be any angle measured
>>> dec = 31*degrees/radians # in radians
>>> CelestialCoord.radec_to_xyz(ra, dec)
(-0.8571673007021123, 1.0497271911386187e-16, 0.5150380749100542)
>>> CelestialCoord(ra * radians, dec * radians).get_xyz()
(-0.8571673007021123, 1.0497271911386187e-16, 0.5150380749100542)
However, the advantage of this function is that the input values may be numpy
arrays, in which case, the return values will also be numpy arrays.
:param ra: The right ascension(s) in radians. May be a numpy array.
:param dec: The declination(s) in radians. May be a numpy array.
:param r: The distance(s) from Earth (default 1.). May be a numpy array.
:returns: x, y, z as a tuple.
"""
cosdec = np.cos(dec)
x = cosdec * np.cos(ra) * r
y = cosdec * np.sin(ra) * r
z = np.sin(dec) * r
return x,y,z
@staticmethod
def xyz_to_radec(x, y, z, return_r=False):
"""Convert 3D x,y,z coordinates to ra, dec (in radians).
The connection between (ra,dec) and (x,y,z) are given by the following formulae:
.. math::
x &= r \\cos(dec) \\cos(ra) \\\\
y &= r \\cos(dec) \\sin(ra) \\\\
z &= r \\sin(dec)
For a single (x,y,z) position, the following are essentially equivalent:
>>> x = 0.839 # May be any 3D location
>>> y = 0.123 # Not necessarily on unit sphere
>>> z = 0.530
>>> CelestialCoord.xyz_to_radec(x, y, z)
(0.14556615088111796, 0.558616191048523)
>>> c = CelestialCoord.from_xyz(x, y, z)
>>> c.ra.rad, c.dec.rad
(0.145566150881118, 0.558616191048523)
However, the advantage of this function is that the input values may be numpy
arrays, in which case, the return values will also be numpy arrays.
:param x: The x position(s) in 3 dimensions. May be a numpy array.
:param y: The y position(s) in 3 dimensions. May be a numpy array.
:param z: The z position(s) in 3 dimensions. May be a numpy array.
:param return_r: Whether to return r as well as ra, dec. (default: False)
:returns: ra, dec as a tuple. Or if return_r is True, (ra, dec, r).
"""
xy2 = x**2 + y**2
ra = np.arctan2(y, x)
# Note: We don't need arctan2, since always quadrant 1 or 4.
# Using plain arctan is slightly faster. About 10% for the whole function.
# However, if any points have x=y=0, then this will raise a numpy warning.
# It still gives the right answer, but we catch and ignore the warning here.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning)
dec = np.arctan(z/np.sqrt(xy2))
if return_r:
return ra, dec, np.sqrt(xy2 + z**2)
else:
return ra, dec
def normal(self):
"""Return the coordinate in the "normal" convention of having 0 <= ra < 24 hours.
This convention is not enforced on construction, so this function exists to make it
easy to convert if desired.
Functions such as `from_galactic` and `from_xyz` will return normal coordinates.
"""
return _CelestialCoord(self.ra.wrap(_Angle(math.pi)), self.dec)
@staticmethod
def _raw_dsq(c1, c2):
# Compute the raw dsq between two coordinates.
# Both are expected to already have _set_aux() called.
return (c1._x-c2._x)**2 + (c1._y-c2._y)**2 + (c1._z-c2._z)**2
@staticmethod
def _raw_cross(c1, c2):
# Compute the raw cross product between two coordinates.
# Both are expected to already have _set_aux() called.
return (c1._y * c2._z - c2._y * c1._z,
c1._z * c2._x - c2._z * c1._x,
c1._x * c2._y - c2._x * c1._y)
def distanceTo(self, coord2):
"""Returns the great circle distance between this coord and another one.
The return value is an Angle object
:param coord2: The CelestialCoord to calculate the distance to.
:returns: the great circle distance to ``coord2``.
"""
# The easiest way to do this in a way that is stable for small separations
# is to calculate the (x,y,z) position on the unit sphere corresponding to each
# coordinate position.
#
# x = cos(dec) cos(ra)
# y = cos(dec) sin(ra)
# z = sin(dec)
self._set_aux()
coord2._set_aux()
# The direct distance between the two points is
#
# d^2 = (x1-x2)^2 + (y1-y2)^2 + (z1-z2)^2
dsq = self._raw_dsq(self, coord2)
if dsq < 3.99:
# (The usual case. This formula is perfectly stable here.)
# This direct distance can then be converted to a great circle distance via
#
# sin(theta/2) = d/2
theta = 2. * math.asin(0.5 * math.sqrt(dsq))
else:
# Points are nearly antipodes where the accuracy of this formula starts to break down.
# But in this case, the cross product provides an accurate distance.
cx, cy, cz = self._raw_cross(self, coord2)
crosssq = cx**2 + cy**2 + cz**2
theta = math.pi - math.asin(math.sqrt(crosssq))
return _Angle(theta)
def greatCirclePoint(self, coord2, theta):
"""Returns a point on the great circle connecting self and coord2.
Two points, c1 and c2, on the unit sphere define a great circle (so long as the two points
are not either coincident or antipodal). We can define points on this great circle by
their angle from c1, such that the angle for c2 has 0 < theta2 < pi. I.e. theta increases
from 0 as the points move from c1 towards c2.
This function then returns the coordinate on this great circle (where c1 is ``self`` and
c2 is ``coord2``) that corresponds to the given angle ``theta``.
:param coord2: Another CelestialCoord defining the great circle to use.
:param theta: The Angle along the great circle corresponding to the desired point.
:returns: the corresponding CelestialCoord
"""
self._set_aux()
coord2._set_aux()
# Define u = self
# v = coord2
# w = (u x v) x u
# The great circle through u and v is then
#
# R(t) = u cos(t) + w sin(t)
#
# Rather than directly calculate (u x v) x u, let's do some simplification first.
# u x v = ( uy vz - uz vy )
# ( uz vx - ux vz )
# ( ux vy - uy vx )
# wx = (u x v)_y uz - (u x v)_z uy
# = (uz vx - ux vz) uz - (ux vy - uy vx) uy
# = vx uz^2 - vz ux uz - vy ux uy + vx uy^2
# = vx (1 - ux^2) - ux (uz vz + uy vy)
# = vx - ux (u . v)
# = vx - ux (1 - d^2/2)
# = vx - ux + ux d^2/2
# wy = vy - uy + uy d^2/2
# wz = vz - uz + uz d^2/2
dsq = self._raw_dsq(self, coord2)
# These are unnormalized yet.
wx = coord2._x - self._x + self._x * dsq/2.
wy = coord2._y - self._y + self._y * dsq/2.
wz = coord2._z - self._z + self._z * dsq/2.
# Normalize
wr = (wx**2 + wy**2 + wz**2)**0.5
if wr == 0.:
raise ValueError("coord2 does not define a unique great circle with self.")
wx /= wr
wy /= wr
wz /= wr
# R(theta)
s, c = theta.sincos()
rx = self._x * c + wx * s
ry = self._y * c + wy * s
rz = self._z * c + wz * s
return CelestialCoord.from_xyz(rx,ry,rz)
def _triple(self, coord2, coord3):
"""Compute the scalar triple product of the three vectors:
(A x C). B = sina sinb sinC
where C = self, A = coord2, B = coord3. This is used by both angleBetween and area.
(Although note that the triple product is invariant to the ordering modulo a sign.)
"""
# Note, the scalar triple product, (AxC).B, is the determinant of the 3x3 matrix
# [ xA yA zA ]
# [ xC yC zC ]
# [ xB yB zB ]
# Furthermore, it is more stable to calculate it that way than computing the cross
# product by hand and then dotting it to the other vector.
return np.linalg.det([ [ coord2._x, coord2._y, coord2._z ],
[ self._x, self._y, self._z ],
[ coord3._x, coord3._y, coord3._z ] ])
def _alt_triple(self, coord2, coord3):
"""Compute a different triple product of the three vectors:
(A x C). (B x C) = sina sinb cosC
where C = self, A = coord2, B = coord3. This is used by both angleBetween and area.
"""
# We can simplify (AxC).(BxC) as follows:
# (A x C) . (B x C)
# = (C x (BxC)) . A Rotation of triple product with (BxC) one of the vectors
# = ((C.C)B - (C.B)C) . A Vector triple product identity
# = A.B - (A.C) (B.C) C.C = 1
# Dot products for nearby coordinates are not very accurate. Better to use the distances
# between the points: A.B = 1 - d_AB^2/2
# = 1 - d_AB^2/2 - (1-d_AC^2/2) (1-d_BC^2/2)
# = d_AC^2 / 2 + d_BC^2 / 2 - d_AB^2 / 2 - d_AC^2 d_BC^2 / 4
dsq_AC = (self._x-coord2._x)**2 + (self._y-coord2._y)**2 + (self._z-coord2._z)**2
dsq_BC = (self._x-coord3._x)**2 + (self._y-coord3._y)**2 + (self._z-coord3._z)**2
dsq_AB = (coord3._x-coord2._x)**2 + (coord3._y-coord2._y)**2 + (coord3._z-coord2._z)**2
return 0.5 * (dsq_AC + dsq_BC - dsq_AB - 0.5 * dsq_AC * dsq_BC)
def angleBetween(self, coord2, coord3):
"""Find the open angle at the location of the current coord between ``coord2`` and ``coord3``.
The current coordinate along with the two other coordinates form a spherical triangle
on the sky. This function calculates the angle between the two sides at the location of
the current coordinate.
Note that this returns a signed angle. The angle is positive if the sweep direction from
``coord2`` to ``coord3`` is counter-clockwise (as observed from Earth). It is negative if
the direction is clockwise.
:param coord2: A second CelestialCoord
:param coord3: A third CelestialCoord
:returns: the angle between the great circles joining the other two coordinates to the
current coordinate.
"""
# Call A = coord2, B = coord3, C = self
# Then we are looking for the angle ACB.
# If we treat each coord as a (x,y,z) vector, then we can use the following spherical
# trig identities:
#
# (A x C) . B = sina sinb sinC
# (A x C) . (B x C) = sina sinb cosC
#
# Then we can just use atan2 to find C, and atan2 automatically gets the sign right.
# And we only need 1 trig call, assuming that x,y,z are already set up, which is often
# the case.
self._set_aux()
coord2._set_aux()
coord3._set_aux()
sinC = self._triple(coord2, coord3)
cosC = self._alt_triple(coord2, coord3)
C = math.atan2(sinC, cosC)
return _Angle(C)
def area(self, coord2, coord3):
"""Find the area of a spherical triangle in steradians.
The current coordinate along with the two other coordinates form a spherical triangle
on the sky. This function calculates the area of that spherical triangle, which is
measured in steradians (i.e. surface area of the triangle on the unit sphere).
:param coord2: A second CelestialCoord
:param coord3: A third CelestialCoord
:returns: the area in steradians of the given spherical triangle.
"""
# The area of a spherical triangle is defined by the "spherical excess", E.
# There are several formulae for E:
# (cf. http://en.wikipedia.org/wiki/Spherical_trigonometry#Area_and_spherical_excess)
#
# E = A + B + C - pi
# tan(E/4) = sqrt(tan(s/2) tan((s-a)/2) tan((s-b)/2) tan((s-c)/2)
# tan(E/2) = tan(a/2) tan(b/2) sin(C) / (1 + tan(a/2) tan(b/2) cos(C))
#
# We use the last formula, which is stable both for small triangles and ones that are
# nearly degenerate (which the middle formula may have trouble with).
#
# Furthermore, we can use some of the math for angleBetween and distanceTo to simplify
# this further:
#
# In angleBetween, we have formulae for sina sinb sinC and sina sinb cosC.
# In distanceTo, we have formulae for sin(a/2) and sin(b/2).
#
# Define: F = sina sinb sinC
# G = sina sinb cosC
# da = 2 sin(a/2)
# db = 2 sin(b/2)
#
# tan(E/2) = sin(a/2) sin(b/2) sin(C) / (cos(a/2) cos(b/2) + sin(a/2) sin(b/2) cos(C))
# = sin(a) sin(b) sin(C) / (4 cos(a/2)^2 cos(b/2)^2 + sin(a) sin(b) cos(C))
# = F / (4 (1-sin(a/2)^2) (1-sin(b/2)^2) + G)
# = F / ( (4-da^2) (4-db^2)/4 + G )
self._set_aux()
coord2._set_aux()
coord3._set_aux()
F = self._triple(coord2, coord3)
G = self._alt_triple(coord2, coord3)
dasq = (self._x-coord2._x)**2 + (self._y-coord2._y)**2 + (self._z-coord2._z)**2
dbsq = (self._x-coord3._x)**2 + (self._y-coord3._y)**2 + (self._z-coord3._z)**2
tanEo2 = F / ( 0.25 * (4.-dasq) * (4.-dbsq) + G)
E = 2. * math.atan( abs(tanEo2) )
return E
_valid_projections = [None, 'gnomonic', 'stereographic', 'lambert', 'postel']
def project(self, coord2, projection=None):
"""Use the currect coord as the center point of a tangent plane projection to project
the ``coord2`` coordinate onto that plane.
This function return a tuple (u,v) in the Euclidean coordinate system defined by
a tangent plane projection around the current coordinate, with +v pointing north and
+u pointing west. (i.e. to the right on the sky if +v is up.)
There are currently four options for the projection, which you can specify with the
optional ``projection`` keyword argument:
:gnomonic: [default] uses a gnomonic projection (i.e. a projection from the center of
the sphere, which has the property that all great circles become straight
lines. For more information, see
http://mathworld.wolfram.com/GnomonicProjection.html
This is the usual TAN projection used by most FITS images.
:stereographic: uses a stereographic projection, which preserves angles, but
not area. For more information, see
http://mathworld.wolfram.com/StereographicProjection.html
:lambert: uses a Lambert azimuthal projection, which preserves area, but not angles.
For more information, see
http://mathworld.wolfram.com/LambertAzimuthalEqual-AreaProjection.html
:postel: uses a Postel equidistant projection, which preserves distances from
the projection point, but not area or angles. For more information, see
http://mathworld.wolfram.com/AzimuthalEquidistantProjection.html
The distance or angle errors increase with distance from the projection point of course.
:param coord2: The coordinate to project onto the tangent plane.
:param projection: The name of the projection to be used. [default: gnomonic, see above
for other options]
:returns: (u,v) as Angle instances
"""
if projection not in CelestialCoord._valid_projections:
raise ValueError('Unknown projection: %s'%projection)
self._set_aux()
coord2._set_aux()
# The core calculation is done in a helper function:
u, v = self._project(coord2._cosra, coord2._sinra, coord2._cosdec, coord2._sindec,
projection)
return u * radians, v * radians
def project_rad(self, ra, dec, projection=None):
"""This is basically identical to the project() function except that the input ``ra``, ``dec``
are given in radians rather than packaged as a CelestialCoord object and the returned
u,v are given in radians.
The main advantage to this is that it will work if ``ra`` and ``dec`` are NumPy arrays, in which
case the output ``u``, ``v`` will also be NumPy arrays.
:param ra: The right ascension in radians to project onto the tangent plane.
:param dec: The declination in radians to project onto the tangent plane.
:param projection: The name of the projection to be used. [default: gnomonic, see ``project``
docstring for other options]
:returns: (u,v) in radians
"""
if projection not in CelestialCoord._valid_projections:
raise ValueError('Unknown projection: %s'%projection)
self._set_aux()
cosra = np.cos(ra)
sinra = np.sin(ra)
cosdec = np.cos(dec)
sindec = np.sin(dec)
return self._project(cosra, sinra, cosdec, sindec, projection)
def _project(self, cosra, sinra, cosdec, sindec, projection):
# The equations are given at the above mathworld websites. They are the same except
# for the definition of k:
#
# x = k cos(dec) sin(ra-ra0)
# y = k ( cos(dec0) sin(dec) - sin(dec0) cos(dec) cos(ra-ra0) )
#
# Lambert:
# k = sqrt( 2 / ( 1 + cos(c) ) )
# Stereographic:
# k = 2 / ( 1 + cos(c) )
# Gnomonic:
# k = 1 / cos(c)
# Postel:
# k = c / sin(c)
# where cos(c) = sin(dec0) sin(dec) + cos(dec0) cos(dec) cos(ra-ra0)
# cos(dra) = cos(ra-ra0) = cos(ra0) cos(ra) + sin(ra0) sin(ra)
cosdra = self._cosra * cosra
cosdra += self._sinra * sinra
# sin(dra) = -sin(ra - ra0)
# Note: - sign here is to make +x correspond to -ra,
# so x increases for decreasing ra.
# East is to the left on the sky!
# sin(dra) = -cos(ra0) sin(ra) + sin(ra0) cos(ra)
sindra = self._sinra * cosra
sindra -= self._cosra * sinra
# Calculate k according to which projection we are using
cosc = cosdec * cosdra
cosc *= self._cosdec
cosc += self._sindec * sindec
if projection is None or projection[0] == 'g':
k = 1. / cosc
elif projection[0] == 's':
k = 2. / (1. + cosc)
elif projection[0] == 'l':
k = np.sqrt( 2. / (1.+cosc) )
else:
c = np.arccos(cosc)
# k = c / np.sin(c)
# np.sinc is defined as sin(pi x) / (pi x)
# So need to divide by pi first.
k = 1. / np.sinc(c / np.pi)
# u = k * cosdec * sindra
# v = k * ( self._cosdec * sindec - self._sindec * cosdec * cosdra )
u = cosdec * sindra
v = cosdec * cosdra
v *= -self._sindec
v += self._cosdec * sindec
u *= k
v *= k
return u, v
def deproject(self, u, v, projection=None):
"""Do the reverse process from the project() function.
i.e. This takes in a position (u,v) and returns the corresponding celestial
coordinate, using the current coordinate as the center point of the tangent plane
projection.
:param u: The u position on the tangent plane to deproject (must be an Angle
instance)
:param v: The v position on the tangent plane to deproject (must be an Angle
instance)
:param projection: The name of the projection to be used. [default: gnomonic, see ``project``
docstring for other options]
:returns: the corresponding CelestialCoord for that position.
"""
if projection not in CelestialCoord._valid_projections:
raise ValueError('Unknown projection: %s'%projection)
# Again, do the core calculations in a helper function
ra, dec = self._deproject(u / radians, v / radians, projection)
return CelestialCoord(_Angle(ra), _Angle(dec))
def deproject_rad(self, u, v, projection=None):
"""This is basically identical to the deproject() function except that the output ``ra``,
``dec`` are returned as a tuple (ra, dec) in radians rather than packaged as a CelestialCoord
object and ``u`` and ``v`` are in radians rather than Angle instances.
The main advantage to this is that it will work if ``u`` and ``v`` are NumPy arrays, in which
case the output ``ra``, ``dec`` will also be NumPy arrays.
:param u: The u position in radians on the tangent plane to deproject
:param v: The v position in radians on the tangent plane to deproject
:param projection: The name of the projection to be used. [default: gnomonic, see ``project``
docstring for other options]
:returns: the corresponding RA, Dec in radians
"""
if projection not in CelestialCoord._valid_projections:
raise ValueError('Unknown projection: %s'%projection)
return self._deproject(u, v, projection)
def _deproject(self, u, v, projection):
# The inverse equations are also given at the same web sites:
#
# sin(dec) = cos(c) sin(dec0) + v sin(c) cos(dec0) / r
# tan(ra-ra0) = u sin(c) / (r cos(dec0) cos(c) - v sin(dec0) sin(c))
#
# where
#
# r = sqrt(u^2+v^2)
# c = tan^(-1)(r) for gnomonic
# c = 2 tan^(-1)(r/2) for stereographic
# c = 2 sin^(-1)(r/2) for lambert
# c = r for postel
# Note that we can rewrite the formulae as:
#
# sin(dec) = cos(c) sin(dec0) + v (sin(c)/r) cos(dec0)
# tan(ra-ra0) = u (sin(c)/r) / (cos(dec0) cos(c) - v sin(dec0) (sin(c)/r))
#
# which means we only need cos(c) and sin(c)/r. For most of the projections,
# this saves us from having to take sqrt(rsq).
rsq = u*u
rsq += v*v
if projection is None or projection[0] == 'g':
# c = arctan(r)
# cos(c) = 1 / sqrt(1+r^2)
# sin(c) = r / sqrt(1+r^2)
cosc = sinc_over_r = 1./np.sqrt(1.+rsq)
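# ---------------------------------------------------------------------------
# Illustrative sketch (a plain-NumPy restatement of the gnomonic formulas quoted
# in the _project comments above; it is not the coord package API itself, and the
# function name is an assumption made for illustration).
import numpy as np

def gnomonic_project(ra0, dec0, ra, dec):
    """Project (ra, dec) about the tangent point (ra0, dec0); all angles in radians."""
    cosc = np.sin(dec0)*np.sin(dec) + np.cos(dec0)*np.cos(dec)*np.cos(ra - ra0)
    k = 1.0 / cosc
    # +u points west, i.e. u increases as ra decreases (east is to the left on the sky)
    u = -k * np.cos(dec) * np.sin(ra - ra0)
    v = k * (np.cos(dec0)*np.sin(dec) - np.sin(dec0)*np.cos(dec)*np.cos(ra - ra0))
    return u, v

# Same numbers as the project() docstring example: center at (10h, 30deg),
# source at (10.5h, 31deg).
u, v = gnomonic_project(np.deg2rad(150.0), np.deg2rad(30.0),
                        np.deg2rad(157.5), np.deg2rad(31.0))
print(np.rad2deg(u), np.rad2deg(v))  # approx -6.4524 1.2179, matching the docstring
# ---------------------------------------------------------------------------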
#------------------------imports------------------------
from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.patches import Ellipse
from matplotlib.patches import Rectangle
import warnings
warnings.filterwarnings('ignore')
from astropy import units as u
from astropy.cosmology import Planck18
import math
import img_scale
from functions import *
import argparse
from astropy.wcs import utils
import os
from os import path
import cmasher as cmr
import glob
from astropy.convolution import convolve
from astropy.convolution import Gaussian2DKernel
from astropy.coordinates import SkyCoord
defaultfile='/Users/emma/GitHub/possum-tools/DataProcess/pilot_sources.txt'
xraysrcs=['2129-5053','2144-5637','2052-5246']
WISEfolder='/Volumes/NARNIA/pilot_cutouts/WISEfiles/'
dpi=300
#set parameters for plots
# get colour maps and set bad values to be transparent
bluecmap=plt.cm.Blues
bluecmap.set_bad('white',0)
redcmap=plt.cm.Reds
redcmap.set_bad('white',0)
yellowcmap=plt.cm.YlOrBr_r
yellowcmap.set_bad('white',0)
purplecmap=plt.cm.Purples
purplecmap.set_bad('white',0)
greyscale=plt.cm.gray
greyscale.set_bad('black',1)
greyscale_r=plt.cm.gray_r
greyscale_r.set_bad('white',1)
cmap1=cmr.get_sub_cmap('plasma', 0, 0.333)
cmap1.set_bad('white',0)
cmap2=cmr.get_sub_cmap('plasma', 0.333, 0.666)
cmap2.set_bad('white',0)
cmap3=cmr.get_sub_cmap('plasma', 0.666, 1)
cmap3.set_bad('white',0)
#perceptively uniform cmap that will be used for contours
twilightcmap = plt.cm.get_cmap('twilight')
# contour levels
# each contour level goes up by a factor of 2**step,
# so use step=1 for contours that double each time
contourexps=np.arange(start=0,stop=32,step=0.5)
contourmults=np.power(2,contourexps)
contourexps_1=np.arange(start=0,stop=32,step=1)
contourmults_1=np.power(2,contourexps_1)
contourexps_2=np.arange(start=0,stop=32,step=2)
contourmults_2=np.power(2,contourexps_2)
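# Illustrative note (assumption): these multipliers are typically combined with an
# image rms to form contour levels, with the lowest contour at ~3x the rms noise
# (as noted for blue_thresh below); the rms value here is a made-up example.
example_rms = 4e-5                                   # hypothetical rms in Jy/beam
example_levels = 3*example_rms*contourmults_1        # contours at 3*rms * 2**n, doubling each step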
plt.rcParams.update({'lines.linewidth':0.3})
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['font.family'] = 'cmu serif'
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
beampad=5
def main(args):
###########################################################################
#read in sources
print('Reading in source list')
sources=np.loadtxt(args.filename,dtype='str')
nsrc=sources.shape[0]
###########################################################################
sourcecount=0
for i in range(nsrc-2,nsrc-1):
if 1+1==2:
src=sources[i,0]
print(src)
ra=sources[i,1]
dec=sources[i,2]
coords=SkyCoord(ra,dec)
POSSUMSB=sources[i,3]
EMUSB=sources[i,4]
LAS=float(sources[i,5]) #arcmin
FOV=float(sources[i,7]) #degrees
z=float(sources[i,8])
EMUrms=float(sources[i,9])
POSSUMrms=float(sources[i,10])
#rmscale=float(sources[i,11])
xmin=float(sources[i,12])
ymin=float(sources[i,13])
xmax=float(sources[i,14])
ymax=float(sources[i,15])
ratio=(ymax-ymin)/(xmax-xmin)
scalebarkpc=float(sources[i,16])
if z!=None:
dist = Planck18.comoving_distance(z) #distance in MPc
scale=Planck18.arcsec_per_kpc_comoving(z) # scale in arcsec per kpc
sb_length_arcsec=scale*scalebarkpc*u.kpc
imagefolder='/Volumes/NARNIA/pilot_cutouts/'+src+'/'
if src in xraysrcs:
purple_im_folder=imagefolder+'/xmm/pps/'
xrayims=glob.glob(purple_im_folder+'*M1S*IMAGE_8000.FTZ')
purple_im=xrayims[0]
else:
purple_im=None
#blue
try:
ra_str=str(coords.ra.deg)[0:5]
dec_str=str(coords.dec.deg)[0:5]
#WISEim=glob.glob(WISEfolder+'*-w1-*ra{}*dec{}*'.format(ra_str,dec_str))
#blue_im=WISEim[0]
blue_im=imagefolder+'2200-5611_wise.fits'
blue_thresh=5 #threshold for lowest contour (typically 3x rms noise level)
blue_label=r'WISE 3.4 $\mu$m'
except:
try:
WISEim=glob.glob(imagefolder+'*WISE_3.4.fits')
blue_im=WISEim[0]
blue_thresh=5
blue_label=r'WISE 3.4 $\mu$m'
except:
print("uh oh")
greyscale_im=imagefolder+src+'_DSS.fits' #fits data file to load and display in greyscale
greyscale_label='DSS'
# get DES data
R,Rhead=fitsopen(glob.glob(imagefolder+'DES*_i*.fits')[0])
G,Ghead=fitsopen(glob.glob(imagefolder+'DES*_r*.fits')[0])
B,Bhead=fitsopen(glob.glob(imagefolder+'DES*_g*.fits')[0])
DES_wcs=WCS(Rhead)
#bins=np.arange(0,200,1)
#plt.hist(np.ndarray.flatten(R),alpha=0.3,color='r',bins=bins)
#plt.hist(np.ndarray.flatten(G),alpha=0.3,color='g',bins=bins)
#plt.hist(np.ndarray.flatten(B),alpha=0.3,color='b',bins=bins)
#plt.show()
#break
#R=R-np.nanmean(R)
#G=G-np.nanmean(G)
#B=B-np.nanmean(B)
#R=R/2.
img = np.zeros((R.shape[0], R.shape[1], 3), dtype=float)
img[:,:,0] = img_scale.linear(R, scale_min=0, scale_max=1000)
img[:,:,1] = img_scale.linear(G, scale_min=0, scale_max=1000)
img[:,:,2] = img_scale.linear(B, scale_min=0, scale_max=1000)
#R2,Rhead2=fitsopen(glob.glob(imagefolder+'DES*_i*.fits')[1])
#G2,Ghead2=fitsopen(glob.glob(imagefolder+'DES*_r*.fits')[1])
#B2,Bhead2=fitsopen(glob.glob(imagefolder+'DES*_g*.fits')[1])
#DES_wcs2=WCS(Rhead2)
#img2 = np.zeros((R2.shape[0], R2.shape[1], 3), dtype=float)
#img2[:,:,0] = img_scale.linear(R2, scale_min=0.1, scale_max=500)
#img2[:,:,1] = img_scale.linear(G2, scale_min=0.1, scale_max=500)
#img2[:,:,2] = img_scale.linear(B2, scale_min=0.1, scale_max=500)
plt.figure(dpi= dpi,figsize=(8.25,8.25*ratio))
# load the data for the background greyscale
greyscale_data,greyscale_header=fitsopen(greyscale_im)
wcs=WCS(greyscale_header)
pix_scale=greyscale_header['CDELT2']*3600*u.arcsec
# plot the greyscale
ax=plt.subplot(projection=wcs)
ax.imshow(greyscale_data,origin='lower',cmap=greyscale,vmin=vmax(greyscale_data,5),vmax=vmax(greyscale_data,99.9))
ax.imshow(img,transform=ax.get_transform(DES_wcs),origin='lower')
#ax.imshow(img2,transform=ax.get_transform(DES_wcs2),origin='lower')
#purple data
if purple_im!=None:
purple_data,purple_header=fitsopen(purple_im)
purple_wcs=WCS(purple_header)
kernel = Gaussian2DKernel(x_stddev=1)
smoothed=convolve(purple_data,kernel)
purple_contours = [1,2,4,8,16,32,64]
purple_thresh=1
ax.contour(smoothed, transform=ax.get_transform(purple_wcs), colors=[twilightcmap(0.4)],levels=purple_contours,linewidths=1)
# calculate transparency array for map (alphas) by normalising the data to be between 0 and 1
# any data below the threshold value will have alpha=0
# if you want to saturate the scale at a value lower than the maximum, change np.nanmax(purple_data) in the line below
#purple_alphas=np.divide(purple_data-purple_thresh,np.nanmax(purple_data)-purple_thresh)
#purple_alphas=np.where(purple_alphas<0,0,purple_alphas)
#ax2=plt.imshow(purple_data,origin='lower',transform=ax.get_transform(purple_wcs),cmap=purplecmap,alpha=purple_alphas)
purple_label='XMM-Newton'
if purple_label!=None:
ax.plot(-100,-100,'-',c=twilightcmap(0.4),label=purple_label,linewidth=3)
#blue data
if blue_im!=None:
blue_data,blue_header=fitsopen(blue_im)
blue_wcs=WCS(blue_header)
blue_contours = [blue_thresh * i for i in contourmults_1]
ax.contour(blue_data, transform=ax.get_transform(blue_wcs), colors=[twilightcmap(0.2)],levels=blue_contours[:1],linewidths=0.1)
# calculate transparency array for map (alphas) by normalising the data to be between 0 and 1
blue_alphas=np.divide(blue_data-blue_thresh,np.nanmax(blue_data)-blue_thresh)
blue_alphas=np.where(blue_alphas<0,0,blue_alphas)
blue_alphas=np.where(blue_alphas>1,1,blue_alphas)
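# Worked example (illustrative): with blue_thresh=5 and a map maximum of 105,
# a pixel of value 55 gets alpha = (55-5)/(105-5) = 0.5, i.e. 50% opaque.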
ax3=plt.imshow(blue_data,origin='lower',transform=ax.get_transform(blue_wcs),cmap=bluecmap,alpha=blue_alphas,vmin=-10*np.nanmax(blue_data),vmax=10*np.nanmax(blue_data))
if blue_label!=None:
ax.plot(-100,-100,'-',c=twilightcmap(0.2),label=blue_label,linewidth=3)
#add EMU data
if EMUSB!='NaN':
red_data,red_header=fitsopen(imagefolder+src+'_EMU.fits')
red_label='ASKAP 944 MHz'
red_wcs=WCS(red_header)
red_thresh=3.*EMUrms
red_contours = [red_thresh * i for i in contourmults_1]
ax.contour(red_data, transform=ax.get_transform(red_wcs), colors=[twilightcmap(0.7)],levels=red_contours[:1],linewidths=0.1)
# calculate transparency array for map (alphas) by normalising the data to be between 0 and 1
red_alphas=np.divide(red_data-red_thresh,vmax(red_data,99.9)-red_thresh)
red_alphas=np.where(red_alphas<0,0,red_alphas)
red_alphas=np.where(red_alphas>1,1,red_alphas)
red_alphas=0.333*np.sqrt(red_alphas)
ax5=plt.imshow(red_data,origin='lower',transform=ax.get_transform(red_wcs),cmap=redcmap,alpha=red_alphas,vmin=-10*np.nanmax(red_data),vmax=10*np.nanmax(red_data))
if red_label!=None:
ax.plot(-100,-100,'-',c=twilightcmap(0.75),label=red_label,linewidth=3)
r_bmaj=red_header['BMAJ']*u.degree
r_bmin=red_header['BMIN']*u.degree
r_bpa=red_header['BPA']
r_bmaj_pix=r_bmaj/pix_scale
r_bmin_pix=r_bmin/pix_scale
lowerleft=utils.skycoord_to_pixel(utils.pixel_to_skycoord(xmin,ymin,red_wcs),wcs)
upperright=utils.skycoord_to_pixel(utils.pixel_to_skycoord(xmax,ymax,red_wcs),wcs)
greyxmin=lowerleft[0]
greyymin=lowerleft[1]
greyxmax=upperright[0]
greyymax=upperright[1]
#add possum data
if POSSUMSB!='NaN' and POSSUMSB!='10035':
yellow_data,yellow_header=fitsopen(imagefolder+src+'_POSSUM.fits')
yellow_label='ASKAP 1368 MHz'
yellow_thresh=3.*POSSUMrms
yellow_wcs=WCS(yellow_header)
yellow_contours = [yellow_thresh * i for i in contourmults_1]
ax.contour(yellow_data, transform=ax.get_transform(yellow_wcs), colors=[twilightcmap(0.85)],levels=yellow_contours[:1],linewidths=0.1)
# calculate transparency array for map (alphas) by normalising the data to be between 0 and 1
yellow_alphas=np.divide(yellow_data-yellow_thresh,vmax(yellow_data,99.9)-yellow_thresh)
yellow_alphas=np.where(yellow_alphas<0,0,yellow_alphas)
yellow_alphas=np.where(yellow_alphas>1,1,yellow_alphas)
yellow_alphas=0.333*np.sqrt(yellow_alphas)
ax5=plt.imshow(yellow_data,origin='lower',transform=ax.get_transform(yellow_wcs),cmap=yellowcmap,alpha=yellow_alphas,vmin=-10*np.nanmax(yellow_data),vmax=10*np.nanmax(yellow_data))
if yellow_label!=None:
ax.plot(-100,-100,'-',c='gold',label=yellow_label,linewidth=3)
y_bmaj=yellow_header['BMAJ']*u.degree
y_bmin=yellow_header['BMIN']*u.degree
y_bpa=yellow_header['BPA']
y_bmaj_pix=y_bmaj/pix_scale
y_bmin_pix=y_bmin/pix_scale
lowerleft=utils.skycoord_to_pixel(utils.pixel_to_skycoord(xmin,ymin,yellow_wcs),wcs)
upperright=utils.skycoord_to_pixel(utils.pixel_to_skycoord(xmax,ymax,yellow_wcs),wcs)
greyxmin=lowerleft[0]
greyymin=lowerleft[1]
greyxmax=upperright[0]
greyymax=upperright[1]
if z!=None:
sb_length_pix= sb_length_arcsec/pix_scale
scalebar=Rectangle(xy=(greyxmax-sb_length_pix-10,greyymin+5),width=sb_length_pix,height=sb_length_pix/40.,edgecolor='none',fc='white',alpha=1)
ax.add_patch(scalebar)
try:
scaletext='{} kpc'.format(int(scalebarkpc))
plt.annotate(xy=(greyxmax-(sb_length_pix/2.)-10,greyymin+5+(sb_length_pix/12.)),text=scaletext,c='white',ha="center")
except:
print("shrug")
plt.gca().set_aspect("equal")
plt.xlabel('Right Ascension [J2000]')
plt.ylabel('Declination [J2000]')
ax.set_xlim(greyxmin,greyxmax)
ax.set_ylim(greyymin,greyymax)
#plt.legend(loc=3)
plt.savefig('/Volumes/NARNIA/pilot_cutouts/'+src+'_DES.png',dpi=dpi,transparent=True,bbox_inches='tight')
plt.savefig('/Users/emma/OneDrive/PhD/thesis/Figures/new/'+src+'_mwl_RBG.png',dpi=dpi,transparent=True,bbox_inches='tight')
##############################################################################################
plt.figure(dpi= dpi,figsize=(8.25,8.25*ratio))
# plot the greyscale
ax=plt.subplot(projection=wcs)
ax.imshow(greyscale_data,origin='lower',cmap=greyscale_r,vmin=vmax(greyscale_data,1),vmax=vmax(greyscale_data,99.9))
ax.imshow(R,transform=ax.get_transform(DES_wcs),origin='lower',vmin=vmax(R,1),vmax=vmax(R,99.5),cmap=greyscale_r)
#purple data
if purple_im!=None:
#ax.contour(smoothed, transform=ax.get_transform(purple_wcs), colors=[twilightcmap(0.4)],levels=purple_contours,linewidths=0.2)
kernel = Gaussian2DKernel(x_stddev=2)
smoothed=convolve(purple_data,kernel)
# calculate transparency array for map (alphas) by normalising the data to be between 0 and 1
# any data below the threshold value will have alpha=0
# if you want to saturate the scale at a value lower than the maximum, change np.nanmax(purple_data) in the line below
purple_alphas=np.divide(smoothed-purple_thresh,vmax(smoothed,99.9)-purple_thresh)
purple_alphas=np.where(purple_alphas<0,0,purple_alphas)
purple_alphas=np.where(purple_alphas>1,1,purple_alphas)
purple_alphas=0.5*np.sqrt(purple_alphas)
ax2=plt.imshow(purple_data,origin='lower',transform=ax.get_transform(purple_wcs),cmap=cmap1,alpha=purple_alphas,vmin=-10*np.nanmax(purple_data),vmax=10*np.nanmax(purple_data))
purple_label='XMM-Newton'
ax.contour(smoothed, transform=ax.get_transform(purple_wcs), colors=[twilightcmap(0.4)],levels=purple_contours,linewidths=0.8)
if purple_label!=None:
ax.plot(-100,-100,'-',c=twilightcmap(0.4),label=purple_label,linewidth=3)
#blue data
if blue_im!=None:
blue_data,blue_header=fitsopen(blue_im)
blue_wcs=WCS(blue_header)
blue_contours = [blue_thresh * i for i in contourmults]
ax.contour(blue_data, transform=ax.get_transform(blue_wcs), colors=[twilightcmap(0.2)],levels=blue_contours,linewidths=0.8)
# calculate transparency array for map (alphas) by normalising the data to be between 0 and 1
blue_alphas=np.divide(blue_data-blue_thresh,np.nanmax(blue_data)-blue_thresh)
blue_alphas=np.sqrt(np.where(blue_alphas<0,0,blue_alphas))
ax3=plt.imshow(blue_data,origin='lower',transform=ax.get_transform(blue_wcs),cmap=bluecmap,alpha=blue_alphas,vmin=-10*np.nanmax(blue_data),vmax=10*np.nanmax(blue_data))
if blue_label!=None:
ax.plot(-100,-100,'-',c=twilightcmap(0.2),label=blue_label,linewidth=3)
#add EMU data
if EMUSB!='NaN':
# calculate transparency array for map (alphas) by normalising the data to be between 0 and 1
red_alphas=np.divide(red_data-red_thresh,vmax(red_data,99.9)-red_thresh)
red_alphas=np.where(red_alphas<0,0,red_alphas)
red_alphas=np.where(red_alphas>1,1,red_alphas)
red_alphas=0.8*np.sqrt(red_alphas)
red_contours = [red_thresh * i for i in contourmults]
ax.contour(red_data, transform=ax.get_transform(red_wcs), colors=[twilightcmap(0.7)],levels=red_contours,linewidths=0.8)
ax4=plt.imshow(red_data,origin='lower',transform=ax.get_transform(red_wcs),cmap=cmap2,alpha=red_alphas,vmin=-10*np.nanmax(red_data),vmax=10*np.nanmax(red_data))
if red_label!=None:
ax.plot(-100,-100,'-',c=twilightcmap(0.75),label=red_label,linewidth=3)
ellipse= Ellipse(xy=(greyxmin+5,greyymin+5),width=r_bmaj_pix,height=r_bmin_pix,angle=r_bpa+90,edgecolor=twilightcmap(0.7),fc=twilightcmap(0.7),lw=1)
ax.add_patch(ellipse)
#add possum data
if POSSUMSB!='NaN' and POSSUMSB!='10035':
#ax.contour(yellow_data, transform=ax.get_transform(yellow_wcs), colors=['gold'],levels=yellow_contours,linewidths=0.2)
# calculate transparency array for map (alphas) by normalising the data to be between 0 and 1
yellow_alphas=np.divide(yellow_data-yellow_thresh,vmax(yellow_data,99.9)-yellow_thresh)
yellow_alphas=np.where(yellow_alphas<0,0,yellow_alphas)
yellow_alphas=np.where(yellow_alphas>1,1,yellow_alphas)
yellow_alphas=0.5*np.sqrt(yellow_alphas)
ax5=plt.imshow(yellow_data,origin='lower',transform=ax.get_transform(yellow_wcs),cmap=cmap3,alpha=yellow_alphas,vmin=-10*
|
np.nanmax(yellow_data)
|
numpy.nanmax
|
# Built-in
import sys
import os
import warnings
if sys.version[0] == '3':
import inspect
else:
# Python 2 back-porting
import funcsigs as inspect
# Common
import numpy as np
# tofu
try:
import tofu.geom._core as _core
except Exception:
from . import _core
__all__ = ['coords_transform',
'get_nIne1e2', 'get_X12fromflat',
'compute_RaysCones',
'create_config',
'create_CamLOS1D', 'create_CamLOS2D']
_sep = '_'
_dict_lexcept_key = []
_lok = np.arange(0,9)
_lok = np.array([_lok, _lok+10])
_here = os.path.abspath(os.path.dirname(__file__))
_root = _here[:_here.rfind('/tofu')]
_path_testcases = os.path.join(_root,'tofu/geom/inputs')
###########################################################
# COCOS
###########################################################
class CoordinateInputError(Exception):
_cocosref = "<NAME>, <NAME>, "
_cocosref += "Computer Physics Communications 184 (2013) 293-302"
msg = "The provided coords flag should be a str\n"
msg += "It should match a known flag:\n"
msg += " - 'cart' / 'xyz' : cartesian coordinates\n"
msg += " - cocos flag indicating the cocos number (1-8, 11-18)\n"
msg += " Valid cocos flags include:\n"
msg += " '11', '02', '5', '14', ..."
msg += "\n"
msg += "The cocos (COordinates COnvetionS) are descibed in:\n"
msg += " [1] %s"%_cocosref
def __init__(self, msg, errors=None):
# Call the base class constructor with the parameters it
# needs
super(CoordinateInputError, self).__init__(msg + '\n\n' + self.msg)
# Now for your custom code...
self.errors = errors
def _coords_checkformatcoords(coords='11'):
if not type(coords) is str:
msg = "Arg coords must be a str !"
raise CoordinateInputError(msg)
coords = coords.lower()
iint = np.array([ss.isdigit() for ss in coords]).nonzero()[0]
if coords in ['cart','xyz']:
coords = 'xyz'
elif iint.size in [1,2]:
coords = int(''.join([coords[jj] for jj in iint]))
if not coords in _lok.ravel():
msg = 'Not allowed number ({0}) !'.format(coords)
raise CoordinateInputError(msg)
else:
msg = "Not allowed coords ({0}) !".format(coords)
raise CoordinateInputError(msg)
return coords
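# Illustrative behaviour of the checker above (not from the original source):
# _coords_checkformatcoords('xyz') -> 'xyz'
# _coords_checkformatcoords('11') -> 11 (any digit flag in 0-8 or 10-18 is accepted)
# _coords_checkformatcoords('foo') raises CoordinateInputError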
def _coords_cocos2cart(pts, coords=11):
R = pts[0,:]
if (coords%10)%2==1:
indphi, indZ, sig = 1, 2, 1.
else:
indphi, indZ, sig = 2, 1, -1.
phi = sig*pts[indphi,:]
X = R*np.cos(phi)
Y = R*np.sin(phi)
Z = pts[indZ,:]
return np.array([X,Y,Z])
def _coords_cart2cocos(pts, coords=11):
R = np.hypot(pts[0,:],pts[1,:])
phi = np.arctan2(pts[1,:],pts[0,:])
Z = pts[2,:]
if (coords%10)%2==1:
indphi, indZ, sig = 1, 2, 1.
else:
indphi, indZ, sig = 2, 1, -1.
pts_out = np.empty((3,pts.shape[1]),dtype=float)
pts_out[0,:] = R
pts_out[indphi,:] = sig*phi
pts_out[indZ,:] = Z
return pts_out
def coords_transform(pts, coords_in='11', coords_out='11'):
coords_in = _coords_checkformatcoords(coords=coords_in)
coords_out = _coords_checkformatcoords(coords=coords_out)
if coords_in==coords_out:
pass
elif coords_in=='xyz':
pts = _coords_cart2cocos(pts, coords_out)
elif coords_out=='xyz':
pts = _coords_cocos2cart(pts, coords_in)
else:
pts = _coords_cocos2cart(pts, coords_in)
pts = _coords_cart2cocos(pts, coords_out)
return pts
###########################################################
###########################################################
# Useful functions
###########################################################
def get_nIne1e2(P, nIn=None, e1=None, e2=None):
assert np.hypot(P[0],P[1])>1.e-12
phi = np.arctan2(P[1],P[0])
ephi = np.array([-np.sin(phi), np.cos(phi), 0.])
ez = np.array([0.,0.,1.])
if nIn is None:
nIn = -P
nIn = nIn / np.linalg.norm(nIn)
if e1 is None:
if np.abs(np.abs(nIn[2])-1.) < 1.e-12:
e1 = ephi
else:
e1 = np.cross(nIn,ez)
e1 = e1 if np.sum(e1*ephi) > 0. else -e1
e1 = e1 / np.linalg.norm(e1)
if not np.abs(np.sum(nIn*e1))<1.e-12:
msg = "Identified local base does not seem valid!\n"
msg += "nIn = %s\n"%str(nIn)
msg += "e1 = %s\n"%str(e1)
msg += "np.sum(nIn*e1) = sum(%s) = %s"%(nIn*e1, np.sum(nIn*e1))
raise Exception(msg)
if e2 is None:
e2 = np.cross(nIn,e1)
e2 = e2 / np.linalg.norm(e2)
return nIn, e1, e2
def get_X12fromflat(X12, x12u=None, nx12=None):
if x12u is None:
x1u, x2u = np.unique(X12[0,:]), np.unique(X12[1,:])
if x1u.size*x2u.size != X12.shape[1]:
tol = np.linalg.norm(np.diff(X12[:,:2],axis=1))/100.
tolmag = int(np.log10(tol))-1
x1u = np.unique(np.round(X12[0,:], -tolmag))
x2u = np.unique(np.round(X12[1,:], -tolmag))
indx1 = np.digitize(X12[0,:], 0.5*(x1u[1:]+x1u[:-1]))
indx2 = np.digitize(X12[1,:], 0.5*(x2u[1:]+x2u[:-1]))
indx1u, indx2u = np.unique(indx1), np.unique(indx2)
x1u = np.unique([np.mean(X12[0,indx1==ii]) for ii in indx1u])
x2u = np.unique([np.mean(X12[1,indx2==ii]) for ii in indx2u])
else:
x1u, x2u = x12u
if nx12 is None:
nx1, nx2 = x1u.size, x2u.size
else:
nx1, nx2 = nx12
Dx12 = (x1u[1]-x1u[0], x2u[1]-x2u[0])
ind = np.zeros((nx1,nx2),dtype=int)
indr = np.array([np.digitize(X12[0,:], 0.5*(x1u[1:]+x1u[:-1])),
np.digitize(X12[1,:], 0.5*(x2u[1:]+x2u[:-1]))])
ind[indr[0,:],indr[1,:]] = np.arange(0,X12.shape[1])
return x1u, x2u, ind, Dx12
###########################################################
###########################################################
# Fast computation of cones with rays
###########################################################
def compute_RaysCones(Ds, us, angs=np.pi/90., nP=40):
# Check inputs
Ddim, udim = Ds.ndim, us.ndim
assert Ddim in [1,2]
assert Ds.shape[0]==3 and Ds.size%3==0
assert udim in [1,2]
assert us.shape[0]==3 and us.size%3==0
assert type(angs) in [int,float,np.int64,np.float64]
if udim==2:
assert Ds.shape==us.shape
if Ddim==1:
Ds = Ds.reshape((3,1))
nD = Ds.shape[1]
# Compute
phi = np.linspace(0.,2.*np.pi, nP)
phi = np.tile(phi,nD)[np.newaxis,:]
if udim==1:
us = us[:,np.newaxis]/np.linalg.norm(us)
us = us.repeat(nD,axis=1)
else:
us = us/np.sqrt(np.sum(us**2,axis=0))[np.newaxis,:]
us = us.repeat(nP, axis=1)
e1 = np.array([us[1,:],-us[0,:],np.zeros((us.shape[1],))])
e2 = np.array([-us[2,:]*e1[1,:], us[2,:]*e1[0,:],
us[0,:]*e1[1,:]-us[1,:]*e1[0,:]])
ub = (us*np.cos(angs)
+ (np.cos(phi)*e1+np.sin(phi)*e2)*np.sin(angs))
Db = Ds.repeat(nP,axis=1)
return Db, ub
###########################################################
###########################################################
# Fast computation of poly
###########################################################
def _compute_VesPoly(R=2.4, r=1., elong=0., Dshape=0.,
divlow=True, divup=True, nP=200):
""" Utility to compute three 2D (R,Z) polygons
One represents a vacuum vessel, one an outer bumper, one a baffle
The vessel polygon is centered on (R,0.), with minor radius r
It can have a vertical (>0) or horizontal (<0) elongation in [-1;1]
It can be D-shaped (Dshape in [0.,1.], typically 0.2)
It can be non-convex, with:
* a lower divertor-like shape
* an upper divertor-like shape
The elongation also affects the outer bumper and baffle
Parameters
----------
R: int / float
Major radius used as a center of the vessel
r : int / float
Minor radius of the vessel
elong: int / float
Dimensionless elongation parameter in [-1;1]
Dshape: int / float
Dimensionless parameter for the D-shape (in-out asymmetry) in [0;1]
divlow: bool
Flag indicating whether to include a lower divertor-like shape
divup: bool
Flag indicating whether to include an upper divertor-like shape
nP : int
Parameter specifying approximately the number of points of the vessel
Return
------
poly: np.ndarray
Closed (2,nP) polygon of the vacuum vessel, optionally with divertors
pbump: np.ndarray
Closed (2,N) polygon defining the outer bumper
pbaffle: np.ndarray
Closed (2,N) polygon defining the lower baffle
"""
# Basics (center, theta, unit vectors)
cent = np.r_[R,0.]
theta = np.linspace(-np.pi,np.pi,nP)
poly = np.array([np.cos(theta), np.sin(theta)])
# Divertors
pdivR = np.r_[-0.1,0.,0.1]
pdivZ = np.r_[-0.1,0.,-0.1]
if divlow:
ind = (np.sin(theta)<-0.85).nonzero()[0]
pinsert = np.array([pdivR, -1.+pdivZ])
poly = np.concatenate((poly[:,:ind[0]], pinsert, poly[:,ind[-1]+1:]),
axis=1)
if divup:
theta = np.arctan2(poly[1,:], poly[0,:])
ind = (np.sin(theta)>0.85).nonzero()[0]
pinsert = np.array([pdivR[::-1], 1.-pdivZ])
poly = np.concatenate((poly[:,:ind[0]], pinsert, poly[:,ind[-1]+1:]),
axis=1)
# Modified radius (by elongation and Dshape)
rbis = r*np.hypot(poly[0,:],poly[1,:])
theta = np.arctan2(poly[1,:],poly[0,:])
rbis = rbis*(1+elong*0.15*np.sin(2.*theta-np.pi/2.))
if Dshape>0.:
ind = np.cos(theta)<0.
coef = 1 + Dshape*(
|
np.sin(theta[ind])
|
numpy.sin
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 13:54:36 2021
@author: suraj
"""
import numpy as np
import keras
from numpy.random import seed
seed(10)
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.layers import concatenate
import tensorflow.keras.backend as K
import os
from keras import backend as kb
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from keras.regularizers import l2
import matplotlib.pyplot as plt
tf.compat.v1.disable_eager_execution()
def deep_ensemble_regression_nll_loss(sigma_sq, epsilon = 1e-6):
"""
Regression loss for a Deep Ensemble, using the negative log-likelihood loss.
This function returns a keras regression loss, given a symbolic tensor for the sigma square output of the model.
The training model should return the mean, while the testing/prediction model should return the mean and variance.
"""
def nll_loss(y_true, y_pred):
return 0.5 * K.mean(K.log(sigma_sq + epsilon) + K.square(y_true - y_pred) / (sigma_sq + epsilon))
return nll_loss
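# Minimal usage sketch (illustrative; layer sizes and variable names are assumptions,
# not the original training script). The training model outputs the mean only, while
# the sigma^2 head parameterises the loss closure:
# inp = Input(shape=(1,))
# hidden = Dense(64, activation='relu')(inp)
# mean = Dense(1)(hidden)
# sigma_sq = Dense(1, activation='softplus')(hidden)
# train_model = Model(inp, mean)
# train_model.compile(optimizer='adam', loss=deep_ensemble_regression_nll_loss(sigma_sq))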
def toy_dataset(input):
output = []
for inp in input:
std = 3 if inp < 0 else 1
out = [inp ** 3 + np.random.normal(0, std), 10*
|
np.sin(inp)
|
numpy.sin
|
"""Tests for the policies in the hbaselines/base_policies subdirectory."""
import unittest
import numpy as np
import tensorflow as tf
from gym.spaces import Box
from hbaselines.base_policies import ActorCriticPolicy
from hbaselines.base_policies import ImitationLearningPolicy
from hbaselines.algorithms.off_policy import FEEDFORWARD_PARAMS
class TestActorCriticPolicy(unittest.TestCase):
"""Test ActorCriticPolicy in hbaselines/base_policies/actor_critic.py."""
def setUp(self):
sess = tf.compat.v1.Session()
self.policy_params = {
'sess': sess,
'ac_space': Box(low=-1, high=1, shape=(1,)),
'ob_space': Box(low=-2, high=2, shape=(2,)),
'co_space': Box(low=-3, high=3, shape=(3,)),
'verbose': 0,
}
self.policy_params.update(FEEDFORWARD_PARAMS.copy())
def tearDown(self):
self.policy_params['sess'].close()
del self.policy_params
def test_init(self):
"""Validate that the variables are initialized properly."""
policy = ActorCriticPolicy(**self.policy_params)
# Check that the abstract class has all the required attributes.
self.assertEqual(policy.sess, self.policy_params['sess'])
self.assertEqual(policy.ac_space, self.policy_params['ac_space'])
self.assertEqual(policy.ob_space, self.policy_params['ob_space'])
self.assertEqual(policy.co_space, self.policy_params['co_space'])
self.assertEqual(policy.buffer_size, self.policy_params['buffer_size'])
self.assertEqual(policy.batch_size, self.policy_params['batch_size'])
self.assertEqual(policy.actor_lr, self.policy_params['actor_lr'])
self.assertEqual(policy.critic_lr, self.policy_params['critic_lr'])
self.assertEqual(policy.verbose, self.policy_params['verbose'])
self.assertEqual(policy.tau, self.policy_params['tau'])
self.assertEqual(policy.gamma, self.policy_params['gamma'])
self.assertEqual(policy.use_huber, self.policy_params['use_huber'])
# Check that the abstract class has all the required methods.
self.assertRaises(NotImplementedError, policy.initialize)
self.assertRaises(NotImplementedError, policy.update,
update_actor=None)
self.assertRaises(NotImplementedError, policy.get_action,
obs=None, context=None, apply_noise=None,
random_actions=None)
self.assertRaises(NotImplementedError, policy.store_transition,
obs0=None, context0=None, action=None, reward=None,
obs1=None, context1=None, done=None,
is_final_step=None, evaluate=False)
self.assertRaises(NotImplementedError, policy.get_td_map)
def test_init_assertions(self):
"""Test the assertions in the __init__ methods.
This tests the following cases:
1. the required model_params are not specified
2. the required conv-related model_params are not specified
3. the model_type is not an applicable one.
"""
# test case 1
policy_params = self.policy_params.copy()
model_type = policy_params["model_params"]["model_type"]
layers = policy_params["model_params"]["layers"]
del policy_params["model_params"]["model_type"]
del policy_params["model_params"]["layers"]
self.assertRaises(AssertionError, ActorCriticPolicy, **policy_params)
# Undo changes.
policy_params["model_params"]["model_type"] = model_type
policy_params["model_params"]["layers"] = layers
# test case 2
policy_params = policy_params.copy()
policy_params["model_params"]["model_type"] = "conv"
strides = policy_params["model_params"]["strides"]
filters = policy_params["model_params"]["filters"]
del policy_params["model_params"]["strides"]
del policy_params["model_params"]["filters"]
self.assertRaises(AssertionError, ActorCriticPolicy, **policy_params)
# Undo changes.
policy_params["model_params"]["strides"] = strides
policy_params["model_params"]["filters"] = filters
# test case 3
policy_params = self.policy_params.copy()
policy_params["model_params"]["model_type"] = "blank"
self.assertRaises(AssertionError, ActorCriticPolicy, **policy_params)
# Undo changes.
policy_params["model_params"]["model_type"] = "fcnet"
def test_get_obs(self):
"""Check the functionality of the _get_obs() method.
This method is tested for three cases:
1. when the context is None
2. for 1-D observations and contexts
3. for 2-D observations and contexts
"""
policy = ActorCriticPolicy(**self.policy_params)
# test case 1
obs = np.array([0, 1, 2])
context = None
expected = obs
np.testing.assert_almost_equal(policy._get_obs(obs, context), expected)
# test case 2
obs = np.array([0, 1, 2])
context = np.array([3, 4])
expected = np.array([0, 1, 2, 3, 4])
np.testing.assert_almost_equal(policy._get_obs(obs, context), expected)
# test case 3
obs = np.array([[0, 1, 2]])
context = np.array([[3, 4]])
expected = np.array([[0, 1, 2, 3, 4]])
np.testing.assert_almost_equal(policy._get_obs(obs, context, axis=1),
expected)
def test_get_ob_dim(self):
"""Check the functionality of the _get_ob_dim() method.
This method is tested for two cases:
1. when the context is None
2. when the context is not None
"""
policy = ActorCriticPolicy(**self.policy_params)
# test case 1
ob_space = Box(0, 1, shape=(2,))
co_space = None
self.assertTupleEqual(policy._get_ob_dim(ob_space, co_space), (2,))
# test case 2
ob_space = Box(0, 1, shape=(2,))
co_space = Box(0, 1, shape=(3,))
self.assertTupleEqual(policy._get_ob_dim(ob_space, co_space), (5,))
def test_setup_target_updates(self):
"""Check the functionality of the _setup_target_updates() method.
This test validates both the init and soft update procedures generated
by the tested method.
"""
policy = ActorCriticPolicy(**self.policy_params)
_ = tf.Variable(initial_value=[[1, 1, 1, 1]], dtype=tf.float32,
name="0")
val1 = tf.Variable(initial_value=[[0, 0, 0, 0]], dtype=tf.float32,
name="1")
# Initialize the variables of the policy.
policy.sess.run(tf.compat.v1.global_variables_initializer())
init, soft = policy._setup_target_updates("0", "1", None, 0.1, 0)
# test soft update
policy.sess.run(soft)
expected = np.array([[0.1, 0.1, 0.1, 0.1]])
np.testing.assert_almost_equal(policy.sess.run(val1), expected)
# test init update
policy.sess.run(init)
expected = np.array([[1, 1, 1, 1]])
np.testing.assert_almost_equal(policy.sess.run(val1), expected)
class TestImitationLearningPolicy(unittest.TestCase):
"""Test ImitationLearningPolicy in hbaselines/base_policies."""
def setUp(self):
sess = tf.compat.v1.Session()
self.policy_params = {
'sess': sess,
'ac_space': Box(low=-1, high=1, shape=(1,)),
'ob_space': Box(low=-2, high=2, shape=(2,)),
'co_space': Box(low=-3, high=3, shape=(3,)),
'verbose': 0,
}
self.policy_params.update({
"buffer_size": 200000,
"batch_size": 128,
"learning_rate": 3e-4,
"layer_norm": False,
"layers": [256, 256],
"act_fun": tf.nn.relu,
"use_huber": False,
"stochastic": False
})
def tearDown(self):
self.policy_params['sess'].close()
del self.policy_params
def test_init(self):
"""Validate that the variables are initialized properly."""
policy = ImitationLearningPolicy(**self.policy_params)
# Check that the abstract class has all the required attributes.
self.assertEqual(policy.sess, self.policy_params['sess'])
self.assertEqual(policy.ob_space, self.policy_params['ob_space'])
self.assertEqual(policy.ac_space, self.policy_params['ac_space'])
self.assertEqual(policy.co_space, self.policy_params['co_space'])
self.assertEqual(policy.buffer_size, self.policy_params['buffer_size'])
self.assertEqual(policy.batch_size, self.policy_params['batch_size'])
self.assertEqual(policy.learning_rate,
self.policy_params['learning_rate'])
self.assertEqual(policy.verbose, self.policy_params['verbose'])
self.assertEqual(policy.layer_norm, self.policy_params['layer_norm'])
self.assertEqual(policy.layers, self.policy_params['layers'])
self.assertEqual(policy.act_fun, self.policy_params['act_fun'])
self.assertEqual(policy.use_huber, self.policy_params['use_huber'])
self.assertEqual(policy.stochastic, self.policy_params['stochastic'])
# Check that the abstract class has all the required methods.
self.assertRaises(NotImplementedError, policy.update)
self.assertRaises(NotImplementedError, policy.get_action,
obs=None, context=None)
self.assertRaises(NotImplementedError, policy.store_transition,
obs0=None, context0=None, action=None, obs1=None,
context1=None)
self.assertRaises(NotImplementedError, policy.get_td_map)
def test_get_obs(self):
"""Check the functionality of the _get_obs() method.
This method is tested for three cases:
1. when the context is None
2. for 1-D observations and contexts
3. for 2-D observations and contexts
"""
policy = ImitationLearningPolicy(**self.policy_params)
# test case 1
obs =
|
np.array([0, 1, 2])
|
numpy.array
|
import cv2
import numpy as np
def find_contours(heatmap, threshold=None, dilation=True, erosion=False):
"""
Find and sort text line contours based on the score-link image
@Parameters:
- heatmap: score link heatmap image
- threshold: threshold method, choices=[otsu, adaptive, simple]
- dilation: whether or not to apply dilation
- erosion: whether or not to apply erosion
@Returns:
- contours: list of contours
- contour_index: contour sort index
"""
# Convert to grayscale
gray = heatmap # cv2.cvtColor(heatmap, cv2.COLOR_RGB2GRAY)
# gray = cv2.GaussianBlur(gray, (5,5), 0)
height, width = gray.shape[:2]
# Threshold
thresh = gray
if threshold == "otsu":
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
elif threshold == "adaptive":
thresh = cv2.adaptiveThreshold(
gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2
)
elif threshold == "simple":
# 180 -> 127
thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)[1]
# kernel = np.ones((3, 1), np.uint8)
# thresh = cv2.erode(thresh, kernel, iterations=1)
# Dilate
dilate = thresh
if dilation:
# width // 50
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (width // 50, 1))
dilate = cv2.dilate(thresh, kernel, iterations=3)
# Erode
erode = dilate
if erosion:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 1))
erode = cv2.erode(dilate, kernel, iterations=1)
# Find and sort contour
contours = cv2.findContours(erode, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
contours = [c.squeeze() for c in contours if len(c) > 2]
contour_left = []
for c in contours:
index = np.argsort(c[:, 0])
contour_left.append(c[index[0], 1])
contour_index = np.argsort(contour_left)
return contours, contour_index
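# Illustrative usage (hypothetical file name, not from the original repo):
# heatmap = cv2.imread('score_link.png', cv2.IMREAD_GRAYSCALE)
# contours, order = find_contours(heatmap, threshold='otsu', dilation=True)
# iterating contours[idx] for idx in order walks the text lines top to bottom.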
def arrange_boxes(boxes, contours, contour_index, ratio, ratio_net=2):
"""
Arrange word bounding boxes into lines based on contours
@Parameters:
- boxes: array of word bounding boxes
- contours: list of contours
- contour_index: contour sorting index
- ratio: image resize ratio
- ratio_net: CRAFT resize ratio, default=2
@Returns:
- lines: line index of each bounding box
"""
# Calculate distance from each box center to each contour
centers = (boxes[:, 0:2] + boxes[:, 4:6]) // (2 * ratio * ratio_net)
distances = np.zeros((len(contours), len(boxes)))
for idx, c_idx in enumerate(contour_index):
c = contours[c_idx]
distances[idx, :] = p_poly_dist(centers, c)
line_idx = np.argmin(distances, axis=0)
# Sorting boxes on the same line
idx = np.argsort(line_idx)
boxes = boxes[idx]
_, count = np.unique(line_idx, return_counts=True)
start = 0
lines = np.zeros(boxes.shape[0], dtype=int)
for i, c in enumerate(count):
# Get boxes on the same line
box_line = boxes[start : start + c]
# Sorting in order of increasing x
idx = np.argsort(box_line[:, 0])
box_line = box_line[idx]
# Update boxes and move to next line
boxes[start : start + c] = box_line
lines[start : start + c] = i
start += c
return boxes, lines
def p_poly_dist(p, poly):
"""
Calculate distance from a list of points to a polygon
@Parameters:
- p: array of points [x,y]
- poly: polygon, array of points [x,y]
@Returns:
- d: distance from each point in p to polygon poly
Algorithm:
https://www.mathworks.com/matlabcentral/fileexchange/12744-distance-from-points-to-polyline-or-polygon
"""
# Polygon must have at least 3 points
assert len(poly) > 2
# Check if poly is closed, if not then close it
if ~(poly[0] == poly[-1]).all():
poly = np.vstack((poly, poly[0]))
# Get number of point and number of vertices
Np = len(p)
Nv = len(poly)
# Calculate distance from each point to polygon vertices
dpv = np.hypot(
np.tile(np.expand_dims(p[:, 0], axis=1), [1, Nv])
- np.tile(poly[:, 0].T, [Np, 1]),
np.tile(np.expand_dims(p[:, 1], axis=1), [1, Nv])
- np.tile(poly[:, 1].T, [Np, 1]),
)
# Find closest vertex
dpv_min = np.amin(dpv, axis=1)
I_dpv_min = np.argmin(dpv, axis=1)
# coordinate of consecutive vertices
P1 = poly[:-1, :]
P2 = poly[1:, :]
dv = P2 - P1
# distance between consecutive vertices
vds = np.hypot(dv[:, 0], dv[:, 1])
# Find rotation matrix
ctheta = dv[:, 0] / vds
stheta = dv[:, 1] / vds
Cer = np.zeros((2, 2, Nv - 1))
Cer[0, 0, :] = ctheta
Cer[0, 1, :] = stheta
Cer[1, 0, :] = -stheta
Cer[1, 1, :] = ctheta
# rotate P1 vector
P1r = np.array(
[P1[:, 0] * ctheta + P1[:, 1] * stheta, -P1[:, 0] * stheta + P1[:, 1] * ctheta]
).T
# compute points new coordinate: rotation -> translation
Cer21 = Cer[0, :, :]
Cer22 = Cer[1, :, :]
Pp1 =
|
np.zeros((2, Np, Nv - 1))
|
numpy.zeros
|
#!/usr/bin/env python2.7
import rospy
import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage, PointCloud2
from tf2_geometry_msgs import PoseStamped
from geometry_msgs.msg import Quaternion
import sensor_msgs.point_cloud2 as pc2
import tf2_ros
import tf
# This script is still under development... by <NAME>
global kinect2
class pointcloud():
def __init__(self, cloud_topic):
rospy.Subscriber(cloud_topic, PointCloud2, self.cb)
def cb(self, msg):
self.data = msg
def edges_map(heights, th=0.05, display=False):
heights = np.abs(heights.reshape(heights.shape[0:2]))
height_sh = heights.shape
# Calculate the horizontal and vertical edges and sum them
edges_map = (np.abs(np.concatenate((np.diff(heights, axis=0),
np.zeros((1, height_sh[1]))), axis=0)) +
np.abs(np.concatenate((
|
np.diff(heights, axis=1)
|
numpy.diff
|
import difflib
import functools
import operator
import sys
from functools import reduce
from itertools import islice
import numpy as np
from .misc import indent
__all__ = ['fixed_width_indent', 'diff_values', 'report_diff_values',
'where_not_allclose']
# Smaller default shift-width for indent
fixed_width_indent = functools.partial(indent, width=2)
def diff_values(a, b, rtol=0.0, atol=0.0):
"""
Diff two scalar values. If both values are floats, they are compared to
within the given absolute and relative tolerance.
Parameters
----------
a, b : int, float, str
Scalar values to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
is_different : bool
`True` if they are different, else `False`.
"""
if isinstance(a, float) and isinstance(b, float):
if np.isnan(a) and np.isnan(b):
return False
return not np.allclose(a, b, rtol=rtol, atol=atol)
else:
return a != b
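# Illustrative behaviour (not part of the original module):
# diff_values(1.0, 1.0 + 1e-9, rtol=1e-8) -> False (within tolerance)
# diff_values(float('nan'), float('nan')) -> False (NaNs are treated as equal here)
# diff_values('a', 'b') -> True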
def report_diff_values(a, b, fileobj=sys.stdout, indent_width=0):
"""
Write a diff report between two values to the specified file-like object.
Parameters
----------
a, b
Values to compare. Anything that can be turned into strings
and compared using :py:mod:`difflib` should work.
fileobj : obj
File-like object to write to.
The default is ``sys.stdout``, which writes to terminal.
indent_width : int
Character column(s) to indent.
Returns
-------
identical : bool
`True` if no diff, else `False`.
"""
typea = type(a)
typeb = type(b)
if (isinstance(a, str) and not isinstance(b, str)):
a = repr(a).lstrip('u')
elif (isinstance(b, str) and not isinstance(a, str)):
b = repr(b).lstrip('u')
if isinstance(a, (int, float, complex, np.number)):
a = repr(a)
if isinstance(b, (int, float, complex, np.number)):
b = repr(b)
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
diff_indices = np.where(a != b)
# NOTE: Two 5x5 arrays that are completely different would
# report num_diffs of 625 (25 * 25).
num_diffs = reduce(operator.mul, map(len, diff_indices), 1)
for idx in islice(zip(*diff_indices), 3):
fileobj.write(
fixed_width_indent(' at {!r}:\n'.format(list(idx)),
indent_width))
report_diff_values(a[idx], b[idx], fileobj=fileobj,
indent_width=indent_width + 1)
if num_diffs > 3:
fileobj.write(fixed_width_indent(
' ...and at {} more indices.\n'.format(num_diffs - 3),
indent_width))
return num_diffs == 0
padding = max(len(typea.__name__), len(typeb.__name__)) + 3
identical = True
for line in difflib.ndiff(str(a).splitlines(), str(b).splitlines()):
if line[0] == '-':
identical = False
line = 'a>' + line[1:]
if typea != typeb:
typename = '(' + typea.__name__ + ') '
line = typename.rjust(padding) + line
elif line[0] == '+':
identical = False
line = 'b>' + line[1:]
if typea != typeb:
typename = '(' + typeb.__name__ + ') '
line = typename.rjust(padding) + line
else:
line = ' ' + line
if typea != typeb:
line = ' ' * padding + line
fileobj.write(fixed_width_indent(
' {}\n'.format(line.rstrip('\n')), indent_width))
return identical
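# Illustrative behaviour (not part of the original module):
# report_diff_values('abc', 'abc') writes the unchanged line and returns True, while
# report_diff_values(1, 2) writes an 'a>' / 'b>' diff pair and returns False.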
def where_not_allclose(a, b, rtol=1e-5, atol=1e-8):
"""
A version of :func:`numpy.allclose` that returns the indices
where the two arrays differ, instead of just a boolean value.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
idx : tuple of arrays
Indices where the two arrays differ.
"""
# Create fixed mask arrays to handle INF and NaN; currently INF and NaN
# are handled as equivalent
if not np.all(np.isfinite(a)):
a = np.ma.fix_invalid(a).data
if not np.all(np.isfinite(b)):
b = np.ma.fix_invalid(b).data
if atol == 0.0 and rtol == 0.0:
# Use a faster comparison for the most simple (and common) case
return
|
np.where(a != b)
|
numpy.where
|
# --------------------------------------------------------
# P2ORM: Formulation, Inference & Application
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import cv2
import sys
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import itertools
import time
from PIL import Image
from scipy import pi, ndimage
from .edge_nms import *
sys.path.append('../..')
from math import atan, tan
PI = 3.1416
# ===================================== functions for dataset generation ============================================= #
def gen_occ_order(K, depth, label_map, invalid_mask, ROI_sz, thr_depth, normal=None, lbl_type='mask',
depth_avg=False, dataset='interiornet', thr_pix=False, fast_abs_diff=False):
"""
convert depth to pixel-wise occ edge and pairwise occ order with given depth, then corrected
by normal map and instance mask edge (optional)
:param K: current image camera intrinsic
:param depth: Euclidean distance between camera center and relevant pixel's 3D point
:param label_map: instance mask or edge mask which indicates image edge
:param invalid_mask: invalid raw data mask; [valid:0, invalid:1]
:param ROI_sz: size of region to determine occlusion order, default=3
:param lbl_type: ['edge'|'mask']: labeled occlusion edge or semantic mask
:param thr_depth: neighbor pixels depth difference rate (depth_diff / pixel_dist) thresh to detect occlusion
:param depth_avg: whether to use average depth over one pixel's neighborhood as pixel depth
:param dataset: dataset name, for dataset-specific pre-processing
:param thr_pix: whether to use a pixel-wise discontinuity threshold
:return occ_label: [H, W, (1(edge) + 8(order))]
"""
# pre-process depth, normal, label
H, W = depth.shape
padding = 2 # padding for depth
depth_pad_2 = cv2.copyMakeBorder(depth, padding, padding, padding, padding, cv2.BORDER_REPLICATE) # H+2,W+2
invalid_mask_pad = cv2.copyMakeBorder(invalid_mask, padding, padding, padding, padding, cv2.BORDER_REPLICATE) # H+2,W+2
if normal is not None:
if normal.dtype == np.uint16: # [0,65535] => [-1, 1]
normal = normal.astype(np.float32) / 65535.0 * 2 - 1.0 # H,W,3
normal[:, :, 1] = -normal[:, :, 1] # y-down => y-up, interiornet case
normal[:, :, 2] = -normal[:, :, 2] # z-in => z-out
normal_pad = cv2.copyMakeBorder(normal, 1, 1, 1, 1, cv2.BORDER_REPLICATE)
if lbl_type == 'edge': # occ edge map
edge_mask = label_map
edge_mask_pad = cv2.copyMakeBorder(edge_mask, 1, 1, 1, 1, cv2.BORDER_REPLICATE)
# init for ray-casting method
occ_edge = np.zeros(depth.shape[:2]) # 2-pix width occ edge (fg+bg)
occ_label = np.zeros((depth.shape[0], depth.shape[1], 9)) # occ edge + occ order (-1,0,1) w.r.t. 8 neighbors
occ_label_tmp = np.zeros((depth.shape[0], depth.shape[1], 9))
diff_abs_depth = np.zeros((depth.shape[0], depth.shape[1], 8)) # depth diff w.r.t 8 neighbors
diff_adj_depth = np.zeros((depth.shape[0], depth.shape[1], 8)) # adjusted depth diff (mid-pix ray) w.r.t 8 neighbors
shifts_pix = [[-1, -1], [-1, 0], [-1, 1],
[0, -1], [0, 1],
[1, -1], [1, 0], [1, 1]]
shift_midpix = np.array([[-0.5, -0.5], [-0.5, 0.0], [-0.5, 0.5],
[0.0, -0.5], [0.0, 0.5],
[0.5, -0.5], [0.5, 0.0], [0.5, 0.5]]) # shift from center pix to mid pix
origin = np.zeros((8, 3)) # I0 of rays, namely camera optical center
depth_err_map = np.zeros(depth.shape[:2]) # estimated GT depth error (only for real dataset)
# thr_depth_const = thr_depth
# firstly check absolute depth diff (avoid ROI op, check)
depth_pad_1 = depth_pad_2[1:-1, 1:-1] # H+1,W+1
for idx, shift_pix in enumerate(shifts_pix):
shift_h, shift_w = shift_pix
pix_dist = 1.414 if idx in [0, 2, 5, 7] else 1. # distance between neighbor pixels
depth_diff = (depth_pad_1[1 + shift_h:H + 1 + shift_h, 1 + shift_w:W + 1 + shift_w] - depth) / pix_dist # H,W
diff_abs_depth[:, :, idx] = depth_diff
occ_label_tmp[depth_diff > thr_depth, idx + 1] = 1. # fg
occ_label_tmp[depth_diff < -thr_depth, idx + 1] = -1. # bg
occ_exist_bool = np.any((occ_label_tmp != 0), axis=2) # H,W
if fast_abs_diff: # fast mode using only absolute depth difference as ablation study
occ_edge[occ_exist_bool] = 1.0
occ_label = occ_label_tmp
occ_label[occ_exist_bool, 0] = 1.0
return occ_edge, occ_label, diff_abs_depth, diff_adj_depth, depth_err_map
# gen occ order for each pixel over the image
for y_idx in range(0, depth.shape[0]):
for x_idx in range(0, depth.shape[1]):
if invalid_mask[y_idx, x_idx] == 1: continue # skip pixel
if occ_exist_bool[y_idx, x_idx] != 1: continue
ROI_depth_L = np.copy(depth_pad_2[y_idx:(y_idx + ROI_sz + padding), x_idx:(x_idx + ROI_sz + padding)])
ROI_invalid_L = np.copy(invalid_mask_pad[y_idx:(y_idx + ROI_sz + padding), x_idx:(x_idx + ROI_sz + padding)])
# ============================= special pre-processing for dataset ======================================= #
if dataset in ['interiornet', 'scenenet']:
if ROI_depth_L.min() == 0.0: # inf depth
ROI_depth_L[ROI_depth_L != 0.] = ROI_depth_L.max() # rm depth edge problem
ROI_depth_L[ROI_depth_L == 0.] = 65535.0 # max depth for inf depth
elif dataset == 'ibims':
if ROI_depth_L[2, 2] == 0:
continue # invalid center pixel, skip
else:
if thr_pix: # cal curr pixel depth discontinuity thresh
eta_d_ibims = 0.002 # depth angular err for ibims-1 dataset
err_d_ibims = 1. # depth translational err for ibims-1 dataset
center_2D = np.array([y_idx + 0.5, x_idx + 0.5], dtype=np.float32) # 2,
neighbors_2D = center_2D + 2. * shift_midpix # 8,2
ROI_2D = np.insert(neighbors_2D, int((ROI_sz ** 2 - 1) / 2),
center_2D, axis=0).astype(np.float32) # 9,2
center_ray = np.array([center_2D[1] - K[0, 2], K[1, 2] - center_2D[0], -K[0, 0]]) # 3,
# center_ray_unit = center_ray / np.linalg.norm(center_ray) # 3,
ROI_rays = np.stack((ROI_2D[:, 1] - K[0, 2],
K[1, 2] - ROI_2D[:, 0],
-K[0, 0].repeat(9)), axis=1) # 9,3
ROI_rays_unit = ROI_rays / np.linalg.norm(ROI_rays, axis=1).reshape(-1, 1) # 9,3
ROI_normal = np.copy(normal_pad[y_idx:(y_idx + ROI_sz), x_idx:(x_idx + ROI_sz), :]).reshape(-1, 3) # 3,3,3 => 9,3
ROI_normal_unit = ROI_normal / np.linalg.norm(ROI_normal, axis=1).reshape(-1, 1) # 9,3
center_normal = np.copy(normal_pad[y_idx+1, x_idx+1, :]) # 3,
center_normal_unit = center_normal / np.linalg.norm(center_normal)
# gazing angle between surface and line of sight
# gamma = np.arccos(np.sum(center_ray_unit * center_normal_unit)) - PI / 2
gamma_roi = np.arccos(np.sum(ROI_rays_unit * ROI_normal_unit, axis=1)) - PI / 2 # 9,
# if np.any(gamma_roi <= eta_d_ibims): continue # condition for depth err caused by angular err
tan_gamma = np.minimum(np.tan(gamma_roi), 1.) # consider possible normal estimation err
tan_gamma = np.maximum(tan_gamma, 0.0001) # required: tan(gamma) >> tan(err_d_ibims)
depth_err = eta_d_ibims / tan_gamma * ROI_depth_L[2, 2] + err_d_ibims
thr_depth = 25. + depth_err[4] + np.delete(depth_err, 4) # 8,
depth_err_map[y_idx, x_idx] = depth_err[4]
# guess zero-value neighbor depth by 3x3 average depth
if np.any(ROI_depth_L[1:-1, 1:-1] == 0):
for y in range(0, ROI_sz):
for x in range(0, ROI_sz):
if ROI_depth_L[y+1, x+1] == 0.:
ROI_depth_valid = ROI_depth_L[y:y + ROI_sz, x:x + ROI_sz]
ROI_depth_valid = ROI_depth_valid[ROI_depth_valid != 0]
ROI_depth_L[y+1, x+1] = ROI_depth_valid.mean()
# ======================================================================================================== #
ROI_depth = np.zeros((ROI_sz, ROI_sz))
if depth_avg: # avg each pixel depth in ROI
for y in range(0, ROI_sz):
for x in range(0, ROI_sz):
ROI_depth[y, x] = np.mean(ROI_depth_L[y:y + ROI_sz, x:x + ROI_sz])
else:
ROI_depth = ROI_depth_L[1:-1, 1:-1] # 3x3
ROI_invalid = ROI_invalid_L[1:-1, 1:-1] # 3x3
# pixel idx in flat vector and its relevant location in connectivity-8 neighborhood
# 0 1 2
# 3 4
# 5 6 7
center_depth = ROI_depth[int((ROI_sz - 1) / 2), int((ROI_sz - 1) / 2)]
ROI_depth_flat = ROI_depth.flatten()
neighbors_depth_flat = np.delete(ROI_depth_flat, int((ROI_sz * ROI_sz - 1) / 2)) # 8,
ROI_invalid_flat = ROI_invalid.flatten()
neighbors_invalid_flat = np.delete(ROI_invalid_flat, int((ROI_sz * ROI_sz - 1) / 2)) # 8,
ROI_depth_diff = ROI_depth - center_depth # cal abs depth diff
ROI_depth_diff_flat = ROI_depth_diff.flatten() # row-wise flatten
ROI_depth_diff_flat = np.delete(ROI_depth_diff_flat, int((ROI_sz * ROI_sz - 1) / 2)) # 8,
ROI_depth_diff_flat[[0, 2, 5, 7]] = ROI_depth_diff_flat[[0, 2, 5, 7]] / 1.414 # for diagonal neighbors
gen_occ_lbl = False
if lbl_type == 'edge' and edge_mask[y_idx, x_idx] == 1:
gen_occ_lbl = True
elif lbl_type == 'mask' and np.any(np.abs(ROI_depth_diff).max() > thr_depth):
gen_occ_lbl = True
if gen_occ_lbl: # gen occ edge/order
# ======================= cal relevant discontinuities if normal is available ======================== #
if normal is not None:
ROI_normal = np.copy(normal_pad[y_idx:(y_idx + ROI_sz), x_idx:(x_idx + ROI_sz), :]).reshape(-1, 3) # 3,3,3 => 9,3
center_normal = ROI_normal[int((ROI_sz ** 2 - 1) / 2), :] # 3,
neighbors_normal = np.delete(ROI_normal, int((ROI_sz ** 2 - 1) / 2), axis=0) # 8,3
# gen relevant pixels coordinates on image plane
center_2D = np.array([y_idx + 0.5, x_idx + 0.5], dtype=np.float32)
mid_2D = center_2D + shift_midpix # 8,2
neighbors_2D = center_2D + 2. * shift_midpix # 8,2
ROI_2D = np.insert(neighbors_2D, int((ROI_sz ** 2 - 1) / 2),
center_2D, axis=0).astype(np.float32) # 9,2
# gen rays from camera center to pixels/middle-pixels
mid_pix_rays = np.stack((mid_2D[:, 1] - K[0, 2],
K[1, 2] - mid_2D[:, 0],
-K[0, 0].repeat(8)), axis=1) # 8,3
ROI_rays = np.stack((ROI_2D[:, 1] - K[0, 2],
K[1, 2] - ROI_2D[:, 0],
-K[0, 0].repeat(9)), axis=1) # 9,3
ROI_rays_unit = ROI_rays / np.linalg.norm(ROI_rays, axis=1).reshape(-1, 1) # 9,3
# gen 3D points coordinates w.r.t. 2D ROI pixels
ROI_3D = ROI_rays_unit * ROI_depth.reshape(-1, 1) # 9,3
center_3D = ROI_3D[int((ROI_sz ** 2 - 1) / 2), :] # 3,
neighbors_3D = np.delete(ROI_3D, int((ROI_sz ** 2 - 1) / 2), axis=0) # rm center; 8,3
# cal intersected points between mid pix rays and local tangent planes in 3D
pts_midray_centerplane = insect_line_plane_3d_batch(origin, mid_pix_rays,
center_3D, center_normal) # 8,3
pts_midray_neighborplanes = insect_line_plane_3d_batch(origin, mid_pix_rays,
neighbors_3D, neighbors_normal) # 8,3
# ignore case where lines are parallel to planes
pts_midray_centerplane[np.isnan(pts_midray_centerplane)] = 0.
pts_midray_neighborplanes[np.isnan(pts_midray_neighborplanes)] = 0.
# cal intersected points between center ray and neighbors local planes
pts_centerray_neighborplanes = insect_line_plane_3d_batch(origin, center_3D, neighbors_3D, neighbors_normal)
# cal intersected point between neighbors rays and center local plane
pts_neighborrays_centerplane = insect_line_plane_3d_batch(origin, neighbors_3D, center_3D, center_normal)
# ignore case where lines are parallel to plane
pts_centerray_neighborplanes[np.isnan(pts_centerray_neighborplanes)] = 0. # 8,
pts_neighborrays_centerplane[
|
np.isnan(pts_neighborrays_centerplane)
|
numpy.isnan
|
"""Generate halfway phase shifted ring.
TODO[Faruk]: I need to revisit and seriously cleanup this script once revision
rush is over.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
OUTDIR = "/home/faruk/gdrive/paper-350_micron/paper_figures/revision-distortions"
DIMS = 900, 900
RADIUS_OUTER = 450
RADIUS_INNER = 150
SEGMENTS_INNER = 25
NR_LAYERS = 11
KAYCUBE_FACTOR = 32
DPI = 192
plt.style.use('dark_background')
# =============================================================================
# Generate coordinates
coords = np.zeros((DIMS[0], DIMS[1], 2))
center = DIMS[0]/2, DIMS[1]/2
for i in range(DIMS[0]):
for j in range(DIMS[1]):
coords[i, j, 0] = i
coords[i, j, 1] = j
coords[:, :, 0] -= center[0]
coords[:, :, 1] -= center[1]
# -----------------------------------------------------------------------------
# Generate ring
mag = np.linalg.norm(coords, axis=-1)
data = np.zeros(DIMS)
data[mag < RADIUS_OUTER] = 200
data[mag < RADIUS_INNER] = 0
# Extend horizontally
DIMS2 = DIMS[0], DIMS[1]+DIMS[1]*2//3
data2 = np.zeros((DIMS2[0], DIMS2[1]))
data2[0:DIMS[0]//2, 0:DIMS[1]] = data[0:DIMS[0]//2, :]
data2[DIMS[0]//2:, DIMS[1]*2//3:] = data[DIMS[0]//2:, :]
# Add notebook texture
lines = np.zeros(data2.shape)
lines[::KAYCUBE_FACTOR, :] = 100
lines[:, ::KAYCUBE_FACTOR] = 100
lines[data2 == 0] = 0
data3 = np.copy(data2)
data3[lines != 0] = 100
# -----------------------------------------------------------------------------
# Compute circumference ratio of equal line segments
circum_ratio = (2 * np.pi * RADIUS_OUTER) / (2 * np.pi * RADIUS_INNER)
SEGMENTS_OUTER = int(SEGMENTS_INNER / circum_ratio)
circum_ratio2 = (2 * np.pi * ((RADIUS_OUTER+RADIUS_INNER)/2)) / (2 * np.pi * RADIUS_INNER)
SEGMENTS_MIDDLE = int((SEGMENTS_INNER + circum_ratio) // 2)
nr_segments_outer = 360 // SEGMENTS_OUTER
nr_segments_inner = 360 // SEGMENTS_INNER
nr_segments_middle = 360 // SEGMENTS_MIDDLE
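# Quick check with the constants above (illustrative): circum_ratio = 450/150 = 3.0,
# SEGMENTS_OUTER = int(25/3.0) = 8 and SEGMENTS_MIDDLE = int((25+3.0)//2) = 14, so
# nr_segments_outer = 360//8 = 45, nr_segments_inner = 360//25 = 14,
# nr_segments_middle = 360//14 = 25.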
rhos = np.linspace(RADIUS_INNER, RADIUS_OUTER, NR_LAYERS)
# =============================================================================
# Generate points
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
# =============================================================================
# Deep surface mesh
# =============================================================================
# Part 1
points1 = np.zeros((NR_LAYERS, nr_segments_outer, 2))
phis = np.linspace(np.pi, 2*np.pi, nr_segments_outer)
for k, r in enumerate(rhos):
for i, j in enumerate(phis):
points1[k, i, :] = pol2cart(r, j)
# Adjust point coordinates to array grid coordinates
points1[k, :, :] += center
# Part 2
points2 = np.zeros((NR_LAYERS, nr_segments_inner, 2))
phis =
|
np.linspace(0, np.pi, nr_segments_inner)
|
numpy.linspace
|
import pandas as pd
import numpy as np
import talib
class Indicators(object):
"""
Input: price DataFrame, a lookback period and (where relevant) standard deviation multipliers
Each indicator method returns DataFrame(s) indexed like the input prices, with one column per symbol
Output of bb(): the upper, middle and lower Bollinger Band DataFrames
"""
def bb(self, l_sym, df_price, time_period, st_dev_u, st_dev_l):
df_bb_u = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_m = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_l = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_bb_u[sym], df_bb_m[sym], df_bb_l[sym] = talib.BBANDS(np.asarray(df_price[sym]), timeperiod=time_period, nbdevup=st_dev_u, nbdevdn=st_dev_l)
except:
pass
return df_bb_u, df_bb_m, df_bb_l
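# Illustrative usage (hypothetical price frame with one column per symbol):
# ind = Indicators()
# upper, middle, lower = ind.bb(['AAPL'], df_price, time_period=20, st_dev_u=2, st_dev_l=2)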
def ema(self, l_sym, df_price, time_period):
df_ema = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ema[sym] = talib.EMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ema
def ma(self, l_sym, df_price, time_period):
df_ma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ma[sym] = talib.MA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ma
def sma(self, l_sym, df_price, time_period):
df_sma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_sma[sym] = talib.SMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_sma
def adx(self, l_sym, df_high, df_low, df_close, time_period):
df_adx = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_adx[sym] = talib.ADX(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod = time_period)
except:
pass
return df_adx
def mom(self, l_sym, df_price, time_period):
df_mom = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_mom[sym] = talib.MOM(np.asarray(df_price[sym]), timeperiod = time_period)
except:
pass
return df_mom
def atr(self, l_sym, df_high, df_low, df_close, time_period):
df_atr = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_atr[sym] = talib.ATR(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=
|
np.asarray(df_close[sym])
|
numpy.asarray
|
from networkx.algorithms.assortativity import neighbor_degree
import pandas as pd
import networkx as nx
import numpy as np
from scipy.spatial.distance import cosine
from copy import deepcopy
from tqdm import tqdm
from sklearn.neighbors import NearestNeighbors
import random
import pickle5 as pickle
import time
import scipy.sparse
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def difference(start, end, interval):
x = end - start
r = {
'week': int(x / np.timedelta64(1, 'W')),
'fortnight': int(x / np.timedelta64(2, 'W')),
'month': int(x / np.timedelta64(1, 'M'))
}
return r[interval]
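# Hedged usage note (added): with pandas timestamps, e.g.
#   difference(pd.Timestamp('2021-01-01'), pd.Timestamp('2021-02-01'), 'week')
# returns 4, i.e. the number of whole weeks between the two dates.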
def make_hin(X, df, id_feature='GKGRECORDID', date_feature='date_str', theme_feature='Themes', location_feature='Locations', person_feature='Persons', org_feature='Organizations'):
G = nx.Graph()
for index,row in df.iterrows():
node_id = row[id_feature]
# date conversion
date_value = pd.to_datetime(row[date_feature], format='%Y-%m-%d')
node_date = str(date_value.week) + '-' + str(date_value.year)
#node_themes_array = ''
node_locations_array = ''
node_people_array = ''
node_organizations_array = ''
try:
node_themes_array = row[theme_feature].split(';')
except:
node_themes_array = []
try:
node_locations_array = row[location_feature].split(';')
except:
node_locations_array = []
try:
node_people_array = row[person_feature].split(';')
except:
node_people_array = []
try:
node_organizations_array = row[org_feature].split(';')
except:
node_organizations_array = []
# event <-> date
G.add_edge(node_id,node_date,edge_type='event_date', edge_value=date_value)
G.nodes[node_id]['node_type'] = 'event'
G.nodes[node_date]['node_type'] = 'date'
# event <-> theme
for theme in node_themes_array:
if len(theme) > 0:
G.add_edge(node_id,theme,edge_type='event_theme')
G.nodes[theme]['node_type'] = 'theme'
# event <-> locations
for location in node_locations_array:
if len(location) > 0:
G.add_edge(node_id,location,edge_type='event_location')
G.nodes[location]['node_type'] = 'location'
# event <-> persons
for person in node_people_array:
if len(person) > 0:
G.add_edge(node_id,person,edge_type='event_person')
G.nodes[person]['node_type'] = 'person'
# event <-> organization
for org in node_organizations_array:
if len(org) > 0:
G.add_edge(node_id,org,edge_type='event_org')
G.nodes[org]['node_type'] = 'org'
# embedding
G.nodes[node_id]['embedding'] = X[index]
return G
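# Hedged usage sketch (illustrative only): `df` is assumed to be a GDELT-GKG-style frame with
# the columns named in the defaults above, and `X` a matching array of event embeddings.
#   G = make_hin(X, df)
#   print(G.number_of_nodes(), G.number_of_edges())
# Every event node then carries its embedding under the 'embedding' attribute.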
def inner_connections(G, interval='week', embedding_feature='embedding', type_feature='edge_type', desired_type_feature='event_date', value_feature='edge_value', return_type_feature='event_event'):
edges_to_add = []
for node1, neighbor1 in G.edges:
if embedding_feature in G.nodes[node1]:
if G[node1][neighbor1][type_feature] == desired_type_feature:
for node2, neighbor2 in G.edges:
if embedding_feature in G.nodes[node2]:
if G[node2][neighbor2][type_feature] == desired_type_feature:
temp_cosine = cosine(G.nodes[node1][embedding_feature], G.nodes[node2][embedding_feature])
if temp_cosine <= 0.5 and temp_cosine != 0.0:
if abs(difference(G[node1][neighbor1][value_feature], G[node2][neighbor2][value_feature], interval)) <= 3:
edges_to_add.append((node1,node2))
for new_edge in edges_to_add:
G.add_edge(new_edge[0],new_edge[1],edge_type=return_type_feature)
return G
def inner_connections_dateless(G, embedding_feature='embedding', return_type_feature='event_event'):
edges_to_add = []
for node1 in G.nodes():
if embedding_feature in G.nodes[node1]:
for node2 in G.nodes():
if embedding_feature in G.nodes[node2]:
temp_cosine = cosine(G.nodes[node1][embedding_feature], G.nodes[node2][embedding_feature])
if temp_cosine <= 0.5 and temp_cosine != 0.0:
edges_to_add.append((node1,node2))
for new_edge in edges_to_add:
G.add_edge(new_edge[0],new_edge[1],edge_type=return_type_feature)
return G
def is_equal(x, true_feature='true', restored_feature='restored'):
if x[true_feature][0] == x[restored_feature][0] and x[true_feature][1] == x[restored_feature][1]:
return 1
elif x[true_feature][0] == x[restored_feature][1] and x[true_feature][1] == x[restored_feature][0]:
return 1
return 0
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
def get_metric(metric, pred):
if metric == 'acc':
return accuracy_score([1] * pred.shape[0], list(pred))
elif metric == 'precision':
return precision_score([1] * pred.shape[0], list(pred))
elif metric == 'recall':
return recall_score([1] * pred.shape[0], list(pred))
elif metric == 'f1':
return f1_score([1] * pred.shape[0], list(pred))
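# Small worked example (added for clarity): get_metric scores a vector of binary
# "correctly restored" indicators against an all-ones reference.
assert get_metric('acc', np.array([1, 1, 0, 1])) == 0.75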
def disturbed_hin(G, split=0.1, random_state=None, edge_type=['event_date', 'event_event', 'event_location', 'event_person', 'event_org', 'event_theme'], type_feature='edge_type'):
"""
G: hin;
split: percentage to be cut from the hin;
random_state: ;
edge_type: listlike object of types of edges to be cut;
type_feature: feature name of edge_type on your hin.
"""
def keep_left(x, G):
edge_split = x['type'].split('_')
if G.nodes[x['node']]['node_type'] != edge_split[0]:
x['node'], x['neighbor'] = x['neighbor'], x['node']
return x
# prepare data for type counting
edges = list(G.edges)
edge_types = [G[edge[0]][edge[1]][type_feature] for edge in edges]
edges = pd.DataFrame(edges)
edges = edges.rename(columns={0: 'node', 1: 'neighbor'})
edges['type'] = edge_types
edges = edges.apply(keep_left, G=G, axis=1)
edges_group = edges.groupby(by=['type'], as_index=False).count().reset_index(drop=True)
# prepare the edges to be removed
edges = edges.sample(frac=1, random_state=random_state).reset_index(drop=True)
edges_group = edges_group.rename(columns={'node': 'count', 'neighbor': 'to_cut_count'})
edges_group['to_cut_count'] = edges_group['to_cut_count'].apply(lambda x:round(x * split))
to_cut = {}
for index, row in edges_group.iterrows():
if row['type'] in edge_type:
to_cut[row['type']] = edges[edges['type'] == row['type']].reset_index(drop=True).loc[0:row['to_cut_count']-1]
G_disturbed = deepcopy(G)
for key, tc_df in to_cut.items():
for index, row in tc_df.iterrows():
G_disturbed.remove_edge(row['node'],row['neighbor'])
return G_disturbed, to_cut
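# Hedged usage sketch (illustrative only): remove ~10% of the event_person edges and keep
# track of what was cut, so the restoration step below can be scored against ground truth.
#   G_disturbed, cut = disturbed_hin(G, split=0.1, random_state=42, edge_type=['event_person'])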
def hide_nodes(G, percentual=0.4, random_state=None, node_type=['event'], type_feature='node_type'):
nodes = list(G.nodes)
node_types = [G.nodes[node][type_feature] for node in nodes]
nodes = pd.DataFrame(nodes)
nodes = nodes.rename(columns={0: 'node'})
nodes['type'] = node_types
nodes_group = nodes.groupby(by=['type'], as_index=False).count().reset_index(drop=True)
nodes = nodes.sample(frac=1, random_state=random_state).reset_index(drop=True)
nodes_group = nodes_group.rename(columns={'node': 'count'})
nodes_group['to_cut_count'] = nodes_group['count'].apply(lambda x:round(x * percentual))
to_hide = {}
for index, row in nodes_group.iterrows():
if row['type'] in node_type:
to_hide[row['type']] = nodes[nodes['type'] == row['type']].reset_index(drop=True).loc[0:row['to_cut_count']-1]
G_hidden = deepcopy(G)
for key, th_df in to_hide.items():
neighbors = []
for index, row in th_df.iterrows():
event_neighbors = list(G_hidden.neighbors(row['node']))
neighbors.append(event_neighbors)
for neighbor in event_neighbors:
G_hidden.remove_edge(row['node'],neighbor)
th_df['neighbors'] = neighbors
return G_hidden, to_hide
def find_nodes(G, hidden, percentual=1.0, random_state=None, type_feature='node_type'):
G_found = deepcopy(G)
for key, th_df in hidden.items():
th_df = th_df.sample(frac=1, random_state=random_state).reset_index(drop=True)
adding_df = th_df.loc[0:round(th_df.shape[0] * percentual)-1]
remaining_df = th_df.loc[round(th_df.shape[0] * percentual):th_df.shape[0]-1]
for index, row in adding_df.iterrows():
to_add = row.neighbors
for add in to_add:
edge_type_str = '{}_{}'.format(row['type'], G.nodes[add][type_feature])
G_found.add_edge(row['node'], add, edge_type=edge_type_str)
hidden[key] = remaining_df
return G_found, hidden
def regularization(G, dim=512, embedding_feature: str = 'embedding', iterations=15, mi=0.85):
nodes = []
# initialize the f vector for all nodes
for node in G.nodes():
if 'f' not in G.nodes[node]:
G.nodes[node]['f'] = np.array([0.0]*dim)
elif embedding_feature in G.nodes[node]:
G.nodes[node]['f'] = G.nodes[node][embedding_feature]*1.0
nodes.append(node)
pbar = tqdm(range(0, iterations))
for iteration in pbar:
random.shuffle(nodes)
energy = 0.0
# iterate over each node
for node in nodes:
f_new = np.array([0.0]*dim)
f_old = np.array(G.nodes[node]['f'])*1.0
sum_w = 0.0
# iterate over the node's neighbors
for neighbor in G.neighbors(node):
w = 1.0
if 'weight' in G[node][neighbor]:
w = G[node][neighbor]['weight']
w /= np.sqrt(G.degree[neighbor])
f_new = f_new + w*G.nodes[neighbor]['f']
sum_w = sum_w + w
if sum_w == 0.0: sum_w = 1.0
f_new /= sum_w
G.nodes[node]['f'] = f_new*1.0
if embedding_feature in G.nodes[node]:
G.nodes[node]['f'] = G.nodes[node][embedding_feature] * \
mi + G.nodes[node]['f']*(1.0-mi)
energy = energy + np.linalg.norm(f_new-f_old)
iteration = iteration + 1
message = 'Iteration '+str(iteration)+' | Energy = '+str(energy)
pbar.set_description(message)
return G
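# Note (added): each iteration above applies the propagation update
#   f(v) <- sum_u [w(u,v)/sqrt(deg(u))] * f(u) / sum_u [w(u,v)/sqrt(deg(u))],
# and nodes that own an embedding are then pulled back towards it:
#   f(v) <- mi * embedding(v) + (1 - mi) * f(v).
# The reported "energy" is the sum of ||f_new - f_old|| over all nodes and should shrink
# as the regularization converges.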
def get_knn_data(G, node, embedding_feature: str = 'f'):
knn_data, knn_nodes = [], []
# use a separate loop variable so the query node is not shadowed
for other in nx.non_neighbors(G, node):
if embedding_feature in G.nodes[other]:
knn_data.append(G.nodes[other][embedding_feature])
knn_nodes.append(other)
return pd.DataFrame(knn_data), pd.DataFrame(knn_nodes)
def run_knn(k, G_restored, row, knn_data, knn_nodes, node_feature='node', embedding_feature='f'):
if k == -1:
k = knn_data.shape[0]
knn = NearestNeighbors(n_neighbors=k, metric='cosine')
knn.fit(knn_data)
indice = knn.kneighbors(G_restored.nodes[row[node_feature]][embedding_feature].reshape(-1, 512), return_distance=False)
return [knn_nodes[0].iloc[indice[0][i]] for i in range(k)]
from annoy import AnnoyIndex
def run_annoy(k, G_restored, row, knn_data, knn_nodes, node_feature='node', embedding_feature='f', dim=512):
knn = AnnoyIndex(dim, 'angular')
for knn_index, knn_row in knn_data.iterrows():
knn.add_item(knn_index, knn_row)
knn.build(k)
indice = knn.get_nns_by_vector(G_restored.nodes[row[node_feature]][embedding_feature], k, include_distances=False)
return [knn_nodes[0].loc[indice[i]] for i in range(k)]
import multiprocessing
def restore_hin(G, cutted_dict, nn_method='knn', n_jobs=-1, k=-1, node_feature='node', neighbor_feature='neighbor', node_type_feature='node_type', embedding_feature='f'):
def process(start, end, G, nearest_neighbor_selector, key, value, return_dict, thread_id):
value_thread = value.loc[start:(end-1)]
restored_dict_thread = {'true': [], 'restored': [], 'edge_type': []}
for index, row in tqdm(value_thread.iterrows(), total=value_thread.shape[0]):
edge_to_add = key.split('_')
edge_to_add[0] = row[node_feature]
edge_to_add = [row[node_feature] if e == G.nodes[row[node_feature]][node_type_feature] and row[node_feature] != edge_to_add[0] else e for e in edge_to_add]
knn_data, knn_nodes = get_knn_data(G, row[node_feature])
knn_nodes['type'] = knn_nodes[0].apply(lambda x: G.nodes[x][node_type_feature])
knn_data = knn_data[knn_nodes['type'].isin(edge_to_add)]
knn_nodes = knn_nodes[knn_nodes['type'].isin(edge_to_add)]
edge_to_add[1] = nearest_neighbor_selector[nn_method](k, G, row, knn_data, knn_nodes)
restored_dict_thread['true'].append([row[node_feature], row[neighbor_feature]])
restored_dict_thread['restored'].append(edge_to_add)
restored_dict_thread['edge_type'].append(key)
for key in restored_dict_thread.keys():
_key = key + str(thread_id)
return_dict[_key] = (restored_dict_thread[key])
def split_processing(n_jobs, G, nearest_neighbor_selector, key, value, return_dict):
split_size = round(len(value) / n_jobs)
threads = []
for i in range(n_jobs):
# determine the indices of the list this thread will handle
start = i * split_size
# special case on the last chunk to account for uneven splits
end = len(value) if i+1 == n_jobs else (i+1) * split_size
# create the thread
threads.append(
multiprocessing.Process(target=process, args=(start, end, G, nearest_neighbor_selector, key, value, return_dict, i)))
threads[-1].start() # start the thread we just created
# wait for all threads to finish
for t in threads:
t.join()
if n_jobs == -1:
n_jobs = multiprocessing.cpu_count()
restored_dict = {'true': [], 'restored': [], 'edge_type': []}
return_dict = multiprocessing.Manager().dict()
nearest_neighbor_selector = {
'knn': run_knn,
'annoy': run_annoy
}
for key, value in cutted_dict.items():
split_processing(n_jobs, G, nearest_neighbor_selector, key, value, return_dict)
return_dict = dict(return_dict)
for thread_key in restored_dict.keys():
for job in range(n_jobs):
for res in return_dict[thread_key + str(job)]:
restored_dict[thread_key].append(res)
return pd.DataFrame(restored_dict)
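# Hedged end-to-end sketch (illustrative only; exact post-processing depends on k):
#   restored = restore_hin(G_disturbed, cut, nn_method='knn', k=1)
#   # each row holds the true (node, neighbour) pair and the nearest candidate(s);
#   # is_equal()/get_metric() above can then score how often the true neighbour is recovered.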
def restore_edges(G, restored):
G_restored = deepcopy(G)
# keep the DataFrame intact; do not shadow it with the loop variable
edge_types = restored.edge_type.to_list()
for idx, r in enumerate(restored.restored.to_list()):
G_restored.add_edge(r[0], r[1][0], edge_type=edge_types[idx])
return G_restored
# put embeddings on graph
def embedding_graph(G, embeddings, embedding_feature='f'):
for key, value in embeddings.items():
G.nodes[key][embedding_feature] = value
return G
def masked_softmax_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_accuracy(preds, labels, mask):
"""Accuracy with masking."""
correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
accuracy_all *= mask
return tf.reduce_mean(accuracy_all)
import layers.graph as lg
import utils.sparse as us
def gcn(G, target, i, split, label_feature='node_type', label_number='type_code', embedding_feature='f'):
node_list = []
for node in G.nodes():
node_list.append(node)
label_codes = {}
for node in node_list:
if label_feature in G.nodes[node]:
label = G.nodes[node][label_feature]
if label not in label_codes:
label_codes[label] = len(label_codes)
G.nodes[node][label_number] = label_codes[label]
else:
G.nodes[node][label_number] = -1
else:
G.nodes[node][label_number] = -1
adj = nx.adjacency_matrix(G, nodelist=node_list)
# Get important parameters of adjacency matrix
n_nodes = adj.shape[0]
# Some preprocessing
adj_tilde = adj + np.identity(n=adj.shape[0])
d_tilde_diag = np.squeeze(np.sum(np.array(adj_tilde), axis=1))
d_tilde_inv_sqrt_diag = np.power(d_tilde_diag, -1/2)
d_tilde_inv_sqrt = np.diag(d_tilde_inv_sqrt_diag)
adj_norm = np.dot(np.dot(d_tilde_inv_sqrt, adj_tilde), d_tilde_inv_sqrt)
adj_norm_tuple = us.sparse_to_tuple(scipy.sparse.coo_matrix(adj_norm))
# Features are just the identity matrix
feat_x = np.identity(n=adj.shape[0])
feat_x_tuple = us.sparse_to_tuple(scipy.sparse.coo_matrix(feat_x))
# Preparing train data
memberships = [m for m in nx.get_node_attributes(G, label_number).values()]
nb_classes = len(set(memberships))
targets = np.array([memberships], dtype=np.int32).reshape(-1)
one_hot_targets = np.eye(nb_classes)[targets]
labels_to_keep = [i for i in range(len(node_list))]
y_train = np.zeros(shape=one_hot_targets.shape,
dtype=np.float32)
train_mask = np.zeros(shape=(n_nodes,), dtype=bool)
for l in labels_to_keep:
y_train[l, :] = one_hot_targets[l, :]
train_mask[l] = True
# TensorFlow placeholders
ph = {
'adj_norm': tf.sparse_placeholder(tf.float32, name="adj_mat"),
'x': tf.sparse_placeholder(tf.float32, name="x"),
'labels': tf.placeholder(tf.float32, shape=(n_nodes, nb_classes)),
'mask': tf.placeholder(tf.int32)}
l_sizes = [1024, 1024, 512, nb_classes]
name_text = str(target) + '_' + str(i) + '_' + str(split)
o_fc1 = lg.GraphConvLayer(
input_dim=feat_x.shape[-1],
output_dim=l_sizes[0],
name='fc1_'+name_text,
activation=tf.nn.tanh)(adj_norm=ph['adj_norm'], x=ph['x'], sparse=True)
o_fc2 = lg.GraphConvLayer(
input_dim=l_sizes[0],
output_dim=l_sizes[1],
name='fc2_'+name_text,
activation=tf.nn.tanh)(adj_norm=ph['adj_norm'], x=o_fc1)
o_fc3 = lg.GraphConvLayer(
input_dim=l_sizes[1],
output_dim=l_sizes[2],
name='fc3_'+name_text,
activation=tf.nn.tanh)(adj_norm=ph['adj_norm'], x=o_fc2)
o_fc4 = lg.GraphConvLayer(
input_dim=l_sizes[2],
output_dim=l_sizes[3],
name='fc4_'+name_text,
activation=tf.identity)(adj_norm=ph['adj_norm'], x=o_fc3)
with tf.name_scope('optimizer'):
loss = masked_softmax_cross_entropy(preds=o_fc4, labels=ph['labels'], mask=ph['mask'])
accuracy = masked_accuracy(preds=o_fc4, labels=ph['labels'], mask=ph['mask'])
optimizer = tf.train.AdamOptimizer(learning_rate=1e-2)
opt_op = optimizer.minimize(loss)
feed_dict_train = {ph['adj_norm']: adj_norm_tuple,
ph['x']: feat_x_tuple,
ph['labels']: y_train,
ph['mask']: train_mask}
sess = tf.Session()
sess.run(tf.global_variables_initializer())
epochs = 20
save_every = 50
t = time.time()
embedding_out = []
# Train model
for epoch in range(epochs):
_, train_loss, train_acc = sess.run(
(opt_op, loss, accuracy), feed_dict=feed_dict_train)
if True:
val_loss, val_acc = sess.run((loss, accuracy), feed_dict=feed_dict_train)
# # Print results
# #print("Epoch:", '%04d' % (epoch + 1),
# "train_loss=", "{:.5f}".format(train_loss),
# "time=", "{:.5f}".format(time.time() - t))
feed_dict_output = {ph['adj_norm']: adj_norm_tuple,
ph['x']: feat_x_tuple}
embeddings = sess.run(o_fc3, feed_dict=feed_dict_output)
if epoch + 1 == epochs:
embedding_out = embeddings
for idx, node in enumerate(G.nodes()):
G.nodes[node][embedding_feature] = embedding_out[idx]
return G
def date_string(x):
x = str(x)
l = []
l[:0] = x
l.insert(4,'-')
l.insert(7,'-')
l = l[:10]
s = ''
return s.join(l)
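# Worked example (added): compact GKG-style date codes become ISO-like strings.
assert date_string(20210705) == '2021-07-05'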
from bs4 import BeautifulSoup
def decode_html_text(x):
x = BeautifulSoup(x, 'html.parser')
return x.get_text()
from gensim.models import Word2Vec
from stellargraph.data import UniformRandomMetaPathWalk
from stellargraph import StellarGraph
def metapath2vec(graph, dimensions = 512, num_walks = 1, walk_length = 100, context_window_size = 10,
num_iter = 1, workers = 1, node_type='node_type', edge_type='edge_type',
user_metapaths=[
['event','date','event'],['event','what','event'],['event','where','event'],
['event','who','event'],['event','why','event'],['event','how','event'],
['event','date','event','trend','event'],['event','what','event','trend','event'],
['event','where','event','trend','event'],['event','who','event','trend','event'],
['event','why','event','trend','event'],['event','how','event','trend','event'],
]
):
s_graph = StellarGraph.from_networkx(graph, node_type_attr=node_type, edge_type_attr=edge_type)
rw = UniformRandomMetaPathWalk(s_graph)
walks = rw.run(
s_graph.nodes(), n=num_walks, length=walk_length, metapaths=user_metapaths
)
print(f"Number of random walks: {len(walks)}")
model = Word2Vec(
walks,
size=dimensions,
window=context_window_size,
min_count=0,
sg=1,
workers=workers,
iter=num_iter,
)
def get_embeddings(model, graph):
if model is None:
print("model not train")
return {}
_embeddings = {}
for word in graph.nodes():
try:
_embeddings[word] = model.wv[word]
except:
_embeddings[word] =
|
np.zeros(dimensions)
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 24 19:16:51 2019
@author: mostafamousavi, @filefolder
last update: 08/05/21
"""
from __future__ import division, print_function
import numpy as np
import h5py
import matplotlib
matplotlib.use('agg')
from tqdm import tqdm
import os
os.environ['KERAS_BACKEND']='tensorflow'
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import add, Activation, LSTM, Conv1D, InputSpec
from tensorflow.keras.layers import MaxPooling1D, UpSampling1D, Cropping1D, SpatialDropout1D, Bidirectional, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from obspy.signal.trigger import trigger_onset
import matplotlib
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
class DataGenerator(keras.utils.Sequence):
"""
Keras generator with preprocessing
Parameters
----------
list_IDs: list of str
List of trace names.
file_name: str
Name of hdf5 file containing waveforms data.
dim: int
Dimension of input traces, in samples.
batch_size: int, default=32
Batch size.
n_channels: int, default=3
Number of channels.
phase_window: int, fixed=40
The number of samples (window) around each phase.
shuffle: bool, default=True
Shuffle the list.
norm_mode: str, default=max
The mode of normalization, 'max' or 'std'.
label_type: str, default=gaussian
Labeling type: 'gaussian', 'triangle', or 'box'.
augmentation: bool, default=False
If True, half of each batch will be an augmented version of the other half.
add_event_r: {float, None}, default=None
Chance for randomly adding a second event into the waveform.
add_gap_r: {float, None}, default=None
Add an interval with zeros into the waveform representing filled gaps.
coda_ratio: {float, 0.4}, default=0.4
% of S-P time to extend event/coda envelope past S pick.
shift_event_r: {float, None}, default=None
Rate of augmentation for randomly shifting the event within a trace.
add_noise_r: {float, None}, default=None
Chance for randomly adding Gaussian noise into the waveform.
drop_channe_r: {float, None}, default=None
Chance for randomly dropping some of the channels.
scale_amplitude_r: {float, None}, default=None
Chance for randomly amplifying the waveform amplitude.
pre_emphasis: bool, default=True
If True, waveforms will be pre-emphasized.
Returns
--------
Tuple of (X, [y1,y2,y3], [sw1,sw2,sw3]), where:
X = pre-processed waveform as input
y1,y2,y3 = numpy arrays as labels for detection, P, and S respectively
sw1,sw2,sw3 = sample weights for detection, P, and S respectively / e.g. sw1 = [.11,.89] if 11% of the y1 labels are non-zero
"""
def __init__(self,
list_IDs,
file_name,
dim,
batch_size=32,
n_channels=3,
phase_window= 40,
shuffle = True,
norm_mode = 'max',
label_type = 'gaussian',
augmentation = False,
add_event_r = None,
add_gap_r = None,
coda_ratio = 0.4,
shift_event_r = None,
add_noise_r = None,
drop_channe_r = None,
scale_amplitude_r = None,
pre_emphasis = True):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.phase_window = phase_window
self.list_IDs = list_IDs
self.file_name = file_name
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.norm_mode = norm_mode
self.label_type = label_type
self.augmentation = augmentation
self.add_event_r = add_event_r
self.add_gap_r = add_gap_r
self.coda_ratio = coda_ratio
self.shift_event_r = shift_event_r
self.add_noise_r = add_noise_r
self.drop_channe_r = drop_channe_r
self.scale_amplitude_r = scale_amplitude_r
self.pre_emphasis = pre_emphasis
def __len__(self):
'Denotes the number of batches per epoch'
if self.augmentation:
return 2*int(np.floor(len(self.list_IDs) / self.batch_size))
else:
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
if self.augmentation:
indexes = self.indexes[index*self.batch_size//2:(index+1)*self.batch_size//2]
indexes = np.append(indexes, indexes)
else:
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [self.list_IDs[k] for k in indexes]
X, y1, y2, y3 = self.__data_generation(list_IDs_temp)
cw1 = np.array(np.where(y1==1)).size/y1.size
cw2 = np.array(np.where( y2>0)).size/y2.size
cw3 = np.array(np.where( y3>0)).size/y3.size
class_weights = [[cw1, 1-cw1],[cw2, 1-cw2],[cw3, 1-cw3]]
sample_weights = np.array([y1,y2,y3].copy())
for i,y in enumerate([y1,y2,y3]):
sample_weights[i][np.where(y >0)] = class_weights[i][1]
sample_weights[i][np.where(y==0)] = class_weights[i][0]
return (X, [y1,y2,y3],list(sample_weights))
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def _normalize(self, data, mode = 'max'):
'Normalize waveforms in each batch'
data -= np.mean(data, axis=0, keepdims=True)
if mode == 'max':
max_data = np.max(data, axis=0, keepdims=True)
assert(max_data.shape[-1] == data.shape[-1])
max_data[max_data == 0] = 1
data /= max_data
elif mode == 'std':
std_data = np.std(data, axis=0, keepdims=True)
assert(std_data.shape[-1] == data.shape[-1])
std_data[std_data == 0] = 1
data /= std_data
return data
def _scale_amplitude(self, data, rate):
'Scale amplitude of waveforms'
tmp = np.random.uniform(0, 1)
if tmp < rate:
data *= np.random.uniform(1, 3)
elif tmp < 2*rate:
data /= np.random.uniform(1, 3)
return data
def _drop_channel(self, data, snr, rate):
'Randomly replace values of one or two components to zeros in earthquake data'
if np.random.uniform(0, 1) < rate and all(snr >= 10.0):
data = np.copy(data)
c1 = np.random.choice([0, 1])
c2 = np.random.choice([0, 1])
c3 = np.random.choice([0, 1])
if c1 + c2 + c3 > 0:
data[..., np.array([c1, c2, c3]) == 0] = 0
return data
def _drop_channel_noise(self, data, rate):
'Randomly replace values of one or two components to zeros in noise data'
if np.random.uniform(0, 1) < rate:
data = np.copy(data)
c1 = np.random.choice([0, 1])
c2 = np.random.choice([0, 1])
c3 = np.random.choice([0, 1])
if c1 + c2 + c3 > 0:
data[..., np.array([c1, c2, c3]) == 0] = 0
return data
def _add_gaps(self, data, rate):
'Randomly add gaps (zeros) of different sizes into waveforms'
if np.random.uniform(0, 1) < rate:
data = np.copy(data)
gap_start = np.random.randint(0, 4000)
gap_end = np.random.randint(gap_start, 5900)
data[gap_start:gap_end,:] = 0
return data
def _add_noise(self, data, snr, rate):
'Randomly add Gaussian noise with a random SNR into waveforms'
if np.random.uniform(0, 1) < rate and all(snr >= 6.0): #reduced snr from 10 to 6
data_noisy = np.empty((data.shape))
data_noisy[:, 0] = data[:,0] + np.random.normal(0, np.random.uniform(0, 0.15)*abs(max(data[:,0])), data.shape[0])
data_noisy[:, 1] = data[:,1] + np.random.normal(0, np.random.uniform(0, 0.15)*abs(max(data[:,1])), data.shape[0])
data_noisy[:, 2] = data[:,2] + np.random.normal(0, np.random.uniform(0, 0.15)*abs(max(data[:,2])), data.shape[0])
else:
data_noisy = data
return data_noisy
def _adjust_amplitude_for_multichannels(self, data):
'Adjust the amplitude of multichannel data'
tmp = np.max(np.abs(data), axis=0, keepdims=True)
assert(tmp.shape[-1] == data.shape[-1])
if np.count_nonzero(tmp) > 0:
data *= data.shape[-1] / np.count_nonzero(tmp)
return data
def _trilabel(self, a=0, b=20, c=40):
'Used for triangular labeling'
z = np.linspace(a, c, num = 2*(b-a)+1)
y = np.zeros(z.shape)
y[z <= a] = 0
y[z >= c] = 0
first_half = np.logical_and(a < z, z <= b)
y[first_half] = (z[first_half]-a) / (b-a)
second_half = np.logical_and(b < z, z < c)
y[second_half] = (c-z[second_half]) / (c-b)
return y
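# Note (added): with the defaults a=0, b=20, c=40 this produces a 41-sample triangular ramp
# rising linearly from 0 at `a` to 1 at `b` and back to 0 at `c`; it is sliced into y2/y3
# below to give soft P/S-pick labels when label_type == 'triangle'.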
def _gausslabel(self,i,y2,y3,spt,sst,a=31):
'Used for gaussian labeling- do both y2 and y3 at once'
dim = y2.shape[1]
g = np.exp(-(np.arange(-a,a+1))**2/(2*(a/3)**2))
if spt:
Lg = abs(min(0,spt-a))
Rg = len(g) - (spt+a-dim+1)
L = max(0,spt-a)
R = min(dim,spt+a+1)
y2[i,L:R,0] = g[Lg:Rg]
if sst:
Lg = abs(min(0,sst-a))
Rg = len(g) - (sst+a-dim+1)
L = max(0,sst-a)
R = min(dim,sst+a+1)
y3[i,L:R,0] = g[Lg:Rg]
return y2,y3
def _add_event(self, data, addp, adds, coda_end, snr, rate):
'Add a scaled version of the event into the empty part of the trace'
added = np.copy(data)
additions = None
spt_secondEV = None
sst_secondEV = None
if addp and adds:
s_p = adds - addp
if np.random.uniform(0, 1) < rate and all(snr>=10.0) and (data.shape[0]-s_p-21-coda_end) > 20:
secondEV_strt = np.random.randint(coda_end, data.shape[0]-s_p-21)
scaleAM = 1/np.random.randint(1, 10)
space = data.shape[0]-secondEV_strt
added[secondEV_strt:secondEV_strt+space, 0] += data[addp:addp+space, 0]*scaleAM
added[secondEV_strt:secondEV_strt+space, 1] += data[addp:addp+space, 1]*scaleAM
added[secondEV_strt:secondEV_strt+space, 2] += data[addp:addp+space, 2]*scaleAM
spt_secondEV = secondEV_strt
if spt_secondEV + s_p + 21 <= data.shape[0]:
sst_secondEV = spt_secondEV + s_p
if spt_secondEV and sst_secondEV:
additions = [spt_secondEV, sst_secondEV]
data = added
return data, additions
def _shift_event_orig(self, data, addp, adds, coda_end, snr, rate):
'Randomly rotate the array to shift the event location'
org_len = len(data)
data2 = np.copy(data)
addp2 = adds2 = coda_end2 = None;
if np.random.uniform(0, 1) < rate:
nrotate = int(np.random.uniform(1, int(org_len - coda_end)))
data2[:, 0] = list(data[:, 0])[-nrotate:] + list(data[:, 0])[:-nrotate]
data2[:, 1] = list(data[:, 1])[-nrotate:] + list(data[:, 1])[:-nrotate]
data2[:, 2] = list(data[:, 2])[-nrotate:] + list(data[:, 2])[:-nrotate]
if addp+nrotate >= 0 and addp+nrotate < org_len:
addp2 = addp+nrotate;
else:
addp2 = None;
if adds+nrotate >= 0 and adds+nrotate < org_len:
adds2 = adds+nrotate;
else:
adds2 = None;
if coda_end+nrotate < org_len:
coda_end2 = coda_end+nrotate
else:
coda_end2 = org_len
if addp2 and adds2:
data = data2;
addp = addp2;
adds = adds2;
coda_end= coda_end2;
return data, addp, adds, coda_end
def _shift_event(self, data, addp, adds, coda_end, snr, rate):
'Randomly rotate the array to shift the event location'
if rate == 1 or np.random.uniform(0, 1) < rate:
org_len = len(data)
nrotate = np.random.randint(0,org_len)
data = np.roll(data,nrotate,axis=0) #shape of data is (6000,3)
#define new values
coda_end = (coda_end+nrotate)%org_len
addp = (addp+nrotate)%org_len
adds = (adds+nrotate)%org_len
#truncate coda_end to end of data where needed & remove S-pick if truncated
if coda_end < addp:
coda_end = org_len
if adds < addp:
coda_end = org_len
adds = None
return data, addp, adds, coda_end
def _pre_emphasis(self, data, pre_emphasis=0.97):
'apply the pre_emphasis'
for ch in range(self.n_channels):
bpf = data[:, ch]
data[:, ch] = np.append(bpf[0], bpf[1:] - pre_emphasis * bpf[:-1])
return data
def __data_generation(self, list_IDs_temp):
'read the waveforms'
X = np.zeros((self.batch_size, self.dim, self.n_channels))
y1 = np.zeros((self.batch_size, self.dim, 1))
y2 = np.zeros((self.batch_size, self.dim, 1))
y3 = np.zeros((self.batch_size, self.dim, 1))
fl = h5py.File(self.file_name, 'r')
# Generate data
for i, ID in enumerate(list_IDs_temp):
additions = None
dataset = fl.get('data/'+str(ID))
if ID.split('_')[-1] == 'EV':
data = np.array(dataset)
spt = int(dataset.attrs['p_arrival_sample']);
sst = int(dataset.attrs['s_arrival_sample']);
coda_end = int(dataset.attrs['coda_end_sample']);
snr = dataset.attrs['snr_db'];
elif ID.split('_')[-1] == 'NO':
data = np.array(dataset)
## augmentation
if self.augmentation == True:
if i <= self.batch_size//2:
if self.shift_event_r and dataset.attrs['trace_category'] == 'earthquake_local':
data, spt, sst, coda_end = self._shift_event(data, spt, sst, coda_end, snr, self.shift_event_r/2);
if self.norm_mode:
data = self._normalize(data, self.norm_mode)
else:
if dataset.attrs['trace_category'] == 'earthquake_local':
if self.shift_event_r:
data, spt, sst, coda_end = self._shift_event(data, spt, sst, coda_end, snr, self.shift_event_r)
if self.add_event_r:
data, additions = self._add_event(data, spt, sst, coda_end, snr, self.add_event_r)
if self.add_noise_r:
data = self._add_noise(data, snr, self.add_noise_r)
if self.drop_channe_r:
data = self._drop_channel(data, snr, self.drop_channe_r)
data = self._adjust_amplitude_for_multichannels(data)
if self.scale_amplitude_r:
data = self._scale_amplitude(data, self.scale_amplitude_r)
if self.pre_emphasis:
data = self._pre_emphasis(data)
if self.norm_mode:
data = self._normalize(data, self.norm_mode)
elif dataset.attrs['trace_category'] == 'noise':
if self.drop_channe_r:
data = self._drop_channel_noise(data, self.drop_channe_r)
if self.add_noise_r:
data = self._add_noise(data, np.array([10.]), self.add_noise_r)
if self.add_gap_r:
data = self._add_gaps(data, self.add_gap_r)
if self.shift_event_r:
data, _, __, ___ = self._shift_event(data, 0, 0, 0, 10., self.shift_event_r)
if self.norm_mode:
data = self._normalize(data, self.norm_mode)
elif self.augmentation == False:
if self.shift_event_r and dataset.attrs['trace_category'] == 'earthquake_local':
data, spt, sst, coda_end = self._shift_event(data, spt, sst, coda_end, snr, self.shift_event_r/2)
if self.norm_mode:
data = self._normalize(data, self.norm_mode)
X[i, :, :] = data
## labeling
if dataset.attrs['trace_category'] == 'earthquake_local':
if self.label_type == 'gaussian':
sd = None
if spt and sst:
sd = sst - spt
if sd and sst:
if sst+int(self.coda_ratio*sd) <= self.dim:
y1[i, spt:int(sst+(self.coda_ratio*sd)), 0] = 1
else:
y1[i, spt:self.dim, 0] = 1
y2,y3 = self._gausslabel(i,y2,y3,spt,sst,a=31)
"""
if spt and (spt-20 >= 0) and (spt+20 < self.dim):
y2[i, spt-20:spt+20, 0] = np.exp(-(np.arange(spt-20,spt+20)-spt)**2/(2*(10)**2))[:self.dim-(spt-20)]
elif spt and (spt-20 < self.dim):
y2[i, 0:spt+20, 0] = np.exp(-(np.arange(0,spt+20)-spt)**2/(2*(10)**2))[:self.dim-(spt-20)]
if sst and (sst-20 >= 0) and (sst-20 < self.dim):
y3[i, sst-20:sst+20, 0] = np.exp(-(np.arange(sst-20,sst+20)-sst)**2/(2*(10)**2))[:self.dim-(sst-20)]
elif sst and (sst-20 < self.dim):
y3[i, 0:sst+20, 0] = np.exp(-(np.arange(0,sst+20)-sst)**2/(2*(10)**2))[:self.dim-(sst-20)]
"""
if additions:
add_sd = None
add_spt = additions[0]
add_sst = additions[1]
if add_spt and add_sst:
add_sd = add_sst - add_spt
if add_sd and add_sst+int(self.coda_ratio*add_sd) <= self.dim:
y1[i, add_spt:int(add_sst+(self.coda_ratio*add_sd)), 0] = 1
else:
y1[i, add_spt:self.dim, 0] = 1
y2,y3 = self._gausslabel(i,y2,y3,add_spt,add_sst,a=31)
"""
if add_spt and (add_spt-20 >= 0) and (add_spt+20 < self.dim):
y2[i, add_spt-20:add_spt+20, 0] = np.exp(-(np.arange(add_spt-20,add_spt+20)-add_spt)**2/(2*(10)**2))[:self.dim-(add_spt-20)]
elif add_spt and (add_spt+20 < self.dim):
y2[i, 0:add_spt+20, 0] = np.exp(-(np.arange(0,add_spt+20)-add_spt)**2/(2*(10)**2))[:self.dim-(add_spt-20)]
if add_sst and (add_sst-20 >= 0) and (add_sst+20 < self.dim):
y3[i, add_sst-20:add_sst+20, 0] = np.exp(-(np.arange(add_sst-20,add_sst+20)-add_sst)**2/(2*(10)**2))[:self.dim-(add_sst-20)]
elif add_sst and (add_sst+20 < self.dim):
y3[i, 0:add_sst+20, 0] = np.exp(-(np.arange(0,add_sst+20)-add_sst)**2/(2*(10)**2))[:self.dim-(add_sst-20)]
"""
elif self.label_type == 'triangle':
sd = None
if spt and sst:
sd = sst - spt
if sd and sst:
if sst+int(self.coda_ratio*sd) <= self.dim:
y1[i, spt:int(sst+(self.coda_ratio*sd)), 0] = 1
else:
y1[i, spt:self.dim, 0] = 1
if spt and (spt-20 >= 0) and (spt+21 < self.dim):
y2[i, spt-20:spt+21, 0] = self._trilabel()
elif spt and (spt+21 < self.dim):
y2[i, 0:spt+spt+1, 0] = self._trilabel(a=0, b=spt, c=2*spt)
elif spt and (spt-20 >= 0):
pdif = self.dim - spt
y2[i, spt-pdif-1:self.dim, 0] = self._trilabel(a=spt-pdif, b=spt, c=2*pdif)
if sst and (sst-20 >= 0) and (sst+21 < self.dim):
y3[i, sst-20:sst+21, 0] = self._trilabel()
elif sst and (sst+21 < self.dim):
y3[i, 0:sst+sst+1, 0] = self._trilabel(a=0, b=sst, c=2*sst)
elif sst and (sst-20 >= 0):
sdif = self.dim - sst
y3[i, sst-sdif-1:self.dim, 0] = self._trilabel(a=sst-sdif, b=sst, c=2*sdif)
if additions:
add_spt = additions[0]
add_sst = additions[1]
add_sd = None
if add_spt and add_sst:
add_sd = add_sst - add_spt
if add_sd and add_sst+int(self.coda_ratio*add_sd) <= self.dim:
y1[i, add_spt:int(add_sst+(self.coda_ratio*add_sd)), 0] = 1
else:
y1[i, add_spt:self.dim, 0] = 1
if add_spt and (add_spt-20 >= 0) and (add_spt+21 < self.dim):
y2[i, add_spt-20:add_spt+21, 0] = self._trilabel()
elif add_spt and (add_spt+21 < self.dim):
y2[i, 0:add_spt+add_spt+1, 0] = self._trilabel(a=0, b=add_spt, c=2*add_spt)
elif add_spt and (add_spt-20 >= 0):
pdif = self.dim - add_spt
y2[i, add_spt-pdif-1:self.dim, 0] = self._trilabel(a=add_spt-pdif, b=add_spt, c=2*pdif)
if add_sst and (add_sst-20 >= 0) and (add_sst+21 < self.dim):
y3[i, add_sst-20:add_sst+21, 0] = self._trilabel()
elif add_sst and (add_sst+21 < self.dim):
y3[i, 0:add_sst+add_sst+1, 0] = self._trilabel(a=0, b=add_sst, c=2*add_sst)
elif add_sst and (add_sst-20 >= 0):
sdif = self.dim - add_sst
y3[i, add_sst-sdif-1:self.dim, 0] = self._trilabel(a=add_sst-sdif, b=add_sst, c=2*sdif)
elif self.label_type == 'box':
sd = None
if sst and spt:
sd = sst - spt
if sd and sst+int(self.coda_ratio*sd) <= self.dim:
y1[i, spt:int(sst+(self.coda_ratio*sd)), 0] = 1
else:
y1[i, spt:self.dim, 0] = 1
if spt:
y2[i, spt-20:spt+20, 0] = 1
if sst:
y3[i, sst-20:sst+20, 0] = 1
if additions:
add_sd = None
add_spt = additions[0]
add_sst = additions[1]
if add_spt and add_sst:
add_sd = add_sst - add_spt
if add_sd and add_sst+int(self.coda_ratio*add_sd) <= self.dim:
y1[i, add_spt:int(add_sst+(self.coda_ratio*add_sd)), 0] = 1
else:
y1[i, add_spt:self.dim, 0] = 1
if add_spt:
y2[i, add_spt-20:add_spt+20, 0] = 1
if add_sst:
y3[i, add_sst-20:add_sst+20, 0] = 1
fl.close()
return X, y1.astype('float32'), y2.astype('float32'), y3.astype('float32')
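# Hedged usage sketch (illustrative only; the file name and trace-ID list are placeholders):
#   gen = DataGenerator(list_IDs=train_ids, file_name='merged.hdf5', dim=6000,
#                       batch_size=32, n_channels=3, augmentation=True,
#                       shift_event_r=0.9, add_noise_r=0.5, coda_ratio=0.4)
#   X, (y1, y2, y3), sample_weights = gen[0]
# X has shape (batch_size, dim, n_channels); y1/y2/y3 are the detection, P and S labels.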
class PreLoadGenerator(keras.utils.Sequence):
"""
Keras generator with preprocessing. Pre-load version.
Parameters
----------
list_IDs: list of str
List of trace names.
inp_data: dict
A dictionary of input hdf5 datasets.
dim: int
Dimension of input traces, in samples.
batch_size: int, default=32
Batch size.
n_channels: int, default=3
Number of channels.
phase_window: int, fixed=40
The number of samples (window) around each phase.
shuffle: bool, default=True
Shuffle the list.
norm_mode: str, default=max
The mode of normalization, 'max' or 'std'.
label_type: str, default=gaussian
Labeling type: 'gaussian', 'triangle', or 'box'.
augmentation: bool, default=False
If True, half of each batch will be an augmented version of the other half.
add_event_r: {float, None}, default=None
Chance for randomly adding a second event into the waveform.
add_gap_r: {float, None}, default=None
Add an interval with zeros into the waveform representing filled gaps.
coda_ratio: {float, 0.4}, default=0.4
% of S-P time to extend event/coda envelope past S pick.
shift_event_r: {float, None}, default=None
Rate of augmentation for randomly shifting the event within a trace.
add_noise_r: {float, None}, default=None
Chance for randomly adding Gaussian noise into the waveform.
drop_channe_r: {float, None}, default=None
Chance for randomly dropping some of the channels.
scale_amplitude_r: {float, None}, default=None
Chance for randomly amplifying the waveform amplitude.
pre_emphasis: bool, default=True
If True, waveforms will be pre-emphasized.
Returns
--------
Tuple of (X, [y1,y2,y3], [sw1,sw2,sw3]), where X is the pre-processed waveform input, y1,y2,y3 are numpy arrays with the labels for detection, P, and S respectively, and sw1,sw2,sw3 are the corresponding sample weights.
"""
def __init__(self,
inp_data,
list_IDs,
file_name,
dim,
batch_size=32,
n_channels=3,
phase_window= 40,
shuffle=True,
norm_mode = 'max',
label_type = 'gaussian',
augmentation = False,
add_event_r = None,
add_gap_r = None,
coda_ratio = 0.4,
shift_event_r = None,
add_noise_r = None,
drop_channe_r = None,
scale_amplitude_r = None,
pre_emphasis = True):
'Initialization'
self.inp_data = inp_data
self.dim = dim
self.batch_size = batch_size
self.phase_window = phase_window
self.list_IDs = list_IDs
self.file_name = file_name
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.norm_mode = norm_mode
self.label_type = label_type
self.augmentation = augmentation
self.add_event_r = add_event_r
self.add_gap_r = add_gap_r
self.coda_ratio = coda_ratio
self.shift_event_r = shift_event_r
self.add_noise_r = add_noise_r
self.drop_channe_r = drop_channe_r
self.scale_amplitude_r = scale_amplitude_r
self.pre_emphasis = pre_emphasis
def __len__(self):
'Denotes the number of batches per epoch'
if self.augmentation:
return 2*int(np.floor(len(self.list_IDs) / self.batch_size))
else:
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
if self.augmentation:
indexes = self.indexes[index*self.batch_size//2:(index+1)*self.batch_size//2]
indexes = np.append(indexes, indexes)
else:
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [self.list_IDs[k] for k in indexes]
X, y1, y2, y3 = self.__data_generation(list_IDs_temp)
cw1 = np.array(np.where(y1==1)).size/y1.size
cw2 = np.array(np.where( y2>0)).size/y2.size
cw3 = np.array(np.where( y3>0)).size/y3.size
class_weights = [[cw1, 1-cw1],[cw2, 1-cw2],[cw3, 1-cw3]]
sample_weights = np.array([y1,y2,y3].copy())
for i,y in enumerate([y1,y2,y3]):
sample_weights[i][np.where(y >0)] = class_weights[i][1]
sample_weights[i][np.where(y==0)] = class_weights[i][0]
return (X, [y1,y2,y3],list(sample_weights))
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def _normalize(self, data, mode = 'max'):
'Normalize waveforms in each batch'
data -= np.mean(data, axis=0, keepdims=True)
if mode == 'max':
max_data = np.max(data, axis=0, keepdims=True)
assert(max_data.shape[-1] == data.shape[-1])
max_data[max_data == 0] = 1
data /= max_data
elif mode == 'std':
std_data = np.std(data, axis=0, keepdims=True)
assert(std_data.shape[-1] == data.shape[-1])
std_data[std_data == 0] = 1
data /= std_data
return data
def _scale_amplitude(self, data, rate):
'Scale amplitude of waveforms'
tmp = np.random.uniform(0, 1)
if tmp < rate:
data *= np.random.uniform(1, 3)
elif tmp < 2*rate:
data /= np.random.uniform(1, 3)
return data
def _drop_channel(self, data, snr, rate):
'Randomly replace values of one or two components to zeros in earthquake data'
if np.random.uniform(0, 1) < rate and all(snr >= 10):
data = np.copy(data)
c1 = np.random.choice([0, 1])
c2 = np.random.choice([0, 1])
c3 = np.random.choice([0, 1])
if c1 + c2 + c3 > 0:
data[..., np.array([c1, c2, c3]) == 0] = 0
return data
def _drop_channel_noise(self, data, rate):
'Randomly replace values of one or two components to zeros in noise data'
if np.random.uniform(0, 1) < rate:
data = np.copy(data)
c1 = np.random.choice([0, 1])
c2 = np.random.choice([0, 1])
c3 = np.random.choice([0, 1])
if c1 + c2 + c3 > 0:
data[..., np.array([c1, c2, c3]) == 0] = 0
return data
def _add_gaps(self, data, rate):
'Randomly add gaps (zeros) of different sizes into waveforms'
if np.random.uniform(0, 1) < rate:
data = np.copy(data)
gap_start = np.random.randint(0, 4000)
gap_end = np.random.randint(gap_start, 5500)
data[gap_start:gap_end,:] = 0
return data
def _add_noise(self, data, snr, rate):
'Randomly add Gaussian noise with a random SNR into waveforms'
if np.random.uniform(0, 1) < rate and all(snr >= 10.0):
data_noisy = np.empty((data.shape))
data_noisy[:, 0] = data[:,0] + np.random.normal(0, np.random.uniform(0.01, 0.15)*abs(max(data[:,0])), data.shape[0])
data_noisy[:, 1] = data[:,1] + np.random.normal(0, np.random.uniform(0.01, 0.15)*abs(max(data[:,1])), data.shape[0])
data_noisy[:, 2] = data[:,2] + np.random.normal(0, np.random.uniform(0.01, 0.15)*abs(max(data[:,2])), data.shape[0])
else:
data_noisy = data
return data_noisy
def _adjust_amplitude_for_multichannels(self, data):
'Adjust the amplitude of multichannel data'
tmp = np.max(np.abs(data), axis=0, keepdims=True)
assert(tmp.shape[-1] == data.shape[-1])
if np.count_nonzero(tmp) > 0:
data *= data.shape[-1] / np.count_nonzero(tmp)
return data
def _label(self, a=0, b=20, c=40):
'Used for triangular labeling'
z = np.linspace(a, c, num = 2*(b-a)+1)
y = np.zeros(z.shape)
y[z <= a] = 0
y[z >= c] = 0
first_half = np.logical_and(a < z, z <= b)
y[first_half] = (z[first_half]-a) / (b-a)
second_half = np.logical_and(b < z, z < c)
y[second_half] = (c-z[second_half]) / (c-b)
return y
def _add_event(self, data, addp, adds, coda_end, snr, rate):
'Add a scaled version of the event into the empty part of the trace'
added = np.copy(data)
additions = None
spt_secondEV = None
sst_secondEV = None
if addp and adds:
s_p = adds - addp
if np.random.uniform(0, 1) < rate and all(snr >= 10.0) and (data.shape[0]-s_p-21-coda_end) > 20:
secondEV_strt = np.random.randint(coda_end, data.shape[0]-s_p-21)
scaleAM = 1/np.random.randint(1, 10)
space = data.shape[0]-secondEV_strt
added[secondEV_strt:secondEV_strt+space, 0] += data[addp:addp+space, 0]*scaleAM
added[secondEV_strt:secondEV_strt+space, 1] += data[addp:addp+space, 1]*scaleAM
added[secondEV_strt:secondEV_strt+space, 2] += data[addp:addp+space, 2]*scaleAM
spt_secondEV = secondEV_strt
if spt_secondEV + s_p + 21 <= data.shape[0]:
sst_secondEV = spt_secondEV + s_p
if spt_secondEV and sst_secondEV:
additions = [spt_secondEV, sst_secondEV]
data = added
return data, additions
def _shift_event(self, data, addp, adds, coda_end, snr, rate):
'Randomly rotate the array to shift the event location'
org_len = len(data)
data2 = np.copy(data)
addp2 = adds2 = coda_end2 = None;
if np.random.uniform(0, 1) < rate:
nrotate = int(np.random.uniform(1, int(org_len - coda_end)))
data2[:, 0] = list(data[:, 0])[-nrotate:] + list(data[:, 0])[:-nrotate]
data2[:, 1] = list(data[:, 1])[-nrotate:] + list(data[:, 1])[:-nrotate]
data2[:, 2] = list(data[:, 2])[-nrotate:] + list(data[:, 2])[:-nrotate]
if addp+nrotate >= 0 and addp+nrotate < org_len:
addp2 = addp+nrotate;
else:
addp2 = None;
if adds+nrotate >= 0 and adds+nrotate < org_len:
adds2 = adds+nrotate;
else:
adds2 = None;
if coda_end+nrotate < org_len:
coda_end2 = coda_end+nrotate
else:
coda_end2 = org_len
if addp2 and adds2:
data = data2;
addp = addp2;
adds = adds2;
coda_end= coda_end2;
return data, addp, adds, coda_end
def _pre_emphasis(self, data, pre_emphasis=0.97):
'apply the pre_emphasis'
for ch in range(self.n_channels):
bpf = data[:, ch]
data[:, ch] = np.append(bpf[0], bpf[1:] - pre_emphasis * bpf[:-1])
return data
def __data_generation(self, list_IDs_temp):
'read the waveforms'
X = np.zeros((self.batch_size, self.dim, self.n_channels))
y1 = np.zeros((self.batch_size, self.dim, 1))
y2 = np.zeros((self.batch_size, self.dim, 1))
y3 = np.zeros((self.batch_size, self.dim, 1))
# Generate data
for i, ID in enumerate(list_IDs_temp):
additions = None
dataset = self.inp_data[ID]
data = np.array(dataset)
if dataset.attrs['trace_category'] == 'earthquake_local':
spt = int(dataset.attrs['p_arrival_sample']);
sst = int(dataset.attrs['s_arrival_sample']);
coda_end = int(dataset.attrs['coda_end_sample']);
snr = dataset.attrs['snr_db'];
if self.augmentation == True:
if i <= self.batch_size//2:
if self.shift_event_r and dataset.attrs['trace_category'] == 'earthquake_local':
data, spt, sst, coda_end = self._shift_event(data, spt, sst, coda_end, snr, self.shift_event_r/2);
if self.norm_mode:
data = self._normalize(data, self.norm_mode)
else:
if dataset.attrs['trace_category'] == 'earthquake_local':
if self.shift_event_r and spt:
data, spt, sst, coda_end = self._shift_event(data, spt, sst, coda_end, snr, self.shift_event_r);
if self.add_event_r and spt and sst:
data, additions = self._add_event(data, spt, sst, coda_end, snr, self.add_event_r);
if self.add_noise_r:
data = self._add_noise(data, snr, self.add_noise_r);
if self.drop_channe_r:
data = self._drop_channel(data, snr, self.drop_channe_r);
data = self._adjust_amplitude_for_multichannels(data)
if self.scale_amplitude_r:
data = self._scale_amplitude(data, self.scale_amplitude_r);
if self.pre_emphasis:
data = self._pre_emphasis(data)
if self.norm_mode:
data = self._normalize(data, self.norm_mode)
elif dataset.attrs['trace_category'] == 'noise':
if self.drop_channe_r:
data = self._drop_channel_noise(data, self.drop_channe_r);
if self.add_gap_r:
data = self._add_gaps(data, self.add_gap_r)
if self.norm_mode:
data = self._normalize(data, self.norm_mode)
elif self.augmentation == False:
if self.shift_event_r and dataset.attrs['trace_category'] == 'earthquake_local':
data, spt, sst, coda_end = self._shift_event(data, spt, sst, coda_end, snr, self.shift_event_r/2);
if self.norm_mode:
data = self._normalize(data, self.norm_mode)
X[i, :, :] = data
## labeling
if dataset.attrs['trace_category'] == 'earthquake_local':
if self.label_type == 'gaussian':
sd = None
if spt and sst:
sd = sst - spt
if sd and sst:
if sst+int(self.coda_ratio*sd) <= self.dim:
y1[i, spt:int(sst+(self.coda_ratio*sd)), 0] = 1
else:
y1[i, spt:self.dim, 0] = 1
if spt and (spt-20 >= 0) and (spt+20 < self.dim):
y2[i, spt-20:spt+20, 0] = np.exp(-(np.arange(spt-20,spt+20)-spt)**2/(2*(10)**2))[:self.dim-(spt-20)]
elif spt and (spt-20 < self.dim):
y2[i, 0:spt+20, 0] = np.exp(-(np.arange(0,spt+20)-spt)**2/(2*(10)**2))[:self.dim-(spt-20)]
if sst and (sst-20 >= 0) and (sst-20 < self.dim):
y3[i, sst-20:sst+20, 0] = np.exp(-(np.arange(sst-20,sst+20)-sst)**2/(2*(10)**2))[:self.dim-(sst-20)]
elif sst and (sst-20 < self.dim):
y3[i, 0:sst+20, 0] = np.exp(-(np.arange(0,sst+20)-sst)**2/(2*(10)**2))[:self.dim-(sst-20)]
if additions:
add_spt = additions[0];
add_sst = additions[1];
add_sd = None
if add_spt and add_sst:
add_sd = add_sst - add_spt
if add_sd and add_sst+int(self.coda_ratio*add_sd) <= self.dim:
y1[i, add_spt:int(add_sst+(self.coda_ratio*add_sd)), 0] = 1
else:
y1[i, add_spt:self.dim, 0] = 1
if add_spt and (add_spt-20 >= 0) and (add_spt+20 < self.dim):
y2[i, add_spt-20:add_spt+20, 0] = np.exp(-(np.arange(add_spt-20,add_spt+20)-add_spt)**2/(2*(10)**2))[:self.dim-(add_spt-20)]
elif add_spt and (add_spt+20 < self.dim):
y2[i, 0:add_spt+20, 0] = np.exp(-(np.arange(0,add_spt+20)-add_spt)**2/(2*(10)**2))[:self.dim-(add_spt-20)]
if add_sst and (add_sst-20 >= 0) and (add_sst+20 < self.dim):
y3[i, add_sst-20:add_sst+20, 0] = np.exp(-(np.arange(add_sst-20,add_sst+20)-add_sst)**2/(2*(10)**2))[:self.dim-(add_sst-20)]
elif add_sst and (add_sst+20 < self.dim):
y3[i, 0:add_sst+20, 0] = np.exp(-(np.arange(0,add_sst+20)-add_sst)**2/(2*(10)**2))[:self.dim-(add_sst-20)]
elif self.label_type == 'triangle':
sd = None
if spt and sst:
sd = sst - spt
if sd and sst:
if sst+int(self.coda_ratio*sd) <= self.dim:
y1[i, spt:int(sst+(self.coda_ratio*sd)), 0] = 1
else:
y1[i, spt:self.dim, 0] = 1
if spt and (spt-20 >= 0) and (spt+21 < self.dim):
y2[i, spt-20:spt+21, 0] = self._label()
elif spt and (spt+21 < self.dim):
y2[i, 0:spt+spt+1, 0] = self._label(a=0, b=spt, c=2*spt)
elif spt and (spt-20 >= 0):
pdif = self.dim - spt
y2[i, spt-pdif-1:self.dim, 0] = self._label(a=spt-pdif, b=spt, c=2*pdif)
if sst and (sst-20 >= 0) and (sst+21 < self.dim):
y3[i, sst-20:sst+21, 0] = self._label()
elif sst and (sst+21 < self.dim):
y3[i, 0:sst+sst+1, 0] = self._label(a=0, b=sst, c=2*sst)
elif sst and (sst-20 >= 0):
sdif = self.dim - sst
y3[i, sst-sdif-1:self.dim, 0] = self._label(a=sst-sdif, b=sst, c=2*sdif)
if additions:
add_spt = additions[0];
add_sst = additions[1];
add_sd = None
if add_spt and add_sst:
add_sd = add_sst - add_spt
if add_sd and add_sst+int(self.coda_ratio*add_sd) <= self.dim:
y1[i, add_spt:int(add_sst+(self.coda_ratio*add_sd)), 0] = 1
else:
y1[i, add_spt:self.dim, 0] = 1
if add_spt and (add_spt-20 >= 0) and (add_spt+21 < self.dim):
y2[i, add_spt-20:add_spt+21, 0] = self._label()
elif add_spt and (add_spt+21 < self.dim):
y2[i, 0:add_spt+add_spt+1, 0] = self._label(a=0, b=add_spt, c=2*add_spt)
elif add_spt and (add_spt-20 >= 0):
pdif = self.dim - add_spt
y2[i, add_spt-pdif-1:self.dim, 0] = self._label(a=add_spt-pdif, b=add_spt, c=2*pdif)
if add_sst and (add_sst-20 >= 0) and (add_sst+21 < self.dim):
y3[i, add_sst-20:add_sst+21, 0] = self._label()
elif add_sst and (add_sst+21 < self.dim):
y3[i, 0:add_sst+add_sst+1, 0] = self._label(a=0, b=add_sst, c=2*add_sst)
elif add_sst and (add_sst-20 >= 0):
sdif = self.dim - add_sst
y3[i, add_sst-sdif-1:self.dim, 0] = self._label(a=add_sst-sdif, b=add_sst, c=2*sdif)
elif self.label_type == 'box':
sd = None
if sst and spt:
sd = sst - spt
if sd and sst+int(self.coda_ratio*sd) <= self.dim:
y1[i, spt:int(sst+(self.coda_ratio*sd)), 0] = 1
else:
y1[i, spt:self.dim, 0] = 1
if spt:
y2[i, spt-20:spt+20, 0] = 1
if sst:
y3[i, sst-20:sst+20, 0] = 1
if additions:
add_sd = None
add_spt = additions[0];
add_sst = additions[1];
if add_spt and add_sst:
add_sd = add_sst - add_spt
if add_sd and add_sst+int(self.coda_ratio*add_sd) <= self.dim:
y1[i, add_spt:int(add_sst+(self.coda_ratio*add_sd)), 0] = 1
else:
y1[i, add_spt:self.dim, 0] = 1
if add_spt:
y2[i, add_spt-20:add_spt+20, 0] = 1
if add_sst:
y3[i, add_sst-20:add_sst+20, 0] = 1
return X.astype('float32'), y1.astype('float32'), y2.astype('float32'), y3.astype('float32')
def data_reader( list_IDs,
file_name,
dim=6000,
n_channels=3,
norm_mode='max',
augmentation=False,
add_event_r=None,
add_gap_r=None,
coda_ratio=0.4,
shift_event_r=None,
add_noise_r=None,
drop_channe_r=None,
scale_amplitude_r=None,
pre_emphasis=True):
"""
For pre-processing and loading of data into memory.
Parameters
----------
list_IDs: list of str
List of trace names.
file_name: str
Path to the input hdf5 datasets.
dim: int, default=6000
Dimension of input traces, in sample.
n_channels: int, default=3
Number of channels.
norm_mode: str, default=max
The mode of normalization, 'max' or 'std'.
augmentation: bool, default=False
If True, half of each batch will be an augmented version of the other half.
add_event_r: {float, None}, default=None
Chance for randomly adding a second event into the waveform.
add_gap_r: {float, None}, default=None
Add an interval with zeros into the waveform representing filled gaps.
coda_ratio: {float, 0.4}, default=0.4
% of S-P time to extend event/coda envelope past S pick.
shift_event_r: {float, None}, default=None
Rate of augmentation for randomly shifting the event within a trace.
add_noise_r: {float, None}, default=None
Chance for randomly adding Gaussian noise into the waveform.
drop_channe_r: {float, None}, default=None
Chance for randomly dropping some of the channels.
scale_amplitude_r: {float, None}, default=None
Chance for randomly amplifying the waveform amplitude.
pre_emphasis: bool, default=True
If True, waveforms will be pre-emphasized.
Returns
--------
Batches of two dictionaries: {'input': X}: pre-processed waveform as input {'detector': y1, 'picker_P': y2, 'picker_S': y3}: outputs including three separate numpy arrays as labels for detection, P, and S respectively.
Note
-----
Label type is fixed to box.
"""
def _normalize( data, mode = 'max'):
'Normalize waveforms in each batch'
data -= np.mean(data, axis=0, keepdims=True)
if mode == 'max':
max_data = np.max(data, axis=0, keepdims=True)
assert(max_data.shape[-1] == data.shape[-1])
max_data[max_data == 0] = 1
data /= max_data
elif mode == 'std':
std_data = np.std(data, axis=0, keepdims=True)
assert(std_data.shape[-1] == data.shape[-1])
std_data[std_data == 0] = 1
data /= std_data
return data
def _scale_amplitude( data, rate):
'Scale amplitude of waveforms'
tmp =
|
np.random.uniform(0, 1)
|
numpy.random.uniform
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import numpy as np
from scipy.optimize import differential_evolution, minimize
from scipy.interpolate import interp1d
from scipy.integrate import solve_ivp, odeint
import sympy as sp
# from sklearn import ensemble #, tree # Left for gitch_doctor metamodel
import ProGED.mute_so as mt
from _io import TextIOWrapper as stdout_type
from ProGED.examples.tee_so import Tee
from ProGED.model_box import ModelBox
from ProGED.task import TASK_TYPES
# from ProGED.optimizers import DE_fit, DE_fit_metamodel, hyperopt_fit, min_fit
# glitch-doctor downloaded from github:
# from ProGED.glitch_doctor.metamodel import Metamodel
# import ProGED.glitch_doctor.metamodel.Metamodel
# import ProGED.glitch_doctor.model.Model
import warnings
warnings.filterwarnings("ignore", message="divide by zero encountered in divide")
warnings.filterwarnings("ignore", message="divide by zero encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in power")
warnings.filterwarnings("ignore", message="invalid value encountered in sqrt")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
warnings.filterwarnings("ignore", message="overflow encountered in exp")
warnings.filterwarnings("ignore", message="overflow encountered in square")
warnings.filterwarnings("ignore", message="overflow encountered in double_scalars")
"""Methods for estimating model parameters. Currently implemented: differential evolution.
Methods:
fit_models: Performs parameter estimation on given models. Main interface to the module.
"""
def model_error (params, model, X, Y, _T=None, estimation_settings=None):
"""Defines mean squared error as the error metric."""
try:
verbosity = estimation_settings['verbosity']
testY = model.evaluate(X, *params)
res = np.mean((Y-testY)**2)
if np.isnan(res) or np.isinf(res) or not np.isreal(res):
if verbosity >= 3:
print("isnan, isinf, isreal =", np.isnan(res),
np.isinf(res), not np.isreal(res))
print(model.expr, model.params, model.sym_params, model.sym_vars)
return estimation_settings['default_error']
if verbosity >= 3:
print("Function model_error did not encounter any "
"errors, the output *square error/loss* is legit.")
return res
except Exception as error:
if verbosity >= 2:
print("model_error: Params at error:", params,
f"and {type(error)} with message:", error)
if verbosity >= 1:
print(f"Program is returning default_error:"
f"{estimation_settings['default_error']}")
return estimation_settings['default_error']
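# Hedged usage note (added): for a fitted model object exposing evaluate(X, *params),
# model_error simply returns mean((Y - model.evaluate(X, *params))**2), falling back to
# estimation_settings['default_error'] whenever the prediction is NaN/inf/complex or
# evaluation raises an exception.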
# def model_constant_error (model, params, X, Y):
# """Alternative to model_error, intended to allow the discovery of physical constants.
# Work in progress."""
# testY = model.evaluate(X, *params)
# return np.std(testY)#/np.linalg.norm(params)
def model_error_general (params, model, X, Y, T, **estimation_settings):
"""Calculate error of model with given parameters in general with
type of error given.
Input = TODO:
- X are columns without features that are derived.
- Y are columns of features that are derived via ode fitting.
- T is column of times at which samples in X and Y happen.
- estimation_settings: look description of fit_models()
"""
task_type = estimation_settings["task_type"]
if task_type in ("algebraic", "integer-algebraic"):
return model_error(params, model, X, Y, _T=None,
estimation_settings=estimation_settings)
elif task_type == "differential":
return model_ode_error(params, model, X, Y, T, estimation_settings)
else:
types_string = "\", \"".join(TASK_TYPES)
raise ValueError("Variable task_type has unsupported value "
f"\"{task_type}\", while list of possible values: "
f"\"{types_string}\".")
def ode (models_list, params_matrix, T, X_data, y0, **estimation_settings):
"""Solve system of ODEs defined by equations in models_list.
Raise error if input is incompatible.
Input:
models_list -- list (not dictionary) of models that e.g.
generate_models() generates.
params_matrix -- list of lists or ndarrays of parameters for
corresponding models.
y0 -- array (1-dim) of initial value of vector function y(t)
i.e. y0 = y(T[0]) = [y1(T[0]), y2(T[0]), y3(T[0]),...].
X_data -- 2-dim array (matrix) i.e. X = [X[0,:], X[1,:],...].
T -- (1-dim) array, i.e. of shape (N,)
max_ode_steps -- maximal number of steps inside ODE solver to
determine the minimal step size inside ODE solver.
Output:
Solution of ODE evaluated at times T.
"""
if not (isinstance(models_list, list)
and (isinstance(params_matrix, list)
and len(params_matrix)>0
and isinstance(params_matrix[0], (list, np.ndarray)))
and X_data.ndim == 2
and y0.ndim == 1):
message = str(type(params_matrix[0])) + "\n"
info = (isinstance(models_list, list),
isinstance(params_matrix, list),
len(params_matrix)>0,
isinstance(params_matrix[0], (list, np.ndarray)),
X_data.ndim == 2,
y0.ndim == 1 )
print(message, info)
print("Function ode's defined error: Input arguments are not"
+" in the required form!")
raise TypeError(f"Function ode's defined error: Input arguments are not"
+f" in required form!"
+f"\n{message, info}")
elif not T.shape[0] == X_data.shape[0]:
print("Number of samples in T and X does not match.")
raise IndexError("Number of samples in T and X does not match.")
elif not (y0.shape[0] == len(models_list) #len(equations)=len(models used)
and len(models_list[0].sym_vars) == X_data.shape[1] + y0.shape[0]):
print("Number of symbols in models and combination of "
+ "number of equations and dimensions of input data"
+ " does not match.")
raise IndexError("Number of symbols in models and combination of "
+ "number of equations and dimensions of input data"
+ " does not match.")
X = interp1d(T, X_data, axis=0, kind='cubic', fill_value="extrapolate") # N-D
lamb_exprs = [
# sp.lambdify(model.sym_vars, model.full_expr(*params), "numpy")
model.lambdify(*params)
for model, params in zip(models_list, params_matrix)
]
def dy_dt(t, y):
"""Represents \frac{dy}{dt}.
y -- [y1,y2,y3,...] i.e. ( shape= (n,) ) """
# N-D:
b = np.concatenate((y, X(t))) # =[y,X(t)] =[y,X1(t),X2(t),...]
# Older version with *b.T:
return np.array([lamb_expr(*b) for lamb_expr in lamb_exprs])
# Older (default RK45) method:
# Yode = solve_ivp(dy_dt, (T[0], T[-1]), y0, t_eval=T, atol=0)
# Set min_step via prescribing maximum number of steps:
if "max_ode_steps" in estimation_settings:
max_steps = estimation_settings["max_ode_steps"]
else:
# max_steps = 10**6 # On laptop, this would need less than 3 seconds.
max_steps = T.shape[0]*10**3 # Set to |timepoints|*1000.
# Convert max_steps to min_step:
min_step_from_max_steps = abs(T[-1] - T[0])/max_steps
# The minimal min_step to avoid min step error in LSODA:
min_step_error = 10**(-15)
min_step = max(min_step_from_max_steps, min_step_error) # Force them both.
rtol = 10**(-4)
atol = 10**(-6)
# Yode = solve_ivp(dy_dt, (T[0], T[-1]), y0, t_eval=T, method="LSODA", rtol=rtol, atol=atol, min_step=min_step).y
# Alternative LSODA using odeint (may be faster?):
Yode = odeint(dy_dt, y0, T, rtol=rtol, atol=atol, tfirst=True, hmin=min_step).T
return Yode
def model_ode_error (params, model, X, Y, T, estimation_settings):
"""Defines mean squared error of solution to differential equation
as the error metric.
Input:
    - T is the column of times at which the samples in X and Y are taken.
    - X are columns of features that are not derived.
    - Y are columns of features that are derived via ODE fitting.
"""
model_list = [model]; params_matrix = [params] # 12multi conversion (temporary)
try:
        # The next few lines strongly suppress any warning messages
        # produced by the LSODA solver, which is called by the ode() function.
        # Suppression is further complicated when writing log files (Tee):
        change_std2tee = False  # Normally there is no need for this mess.
if isinstance(sys.stdout, Tee):
            # In this case the real standard output (sys.stdout) is no longer
            # available at its original location sys.stdout. We have to obtain
            # it from inside the Tee object (see module tee_so).
tee_object = sys.stdout # obtain Tee object that has sys.stdout
std_output = tee_object.stdout # Obtain sys.stdout.
sys.stdout = std_output # Change fake stdout to real stdout.
change_std2tee = True # Remember to change it back.
def run_ode():
return ode(model_list, params_matrix, T, X, y0=Y[:1],
**estimation_settings) # Y[:1] if _ or Y[0] if |
        # The next line works only when sys.stdout is the real stdout; that is why it was restored above.
if isinstance(sys.stdout, stdout_type):
with open(os.devnull, 'w') as f, mt.stdout_redirected(f):
try:
odeY = run_ode()
except Exception as error:
if estimation_settings["verbosity"] >= 1:
print("Inside ode(), preventing tee/IO error. Params at error:",
params, f"and {type(error)} with message:", error)
else:
odeY = run_ode()
if change_std2tee:
sys.stdout = tee_object # Change it back to fake stdout (tee).
        # odeY = odeY.T  # solve_ivp() returns in _ opposite (DxN) shape.
odeY = odeY[0] # If Y is landscape, i.e. _.
if not odeY.shape == Y.shape:
if estimation_settings["verbosity"] >= 3:
print("The ODE solver did not found ys at all times -> returning default error.")
if estimation_settings["verbosity"] >= 4:
print(odeY.shape, Y.shape)
return estimation_settings['default_error']
try:
res = np.mean((Y-odeY)**2)
if estimation_settings["verbosity"] >= 4:
print("succesfully returning now inside model_ode_error")
if np.isnan(res) or
|
np.isinf(res)
|
numpy.isinf
|
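# Editor's sketch (an assumption, mirroring the guard already used in model_error above)
# of how the truncated condition in model_ode_error likely continues once the completion
# np.isinf(res) is filled in:
#
#     if np.isnan(res) or np.isinf(res) or not np.isreal(res):
#         return estimation_settings['default_error']
#     return res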
import pandas as pd
import numpy as np
import scipy.stats
from inferelator import utils
from inferelator.regression import bayes_stats
from inferelator.regression import base_regression
from inferelator.regression import mi
from inferelator.distributed.inferelator_mp import MPControl
# Default number of predictors to include in the model
DEFAULT_nS = 10
# Default weight for priors & Non-priors
# If prior_weight is the same as no_prior_weight:
# Priors will be included in the pp matrix before the number of predictors is reduced to nS
# They won't get special treatment in the model though
DEFAULT_prior_weight = 1
DEFAULT_no_prior_weight = 1
# Throw away the priors which have a CLR that is 0 before the number of predictors is reduced by BIC
DEFAULT_filter_priors_for_clr = False
class BBSR(base_regression.BaseRegression):
    # Bayesian correlation measurements
# Priors Data
prior_mat = None # [G x K] # numeric
filter_priors_for_clr = DEFAULT_filter_priors_for_clr # bool
# Weights for Predictors (weights_mat is set with _calc_weight_matrix)
weights_mat = None # [G x K] numeric
prior_weight = DEFAULT_prior_weight # numeric
no_prior_weight = DEFAULT_no_prior_weight # numeric
# Predictors to include in modeling (pp is set with _build_pp_matrix)
pp = None # [G x K] bool
nS = DEFAULT_nS # int
ols_only = False
def __init__(self, X, Y, clr_mat, prior_mat, nS=DEFAULT_nS, prior_weight=DEFAULT_prior_weight,
no_prior_weight=DEFAULT_no_prior_weight, ordinary_least_squares=False):
"""
Create a Regression object for Bayes Best Subset Regression
:param X: Expression or Activity data [N x K]
:type X: InferelatorData
:param Y: Response expression data [N x G]
:type Y: InferelatorData
:param clr_mat: Calculated CLR between features of X & Y [G x K]
:type clr_mat: pd.DataFrame
:param prior_mat: Prior data between features of X & Y [G x K]
:type prior_mat: pd.DataFrame
:param nS: int
Number of predictors to retain
:param prior_weight: int
Weight of a predictor which does have a prior
:param no_prior_weight: int
Weight of a predictor which doesn't have a prior
"""
super(BBSR, self).__init__(X, Y)
self.nS = nS
self.ols_only = ordinary_least_squares
# Calculate the weight matrix
self.prior_weight = prior_weight
self.no_prior_weight = no_prior_weight
weights_mat = self._calculate_weight_matrix(prior_mat, p_weight=prior_weight, no_p_weight=no_prior_weight)
utils.Debug.vprint("Weight matrix {} construction complete".format(weights_mat.shape))
# Rebuild weights, priors, and the CLR matrix for the features that are in this bootstrap
self.weights_mat = weights_mat.loc[self.genes, self.tfs]
self.prior_mat = prior_mat.loc[self.genes, self.tfs]
self.clr_mat = clr_mat.loc[self.genes, self.tfs]
# Build a boolean matrix indicating which tfs should be used as predictors for regression for each gene
self.pp = self._build_pp_matrix()
def regress(self):
"""
Execute BBSR
:return: pd.DataFrame [G x K], pd.DataFrame [G x K]
Returns the regression betas and beta error reductions for all threads if this is the master thread (rank 0)
Returns None, None if it's a subordinate thread
"""
if MPControl.is_dask():
from inferelator.distributed.dask_functions import bbsr_regress_dask
return bbsr_regress_dask(self.X, self.Y, self.pp, self.weights_mat, self.G, self.genes, self.nS)
def regression_maker(j):
level = 0 if j % 100 == 0 else 2
utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=self.genes[j], i=j, total=self.G),
level=level)
data = bayes_stats.bbsr(self.X.values,
utils.scale_vector(self.Y.get_gene_data(j, force_dense=True).flatten()),
self.pp.iloc[j, :].values.flatten(),
self.weights_mat.iloc[j, :].values.flatten(),
self.nS,
ordinary_least_squares=self.ols_only)
data['ind'] = j
return data
return MPControl.map(regression_maker, range(self.G), tell_children=False)
def _build_pp_matrix(self):
"""
From priors and context likelihood of relatedness, determine which predictors should be included in the model
:return pp: pd.DataFrame [G x K]
Boolean matrix indicating which predictor variables should be included in BBSR for each response variable
"""
# Create a predictor boolean array from priors
pp =
|
np.logical_or(self.prior_mat != 0, self.weights_mat != self.no_prior_weight)
|
numpy.logical_or
|
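# Editor's sketch (an assumption, not inferelator code) of the predictor-selection idea
# behind the completion above: a TF is kept as a candidate predictor for a gene if it has
# a nonzero prior OR a weight different from the default no-prior weight.
import numpy as np
import pandas as pd

prior_mat = pd.DataFrame([[0, 1], [0, 0]], index=["g1", "g2"], columns=["tf1", "tf2"])
weights_mat = pd.DataFrame([[1, 2], [1, 1]], index=["g1", "g2"], columns=["tf1", "tf2"])
no_prior_weight = 1

pp = np.logical_or(prior_mat != 0, weights_mat != no_prior_weight)
print(pp)   # only (g1, tf2) is True; it has both a prior and a non-default weight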
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import coo_matrix
from skfem.mapping import MappingAffine
from .mesh2d import Mesh2D, MeshType
from typing import Optional, Tuple, Type, Dict
from matplotlib.axes import Axes
from numpy import ndarray
class MeshTri(Mesh2D):
"""A mesh consisting of triangular elements.
The different constructors are:
- :meth:`~skfem.mesh.MeshTri.__init__`
- :meth:`~skfem.mesh.MeshTri.load` (requires meshio)
- :meth:`~skfem.mesh.MeshTri.init_symmetric`
- :meth:`~skfem.mesh.MeshTri.init_sqsymmetric`
- :meth:`~skfem.mesh.MeshTri.init_refdom`
- :meth:`~skfem.mesh.MeshTri.init_tensor`
- :meth:`~skfem.mesh.MeshTri.init_lshaped`
Attributes
----------
facets
An array containing the facet vertices (2 x Nfacets).
f2t
An array containing the triangles next to each facet (2 x Nfacets).
        Each column contains two indices to t. If the second row is -1 then
        the facet is on the boundary.
t2f
An array containing the facets belonging to each triangle (3 x Nelems).
Each column contains three indices to facets.
Examples
--------
Initialise a symmetric mesh of the unit square.
>>> m = MeshTri.init_sqsymmetric()
>>> m.t.shape
(3, 8)
Facets (edges) and mappings from triangles to facets and vice versa are
automatically constructed. In the following example we have 5 facets
(edges).
>>> m = MeshTri()
>>> m.facets
array([[0, 0, 1, 1, 2],
[1, 2, 2, 3, 3]])
>>> m.t2f
array([[0, 2],
[2, 4],
[1, 3]])
>>> m.f2t
array([[ 0, 0, 1, 1, 1],
[-1, -1, 0, -1, -1]])
The value -1 implies that the facet (the edge) is on the boundary.
Refine the triangular mesh of the unit square three times.
>>> m = MeshTri()
>>> m.refine(3)
>>> m.p.shape
(2, 81)
"""
refdom: str = "tri"
brefdom: str = "line"
meshio_type: str = "triangle"
name: str = "Triangular"
def __init__(self,
p: Optional[ndarray] = None,
t: Optional[ndarray] = None,
boundaries: Optional[Dict[str, ndarray]] = None,
subdomains: Optional[Dict[str, ndarray]] = None,
validate: Optional[bool] = True,
sort_t: Optional[bool] = True):
"""Initialise a triangular mesh.
If no arguments are given, initialises a mesh with the following
topology::
*-------------*
|\ |
| \ |
| \ |
| \ |
| \ |
| \ |
| \|
*-------------*
Parameters
----------
p
An array containing the points of the mesh (2 x Nvertices).
t
An array containing the element connectivity (3 x Nelems), i.e.
indices to p.
validate
If true, run mesh validity checks.
sort_t
If true, sort the element connectivity matrix before building
mappings.
"""
if p is None and t is None:
p = np.array([[0., 1., 0., 1.],
[0., 0., 1., 1.]], dtype=np.float_)
t = np.array([[0, 1, 2],
[1, 3, 2]], dtype=np.intp).T
elif p is None or t is None:
raise Exception("Must provide p AND t or neither")
self.p = p
self.t = t
self.boundaries = boundaries
self.subdomains = subdomains
super(MeshTri, self).__init__()
if validate:
self._validate()
self._build_mappings(sort_t=sort_t)
@classmethod
def init_tensor(cls: Type[MeshType],
x: ndarray,
y: ndarray) -> MeshType:
"""Initialise a tensor product mesh.
Parameters
----------
x
The nodal coordinates in dimension x.
y
The nodal coordinates in dimension y.
"""
npx = len(x)
npy = len(y)
X, Y = np.meshgrid(np.sort(x), np.sort(y))
p = np.vstack((X.flatten('F'), Y.flatten('F')))
ix = np.arange(npx * npy)
nt = (npx - 1) * (npy - 1)
t = np.zeros((3, 2*nt))
ix = ix.reshape(npy, npx, order='F').copy()
t[0, :nt] = (ix[0:(npy-1), 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[1, :nt] = (ix[1:npy, 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[2, :nt] = (ix[1:npy, 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
t[0, nt:] = (ix[0:(npy-1), 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[1, nt:] = (ix[0:(npy-1), 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
t[2, nt:] = (ix[1:npy, 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
return cls(p, t.astype(np.int64))
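    # Editor's note: a usage sketch for init_tensor (illustrative only, not skfem code):
    #
    #     m = MeshTri.init_tensor(np.linspace(0., 1., 4), np.linspace(0., 1., 3))
    #     m.p.shape   # (2, 12) -- a 4 x 3 grid of points
    #     m.t.shape   # (3, 12) -- each of the (4 - 1) * (3 - 1) cells is split into 2 triangles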
@classmethod
def init_symmetric(cls):
"""Initialise a symmetric mesh of the unit square.
The mesh topology is as follows::
*------------*
|\ /|
| \ / |
| \ / |
| * |
| / \ |
| / \ |
|/ \|
*------------*
"""
p = np.array([[0, 1, 1, 0, 0.5],
[0, 0, 1, 1, 0.5]], dtype=np.float_)
t = np.array([[0, 1, 4],
[1, 2, 4],
[2, 3, 4],
[0, 3, 4]], dtype=np.intp).T
return cls(p, t)
@classmethod
def init_sqsymmetric(cls: Type[MeshType]) -> MeshType:
"""Initialise a symmetric mesh of the unit square.
The mesh topology is as follows::
*------*------*
|\ | /|
| \ | / |
| \ | / |
*------*------*
| / | \ |
| / | \ |
|/ | \|
*------*------*
"""
p = np.array([[0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1],
[0, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1]], dtype=np.float_)
t = np.array([[0, 1, 4],
[1, 2, 4],
[2, 4, 5],
[0, 3, 4],
[3, 4, 6],
[4, 6, 7],
[4, 7, 8],
[4, 5, 8]], dtype=np.intp).T
return cls(p, t)
@classmethod
def init_refdom(cls: Type[MeshType]) -> MeshType:
"""Initialise a mesh that includes only the reference triangle.
The mesh topology is as follows::
*
|\
| \
| \
| \
| \
| \
| \
*-------------*
"""
p = np.array([[0., 1., 0.],
[0., 0., 1.]], dtype=np.float_)
t = np.array([[0, 1, 2]], dtype=np.intp).T
return cls(p, t)
@classmethod
def init_lshaped(cls: Type[MeshType]) -> MeshType:
"""Initialise a mesh for the L-shaped domain.
The mesh topology is as follows::
*-------*
| \ |
| \ |
| \ |
*-------*-------*
| / | \ |
| / | \ |
| / | \ |
*-------*-------*
where the origin is at the L-corner and the horizontal and vertical
edges have unit length.
"""
p = np.array([[0., 1., 0., -1., 0., -1., -1., 1.],
[0., 0., 1., 0., -1., -1., 1., -1.]], dtype=np.float_)
t = np.array([[0, 1, 7],
[0, 2, 6],
[0, 6, 3],
[0, 7, 4],
[0, 4, 5],
[0, 3, 5]], dtype=np.intp).T
return cls(p, t)
def _build_mappings(self, sort_t=True):
# sort to preserve orientations etc.
if sort_t:
self.t = np.sort(self.t, axis=0)
# define facets: in the order (0,1) (1,2) (0,2)
self.facets = np.sort(np.hstack((
self.t[[0, 1], :],
self.t[[1, 2], :],
self.t[[0, 2], :],
)), axis=0)
# get unique facets and build triangle-to-facet
# mapping: 3 (edges) x Ntris
tmp = np.ascontiguousarray(self.facets.T)
tmp, ixa, ixb = np.unique(tmp.view([('', tmp.dtype)] * tmp.shape[1]),
return_index=True, return_inverse=True)
self.facets = self.facets[:, ixa]
self.t2f = ixb.reshape((3, self.t.shape[1]))
# build facet-to-triangle mapping: 2 (triangles) x Nedges
e_tmp = np.hstack((self.t2f[0, :], self.t2f[1, :], self.t2f[2, :]))
t_tmp = np.tile(np.arange(self.t.shape[1]), (1, 3))[0]
e_first, ix_first = np.unique(e_tmp, return_index=True)
# this emulates matlab unique(e_tmp,'last')
e_last, ix_last = np.unique(e_tmp[::-1], return_index=True)
ix_last = e_tmp.shape[0] - ix_last - 1
self.f2t = np.zeros((2, self.facets.shape[1]), dtype=np.int64)
self.f2t[0, e_first] = t_tmp[ix_first]
self.f2t[1, e_last] = t_tmp[ix_last]
        # second row to -1 if repeated (i.e., on boundary)
self.f2t[1, np.nonzero(self.f2t[0, :] == self.f2t[1, :])[0]] = -1
def plot(self,
z: ndarray,
smooth: Optional[bool] = False,
ax: Optional[Axes] = None,
zlim: Optional[Tuple[float, float]] = None,
edgecolors: Optional[str] = None,
aspect: float = 1.,
colorbar: bool = False) -> Axes:
"""Visualise piecewise-linear or piecewise-constant function, 2D plot.
Parameters
----------
z
An array of nodal values (Nvertices) or elemental values (Nelems).
smooth
If true, use gouraud shading.
ax
Plot onto the given preinitialised Matplotlib axes.
zlim
Use the given minimum and maximum values for coloring.
edgecolors
A string describing the edge coloring, e.g. 'k' for black.
aspect
The ratio of vertical to horizontal length-scales; ignored if ax
specified.
colorbar
If True, show colorbar. By default not shown.
Returns
-------
Axes
The Matplotlib axes onto which the mesh was plotted.
Examples
--------
Mesh the unit square :math:`(0,1)^2` and visualise the function
:math:`f(x)=x^2`.
>>> from skfem.mesh import MeshTri
>>> m = MeshTri()
>>> m.refine(3)
>>> ax = m.plot(m.p[0, :]**2, smooth=True)
>>> m.show()
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect(aspect)
ax.set_axis_off()
if edgecolors is None:
edgecolors = 'k'
        if zlim is None:
if smooth:
im = ax.tripcolor(self.p[0, :],
self.p[1, :],
self.t.T,
z,
shading='gouraud',
edgecolors=edgecolors)
else:
im = ax.tripcolor(self.p[0, :],
self.p[1, :],
self.t.T,
z,
edgecolors=edgecolors)
else:
if smooth:
im = ax.tripcolor(self.p[0, :],
self.p[1, :],
self.t.T,
z,
shading='gouraud',
vmin=zlim[0],
vmax=zlim[1],
edgecolors=edgecolors)
else:
im = ax.tripcolor(self.p[0, :],
self.p[1, :],
self.t.T,
z,
vmin=zlim[0],
vmax=zlim[1],
edgecolors=edgecolors)
if colorbar:
plt.colorbar(im)
return ax
def plot3(self,
z: ndarray,
ax: Optional[Axes] = None) -> Axes:
"""Visualise piecewise-linear or piecewise-constant function, 3D plot.
Parameters
----------
z
An array of nodal values (Nvertices), elemental values (Nelems)
or three elemental values (3 x Nelems, piecewise linear DG).
ax
Plot onto the given preinitialised Matplotlib axes.
Returns
-------
Axes
The Matplotlib axes onto which the mesh was plotted.
Examples
--------
Mesh the unit square :math:`(0,1)^2` and visualise the function
:math:`f(x)=x^2`.
>>> from skfem.mesh import MeshTri
>>> m = MeshTri()
>>> m.refine(3)
>>> ax = m.plot3(m.p[1, :]**2)
>>> m.show()
"""
from mpl_toolkits.mplot3d import Axes3D
if ax is None:
fig = plt.figure()
ax = Axes3D(fig)
if len(z) == self.p.shape[1]:
# use matplotlib
ax.plot_trisurf(self.p[0, :],
self.p[1, :],
z,
triangles=self.t.T,
cmap=plt.cm.viridis)
elif len(z) == self.t.shape[1]:
# one value per element (piecewise const)
nt = self.t.shape[1]
newt = np.arange(3 * nt, dtype=np.int64).reshape((nt, 3))
newpx = self.p[0, self.t].flatten(order='F')
newpy = self.p[1, self.t].flatten(order='F')
newz = np.vstack((z, z, z)).flatten(order='F')
ax.plot_trisurf(newpx, newpy, newz,
triangles=newt.T,
cmap=plt.cm.viridis)
elif len(z) == 3 * self.t.shape[1]:
# three values per element (piecewise linear)
nt = self.t.shape[1]
newt = np.arange(3 * nt, dtype=np.int64).reshape((nt, 3))
newpx = self.p[0, self.t].flatten(order='F')
newpy = self.p[1, self.t].flatten(order='F')
ax.plot_trisurf(newpx,
newpy,
z,
triangles=newt.T,
cmap=plt.cm.viridis)
else:
raise NotImplementedError("MeshTri.plot3: not implemented for "
"the given shape of input vector!")
return ax
def _uniform_refine(self):
"""Perform a single mesh refine."""
# rename variables
t = np.copy(self.t)
p = np.copy(self.p)
e = self.facets
sz = p.shape[1]
t2f = self.t2f + sz
# new vertices are the midpoints of edges
new_p = 0.5 * np.vstack((p[0, e[0, :]] + p[0, e[1, :]],
p[1, e[0, :]] + p[1, e[1, :]]))
self.p = np.hstack((p, new_p))
# build new triangle definitions
self.t = np.hstack((
np.vstack((t[0, :], t2f[0, :], t2f[2, :])),
np.vstack((t[1, :], t2f[0, :], t2f[1, :])),
np.vstack((t[2, :], t2f[2, :], t2f[1, :])),
np.vstack((t2f[0, :], t2f[1, :], t2f[2, :])),
))
# mapping of indices between old and new facets
new_facets = np.zeros((2, e.shape[1]), dtype=np.int64)
ix0 = np.arange(t.shape[1], dtype=np.int64)
ix1 = ix0 + t.shape[1]
ix2 = ix0 + 2*t.shape[1]
# rebuild mappings
self._build_mappings()
# finish mapping of indices between old and new facets
new_facets[0, t2f[2, :] - sz] = self.t2f[2, ix0]
new_facets[0, t2f[1, :] - sz] = self.t2f[2, ix1]
new_facets[0, t2f[0, :] - sz] = self.t2f[0, ix0]
new_facets[1, t2f[2, :] - sz] = self.t2f[0, ix2]
new_facets[1, t2f[1, :] - sz] = self.t2f[2, ix2]
new_facets[1, t2f[0, :] - sz] = self.t2f[0, ix1]
self._fix_boundaries(new_facets)
def _adaptive_refine(self, marked):
"""Refine the set of provided elements."""
def sort_mesh(p, t):
"""Make (0, 2) the longest edge in t."""
l01 = np.sqrt(np.sum((p[:, t[0, :]] - p[:, t[1, :]])**2, axis=0))
l12 = np.sqrt(np.sum((p[:, t[1, :]] - p[:, t[2, :]])**2, axis=0))
l02 = np.sqrt(np.sum((p[:, t[0, :]] - p[:, t[2, :]])**2, axis=0))
ix01 = (l01 > l02)*(l01 > l12)
ix12 = (l12 > l01)*(l12 > l02)
# row swaps
tmp = t[2, ix01]
t[2, ix01] = t[1, ix01]
t[1, ix01] = tmp
tmp = t[0, ix12]
t[0, ix12] = t[1, ix12]
t[1, ix12] = tmp
return t
def find_facets(m, marked_elems):
"""Find the facets to split."""
facets = np.zeros(m.facets.shape[1], dtype=np.int64)
facets[m.t2f[:, marked_elems].flatten('F')] = 1
prev_nnz = -1e10
while np.count_nonzero(facets) - prev_nnz > 0:
prev_nnz = np.count_nonzero(facets)
t2facets = facets[m.t2f]
t2facets[2, t2facets[0, :] + t2facets[1, :] > 0] = 1
facets[m.t2f[t2facets == 1]] = 1
return facets
def split_elements(m, facets):
"""Define new elements."""
ix = (-1)*np.ones(m.facets.shape[1], dtype=np.int64)
ix[facets == 1] = np.arange(np.count_nonzero(facets)) + m.p.shape[1]
ix = ix[m.t2f] # (0, 1) (1, 2) (0, 2)
red = (ix[0, :] >= 0) * (ix[1, :] >= 0) * (ix[2, :] >= 0)
blue1 = (ix[0, :] ==-1) * (ix[1, :] >= 0) * (ix[2, :] >= 0)
blue2 = (ix[0, :] >= 0) * (ix[1, :] ==-1) * (ix[2, :] >= 0)
green = (ix[0, :] ==-1) * (ix[1, :] ==-1) * (ix[2, :] >= 0)
rest = (ix[0, :] ==-1) * (ix[1, :] ==-1) * (ix[2, :] ==-1)
# new red elements
t_red = np.hstack((
np.vstack((m.t[0, red], ix[0, red], ix[2, red])),
np.vstack((m.t[1, red], ix[0, red], ix[1, red])),
np.vstack((m.t[2, red], ix[1, red], ix[2, red])),
np.vstack(( ix[1, red], ix[2, red], ix[0, red])),
))
# new blue elements
t_blue1 = np.hstack((
np.vstack((m.t[1, blue1], m.t[0, blue1], ix[2, blue1])),
np.vstack((m.t[1, blue1], ix[1, blue1], ix[2, blue1])),
np.vstack((m.t[2, blue1], ix[2, blue1], ix[1, blue1])),
))
t_blue2 = np.hstack((
np.vstack((m.t[0, blue2], ix[0, blue2], ix[2, blue2])),
np.vstack(( ix[2, blue2], ix[0, blue2], m.t[1, blue2])),
np.vstack((m.t[2, blue2], ix[2, blue2], m.t[1, blue2])),
))
# new green elements
t_green = np.hstack((
np.vstack((m.t[1, green], ix[2, green], m.t[0, green])),
np.vstack((m.t[2, green], ix[2, green], m.t[1, green])),
))
# new nodes
p = .5 * (m.p[:, m.facets[0, facets == 1]] +
m.p[:, m.facets[1, facets == 1]])
return
|
np.hstack((m.p, p))
|
numpy.hstack
|
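# Editor's sketch (an assumption, not scikit-fem code) of the operation performed by the
# completion above: the newly created edge-midpoint nodes are appended to the existing
# 2 x Nvertices point array with np.hstack.
import numpy as np

old_p = np.array([[0., 1., 0.],
                  [0., 0., 1.]])       # 2 x 3: existing vertices
new_p = np.array([[0.5, 0.5, 0.0],
                  [0.0, 0.5, 0.5]])    # 2 x 3: edge midpoints
p = np.hstack((old_p, new_p))          # 2 x 6: old vertices followed by the new nodes
print(p.shape)                         # (2, 6)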
# AUTO GENERATED. DO NOT CHANGE!
from ctypes import *
import numpy as np
class MJCONTACT(Structure):
_fields_ = [
("dist", c_double),
("pos", c_double * 3),
("frame", c_double * 9),
("includemargin", c_double),
("friction", c_double * 5),
("solref", c_double * 2),
("solimp", c_double * 3),
("mu", c_double),
("coef", c_double * 5),
("zone", c_int),
("dim", c_int),
("geom1", c_int),
("geom2", c_int),
("exclude", c_int),
("efc_address", c_int),
]
class MJRRECT(Structure):
_fields_ = [
("left", c_int),
("bottom", c_int),
("width", c_int),
("height", c_int),
]
class MJVCAMERAPOSE(Structure):
_fields_ = [
("head_pos", c_double * 3),
("head_right", c_double * 3),
("window_pos", c_double * 3),
("window_right", c_double * 3),
("window_up", c_double * 3),
("window_normal", c_double * 3),
("window_size", c_double * 2),
("scale", c_double),
("ipd", c_double),
]
class MJROPTION(Structure):
_fields_ = [
("stereo", c_ubyte),
("flags", c_ubyte * 6),
]
class MJRCONTEXT(Structure):
_fields_ = [
("linewidth", c_float),
("znear", c_float),
("zfar", c_float),
("shadowclip", c_float),
("shadowscale", c_float),
("shadowsize", c_int),
("offwidth", c_uint),
("offheight", c_uint),
("offFBO", c_uint),
("offColor", c_uint),
("offDepthStencil", c_uint),
("shadowFBO", c_uint),
("shadowTex", c_uint),
("ntexture", c_uint),
("texture", c_int * 100),
("textureType", c_int * 100),
("basePlane", c_uint),
("baseMesh", c_uint),
("baseHField", c_uint),
("baseBuiltin", c_uint),
("baseFontNormal", c_uint),
("baseFontBack", c_uint),
("baseFontBig", c_uint),
("rangePlane", c_int),
("rangeMesh", c_int),
("rangeHField", c_int),
("rangeBuiltin", c_int),
("rangeFont", c_int),
("charWidth", c_int * 127),
("charWidthBig", c_int * 127),
("charHeight", c_int),
("charHeightBig", c_int),
("glewInitialized", c_int),
]
class MJVCAMERA(Structure):
_fields_ = [
("fovy", c_double),
("camid", c_int),
("trackbodyid", c_int),
("lookat", c_double * 3),
("azimuth", c_double),
("elevation", c_double),
("distance", c_double),
("pose", MJVCAMERAPOSE),
("VR", c_ubyte),
]
class MJVOPTION(Structure):
_fields_ = [
("label", c_int),
("frame", c_int),
("geomgroup", c_ubyte * 5),
("sitegroup", c_ubyte * 5),
("flags", c_ubyte * 18),
]
class MJVGEOM(Structure):
_fields_ = [
("type", c_int),
("dataid", c_int),
("objtype", c_int),
("objid", c_int),
("category", c_int),
("texid", c_int),
("texuniform", c_int),
("texrepeat", c_float * 2),
("size", c_float * 3),
("pos", c_float * 3),
("mat", c_float * 9),
("rgba", c_float * 4),
("emission", c_float),
("specular", c_float),
("shininess", c_float),
("reflectance", c_float),
("label", c_char * 100),
("camdist", c_float),
("rbound", c_float),
("transparent", c_ubyte),
]
class MJVLIGHT(Structure):
_fields_ = [
("pos", c_float * 3),
("dir", c_float * 3),
("attenuation", c_float * 3),
("cutoff", c_float),
("exponent", c_float),
("ambient", c_float * 3),
("diffuse", c_float * 3),
("specular", c_float * 3),
("headlight", c_ubyte),
("directional", c_ubyte),
("castshadow", c_ubyte),
]
class MJVOBJECTS(Structure):
_fields_ = [
("nlight", c_int),
("ngeom", c_int),
("maxgeom", c_int),
("lights", MJVLIGHT * 8),
("geoms", POINTER(MJVGEOM)),
("geomorder", POINTER(c_int)),
]
class MJOPTION(Structure):
_fields_ = [
("timestep", c_double),
("apirate", c_double),
("tolerance", c_double),
("impratio", c_double),
("gravity", c_double * 3),
("wind", c_double * 3),
("magnetic", c_double * 3),
("density", c_double),
("viscosity", c_double),
("o_margin", c_double),
("o_solref", c_double * 2),
("o_solimp", c_double * 3),
("mpr_tolerance", c_double),
("mpr_iterations", c_int),
("integrator", c_int),
("collision", c_int),
("impedance", c_int),
("reference", c_int),
("solver", c_int),
("iterations", c_int),
("disableflags", c_int),
("enableflags", c_int),
]
class MJVISUAL(Structure):
class ANON_GLOBAL(Structure):
_fields_ = [
("fovy", c_float),
("ipd", c_float),
("linewidth", c_float),
("glow", c_float),
("offwidth", c_int),
("offheight", c_int),
]
class ANON_QUALITY(Structure):
_fields_ = [
("shadowsize", c_int),
("numSlices", c_int),
("numStacks", c_int),
("numArrows", c_int),
("numQuads", c_int),
]
class ANON_HEADLIGHT(Structure):
_fields_ = [
("ambient", c_float * 3),
("diffuse", c_float * 3),
("specular", c_float * 3),
("active", c_int),
]
class ANON_MAP(Structure):
_fields_ = [
("stiffness", c_float),
("force", c_float),
("torque", c_float),
("alpha", c_float),
("fogstart", c_float),
("fogend", c_float),
("znear", c_float),
("zfar", c_float),
("shadowclip", c_float),
("shadowscale", c_float),
]
class ANON_SCALE(Structure):
_fields_ = [
("forcewidth", c_float),
("contactwidth", c_float),
("contactheight", c_float),
("connect", c_float),
("com", c_float),
("camera", c_float),
("light", c_float),
("selectpoint", c_float),
("jointlength", c_float),
("jointwidth", c_float),
("actuatorlength", c_float),
("actuatorwidth", c_float),
("framelength", c_float),
("framewidth", c_float),
("constraint", c_float),
("slidercrank", c_float),
]
class ANON_RGBA(Structure):
_fields_ = [
("fog", c_float * 4),
("force", c_float * 4),
("inertia", c_float * 4),
("joint", c_float * 4),
("actuator", c_float * 4),
("com", c_float * 4),
("camera", c_float * 4),
("light", c_float * 4),
("selectpoint", c_float * 4),
("connect", c_float * 4),
("contactpoint", c_float * 4),
("contactforce", c_float * 4),
("contactfriction", c_float * 4),
("contacttorque", c_float * 4),
("constraint", c_float * 4),
("slidercrank", c_float * 4),
("crankbroken", c_float * 4),
]
_fields_ = [
("global_", ANON_GLOBAL),
("quality", ANON_QUALITY),
("headlight", ANON_HEADLIGHT),
("map_", ANON_MAP),
("scale", ANON_SCALE),
("rgba", ANON_RGBA),
]
class MJSTATISTIC(Structure):
_fields_ = [
("meanmass", c_double),
("meansize", c_double),
("extent", c_double),
("center", c_double * 3),
]
class MJDATA(Structure):
_fields_ = [
("nstack", c_int),
("nbuffer", c_int),
("pstack", c_int),
("maxstackuse", c_int),
("ne", c_int),
("nf", c_int),
("nefc", c_int),
("ncon", c_int),
("nwarning", c_int * 8),
("warning_info", c_int * 8),
("timer_duration", c_double * 14),
("timer_ncall", c_double * 14),
("mocaptime", c_double * 3),
("time", c_double),
("energy", c_double * 2),
("solverstat", c_double * 4),
("solvertrace", c_double * 200),
("buffer", POINTER(c_ubyte)),
("stack", POINTER(c_double)),
("qpos", POINTER(c_double)),
("qvel", POINTER(c_double)),
("act", POINTER(c_double)),
("ctrl", POINTER(c_double)),
("qfrc_applied", POINTER(c_double)),
("xfrc_applied", POINTER(c_double)),
("qacc", POINTER(c_double)),
("act_dot", POINTER(c_double)),
("mocap_pos", POINTER(c_double)),
("mocap_quat", POINTER(c_double)),
("userdata", POINTER(c_double)),
("sensordata", POINTER(c_double)),
("xpos", POINTER(c_double)),
("xquat", POINTER(c_double)),
("xmat", POINTER(c_double)),
("xipos", POINTER(c_double)),
("ximat", POINTER(c_double)),
("xanchor", POINTER(c_double)),
("xaxis", POINTER(c_double)),
("geom_xpos", POINTER(c_double)),
("geom_xmat", POINTER(c_double)),
("site_xpos", POINTER(c_double)),
("site_xmat", POINTER(c_double)),
("cam_xpos", POINTER(c_double)),
("cam_xmat", POINTER(c_double)),
("light_xpos", POINTER(c_double)),
("light_xdir", POINTER(c_double)),
("com_subtree", POINTER(c_double)),
("cdof", POINTER(c_double)),
("cinert", POINTER(c_double)),
("ten_wrapadr", POINTER(c_int)),
("ten_wrapnum", POINTER(c_int)),
("ten_length", POINTER(c_double)),
("ten_moment", POINTER(c_double)),
("wrap_obj", POINTER(c_int)),
("wrap_xpos", POINTER(c_double)),
("actuator_length", POINTER(c_double)),
("actuator_moment", POINTER(c_double)),
("crb", POINTER(c_double)),
("qM", POINTER(c_double)),
("qLD", POINTER(c_double)),
("qLDiagInv", POINTER(c_double)),
("qLDiagSqrtInv", POINTER(c_double)),
("contact", POINTER(MJCONTACT)),
("efc_type", POINTER(c_int)),
("efc_id", POINTER(c_int)),
("efc_rownnz", POINTER(c_int)),
("efc_rowadr", POINTER(c_int)),
("efc_colind", POINTER(c_int)),
("efc_rownnz_T", POINTER(c_int)),
("efc_rowadr_T", POINTER(c_int)),
("efc_colind_T", POINTER(c_int)),
("efc_solref", POINTER(c_double)),
("efc_solimp", POINTER(c_double)),
("efc_margin", POINTER(c_double)),
("efc_frictionloss", POINTER(c_double)),
("efc_pos", POINTER(c_double)),
("efc_J", POINTER(c_double)),
("efc_J_T", POINTER(c_double)),
("efc_diagApprox", POINTER(c_double)),
("efc_D", POINTER(c_double)),
("efc_R", POINTER(c_double)),
("efc_AR", POINTER(c_double)),
("e_ARchol", POINTER(c_double)),
("fc_e_rect", POINTER(c_double)),
("fc_AR", POINTER(c_double)),
("ten_velocity", POINTER(c_double)),
("actuator_velocity", POINTER(c_double)),
("cvel", POINTER(c_double)),
("cdof_dot", POINTER(c_double)),
("qfrc_bias", POINTER(c_double)),
("qfrc_passive", POINTER(c_double)),
("efc_vel", POINTER(c_double)),
("efc_aref", POINTER(c_double)),
("actuator_force", POINTER(c_double)),
("qfrc_actuator", POINTER(c_double)),
("qfrc_unc", POINTER(c_double)),
("qacc_unc", POINTER(c_double)),
("efc_b", POINTER(c_double)),
("fc_b", POINTER(c_double)),
("efc_force", POINTER(c_double)),
("qfrc_constraint", POINTER(c_double)),
("qfrc_inverse", POINTER(c_double)),
("cacc", POINTER(c_double)),
("cfrc_int", POINTER(c_double)),
("cfrc_ext", POINTER(c_double)),
]
class MJMODEL(Structure):
_fields_ = [
("nq", c_int),
("nv", c_int),
("nu", c_int),
("na", c_int),
("nbody", c_int),
("njnt", c_int),
("ngeom", c_int),
("nsite", c_int),
("ncam", c_int),
("nlight", c_int),
("nmesh", c_int),
("nmeshvert", c_int),
("nmeshface", c_int),
("nmeshgraph", c_int),
("nhfield", c_int),
("nhfielddata", c_int),
("ntex", c_int),
("ntexdata", c_int),
("nmat", c_int),
("npair", c_int),
("nexclude", c_int),
("neq", c_int),
("ntendon", c_int),
("nwrap", c_int),
("nsensor", c_int),
("nnumeric", c_int),
("nnumericdata", c_int),
("ntext", c_int),
("ntextdata", c_int),
("nkey", c_int),
("nuser_body", c_int),
("nuser_jnt", c_int),
("nuser_geom", c_int),
("nuser_site", c_int),
("nuser_tendon", c_int),
("nuser_actuator", c_int),
("nuser_sensor", c_int),
("nnames", c_int),
("nM", c_int),
("nemax", c_int),
("njmax", c_int),
("nconmax", c_int),
("nstack", c_int),
("nuserdata", c_int),
("nmocap", c_int),
("nsensordata", c_int),
("nbuffer", c_int),
("opt", MJOPTION),
("vis", MJVISUAL),
("stat", MJSTATISTIC),
("buffer", POINTER(c_ubyte)),
("qpos0", POINTER(c_double)),
("qpos_spring", POINTER(c_double)),
("body_parentid", POINTER(c_int)),
("body_rootid", POINTER(c_int)),
("body_weldid", POINTER(c_int)),
("body_mocapid", POINTER(c_int)),
("body_jntnum", POINTER(c_int)),
("body_jntadr", POINTER(c_int)),
("body_dofnum", POINTER(c_int)),
("body_dofadr", POINTER(c_int)),
("body_geomnum", POINTER(c_int)),
("body_geomadr", POINTER(c_int)),
("body_pos", POINTER(c_double)),
("body_quat", POINTER(c_double)),
("body_ipos", POINTER(c_double)),
("body_iquat", POINTER(c_double)),
("body_mass", POINTER(c_double)),
("body_inertia", POINTER(c_double)),
("body_invweight0", POINTER(c_double)),
("body_user", POINTER(c_double)),
("jnt_type", POINTER(c_int)),
("jnt_qposadr", POINTER(c_int)),
("jnt_dofadr", POINTER(c_int)),
("jnt_bodyid", POINTER(c_int)),
("jnt_limited", POINTER(c_ubyte)),
("jnt_solref", POINTER(c_double)),
("jnt_solimp", POINTER(c_double)),
("jnt_pos", POINTER(c_double)),
("jnt_axis", POINTER(c_double)),
("jnt_stiffness", POINTER(c_double)),
("jnt_range", POINTER(c_double)),
("jnt_margin", POINTER(c_double)),
("jnt_user", POINTER(c_double)),
("dof_bodyid", POINTER(c_int)),
("dof_jntid", POINTER(c_int)),
("dof_parentid", POINTER(c_int)),
("dof_Madr", POINTER(c_int)),
("dof_frictional", POINTER(c_ubyte)),
("dof_solref", POINTER(c_double)),
("dof_solimp", POINTER(c_double)),
("dof_frictionloss", POINTER(c_double)),
("dof_armature", POINTER(c_double)),
("dof_damping", POINTER(c_double)),
("dof_invweight0", POINTER(c_double)),
("geom_type", POINTER(c_int)),
("geom_contype", POINTER(c_int)),
("geom_conaffinity", POINTER(c_int)),
("geom_condim", POINTER(c_int)),
("geom_bodyid", POINTER(c_int)),
("geom_dataid", POINTER(c_int)),
("geom_matid", POINTER(c_int)),
("geom_group", POINTER(c_int)),
("geom_solmix", POINTER(c_double)),
("geom_solref", POINTER(c_double)),
("geom_solimp", POINTER(c_double)),
("geom_size", POINTER(c_double)),
("geom_rbound", POINTER(c_double)),
("geom_pos", POINTER(c_double)),
("geom_quat", POINTER(c_double)),
("geom_friction", POINTER(c_double)),
("geom_margin", POINTER(c_double)),
("geom_gap", POINTER(c_double)),
("geom_user", POINTER(c_double)),
("geom_rgba", POINTER(c_float)),
("site_type", POINTER(c_int)),
("site_bodyid", POINTER(c_int)),
("site_matid", POINTER(c_int)),
("site_group", POINTER(c_int)),
("site_size", POINTER(c_double)),
("site_pos", POINTER(c_double)),
("site_quat", POINTER(c_double)),
("site_user", POINTER(c_double)),
("site_rgba", POINTER(c_float)),
("cam_mode", POINTER(c_int)),
("cam_bodyid", POINTER(c_int)),
("cam_targetbodyid", POINTER(c_int)),
("cam_pos", POINTER(c_double)),
("cam_quat", POINTER(c_double)),
("cam_poscom0", POINTER(c_double)),
("cam_pos0", POINTER(c_double)),
("cam_mat0", POINTER(c_double)),
("cam_fovy", POINTER(c_double)),
("cam_ipd", POINTER(c_double)),
("light_mode", POINTER(c_int)),
("light_bodyid", POINTER(c_int)),
("light_targetbodyid", POINTER(c_int)),
("light_directional", POINTER(c_ubyte)),
("light_castshadow", POINTER(c_ubyte)),
("light_active", POINTER(c_ubyte)),
("light_pos", POINTER(c_double)),
("light_dir", POINTER(c_double)),
("light_poscom0", POINTER(c_double)),
("light_pos0", POINTER(c_double)),
("light_dir0", POINTER(c_double)),
("light_attenuation", POINTER(c_float)),
("light_cutoff", POINTER(c_float)),
("light_exponent", POINTER(c_float)),
("light_ambient", POINTER(c_float)),
("light_diffuse", POINTER(c_float)),
("light_specular", POINTER(c_float)),
("mesh_faceadr", POINTER(c_int)),
("mesh_facenum", POINTER(c_int)),
("mesh_vertadr", POINTER(c_int)),
("mesh_vertnum", POINTER(c_int)),
("mesh_graphadr", POINTER(c_int)),
("mesh_vert", POINTER(c_float)),
("mesh_normal", POINTER(c_float)),
("mesh_face", POINTER(c_int)),
("mesh_graph", POINTER(c_int)),
("hfield_size", POINTER(c_double)),
("hfield_nrow", POINTER(c_int)),
("hfield_ncol", POINTER(c_int)),
("hfield_adr", POINTER(c_int)),
("hfield_data", POINTER(c_float)),
("tex_type", POINTER(c_int)),
("tex_height", POINTER(c_int)),
("tex_width", POINTER(c_int)),
("tex_adr", POINTER(c_int)),
("tex_rgb", POINTER(c_ubyte)),
("mat_texid", POINTER(c_int)),
("mat_texuniform", POINTER(c_ubyte)),
("mat_texrepeat", POINTER(c_float)),
("mat_emission", POINTER(c_float)),
("mat_specular", POINTER(c_float)),
("mat_shininess", POINTER(c_float)),
("mat_reflectance", POINTER(c_float)),
("mat_rgba", POINTER(c_float)),
("pair_dim", POINTER(c_int)),
("pair_geom1", POINTER(c_int)),
("pair_geom2", POINTER(c_int)),
("pair_signature", POINTER(c_int)),
("pair_solref", POINTER(c_double)),
("pair_solimp", POINTER(c_double)),
("pair_margin", POINTER(c_double)),
("pair_gap", POINTER(c_double)),
("pair_friction", POINTER(c_double)),
("exclude_signature", POINTER(c_int)),
("eq_type", POINTER(c_int)),
("eq_obj1id", POINTER(c_int)),
("eq_obj2id", POINTER(c_int)),
("eq_active", POINTER(c_ubyte)),
("eq_solref", POINTER(c_double)),
("eq_solimp", POINTER(c_double)),
("eq_data", POINTER(c_double)),
("tendon_adr", POINTER(c_int)),
("tendon_num", POINTER(c_int)),
("tendon_matid", POINTER(c_int)),
("tendon_limited", POINTER(c_ubyte)),
("tendon_frictional", POINTER(c_ubyte)),
("tendon_width", POINTER(c_double)),
("tendon_solref_lim", POINTER(c_double)),
("tendon_solimp_lim", POINTER(c_double)),
("tendon_solref_fri", POINTER(c_double)),
("tendon_solimp_fri", POINTER(c_double)),
("tendon_range", POINTER(c_double)),
("tendon_margin", POINTER(c_double)),
("tendon_stiffness", POINTER(c_double)),
("tendon_damping", POINTER(c_double)),
("tendon_frictionloss", POINTER(c_double)),
("tendon_lengthspring", POINTER(c_double)),
("tendon_length0", POINTER(c_double)),
("tendon_invweight0", POINTER(c_double)),
("tendon_user", POINTER(c_double)),
("tendon_rgba", POINTER(c_float)),
("wrap_type", POINTER(c_int)),
("wrap_objid", POINTER(c_int)),
("wrap_prm", POINTER(c_double)),
("actuator_trntype", POINTER(c_int)),
("actuator_dyntype", POINTER(c_int)),
("actuator_gaintype", POINTER(c_int)),
("actuator_biastype", POINTER(c_int)),
("actuator_trnid", POINTER(c_int)),
("actuator_ctrllimited", POINTER(c_ubyte)),
("actuator_forcelimited", POINTER(c_ubyte)),
("actuator_dynprm", POINTER(c_double)),
("actuator_gainprm", POINTER(c_double)),
("actuator_biasprm", POINTER(c_double)),
("actuator_ctrlrange", POINTER(c_double)),
("actuator_forcerange", POINTER(c_double)),
("actuator_gear", POINTER(c_double)),
("actuator_cranklength", POINTER(c_double)),
("actuator_invweight0", POINTER(c_double)),
("actuator_length0", POINTER(c_double)),
("actuator_lengthrange", POINTER(c_double)),
("actuator_user", POINTER(c_double)),
("sensor_type", POINTER(c_int)),
("sensor_objid", POINTER(c_int)),
("sensor_dim", POINTER(c_int)),
("sensor_adr", POINTER(c_int)),
("sensor_scale", POINTER(c_double)),
("sensor_user", POINTER(c_double)),
("numeric_adr", POINTER(c_int)),
("numeric_size", POINTER(c_int)),
("numeric_data", POINTER(c_double)),
("text_adr", POINTER(c_int)),
("text_data", POINTER(c_char)),
("key_time", POINTER(c_double)),
("key_qpos", POINTER(c_double)),
("key_qvel", POINTER(c_double)),
("key_act", POINTER(c_double)),
("name_bodyadr", POINTER(c_int)),
("name_jntadr", POINTER(c_int)),
("name_geomadr", POINTER(c_int)),
("name_siteadr", POINTER(c_int)),
("name_camadr", POINTER(c_int)),
("name_lightadr", POINTER(c_int)),
("name_meshadr", POINTER(c_int)),
("name_hfieldadr", POINTER(c_int)),
("name_texadr", POINTER(c_int)),
("name_matadr", POINTER(c_int)),
("name_eqadr", POINTER(c_int)),
("name_tendonadr", POINTER(c_int)),
("name_actuatoradr", POINTER(c_int)),
("name_sensoradr", POINTER(c_int)),
("name_numericadr", POINTER(c_int)),
("name_textadr", POINTER(c_int)),
("names", POINTER(c_char)),
]
class MjContactWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def dist(self):
return self._wrapped.contents.dist
@dist.setter
def dist(self, value):
self._wrapped.contents.dist = value
@property
def pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pos, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@pos.setter
def pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.pos, val_ptr, 3 * sizeof(c_double))
@property
def frame(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.frame, dtype=np.double, count=(9)), (9, ))
arr.setflags(write=False)
return arr
@frame.setter
def frame(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.frame, val_ptr, 9 * sizeof(c_double))
@property
def includemargin(self):
return self._wrapped.contents.includemargin
@includemargin.setter
def includemargin(self, value):
self._wrapped.contents.includemargin = value
@property
def friction(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.friction, dtype=np.double, count=(5)), (5, ))
arr.setflags(write=False)
return arr
@friction.setter
def friction(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.friction, val_ptr, 5 * sizeof(c_double))
@property
def solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.solref, dtype=np.double, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@solref.setter
def solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.solref, val_ptr, 2 * sizeof(c_double))
@property
def solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.solimp, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@solimp.setter
def solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.solimp, val_ptr, 3 * sizeof(c_double))
@property
def mu(self):
return self._wrapped.contents.mu
@mu.setter
def mu(self, value):
self._wrapped.contents.mu = value
@property
def coef(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.coef, dtype=np.double, count=(5)), (5, ))
arr.setflags(write=False)
return arr
@coef.setter
def coef(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.coef, val_ptr, 5 * sizeof(c_double))
@property
def zone(self):
return self._wrapped.contents.zone
@zone.setter
def zone(self, value):
self._wrapped.contents.zone = value
@property
def dim(self):
return self._wrapped.contents.dim
@dim.setter
def dim(self, value):
self._wrapped.contents.dim = value
@property
def geom1(self):
return self._wrapped.contents.geom1
@geom1.setter
def geom1(self, value):
self._wrapped.contents.geom1 = value
@property
def geom2(self):
return self._wrapped.contents.geom2
@geom2.setter
def geom2(self, value):
self._wrapped.contents.geom2 = value
@property
def exclude(self):
return self._wrapped.contents.exclude
@exclude.setter
def exclude(self, value):
self._wrapped.contents.exclude = value
@property
def efc_address(self):
return self._wrapped.contents.efc_address
@efc_address.setter
def efc_address(self, value):
self._wrapped.contents.efc_address = value
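# Editor's note: the generated wrapper classes in this file share one pattern. Array-valued
# struct fields are read through np.fromiter and returned as write-protected ndarrays, and
# their setters copy data back into the underlying C struct with ctypes memmove. A hedged
# usage sketch (`contact_ptr` is a placeholder POINTER(MJCONTACT) instance, not defined here):
#
#     c = MjContactWrapper(contact_ptr)
#     c.pos                   # read: returns a read-only ndarray of length 3
#     c.pos = [0., 0., 1.]    # write: values are copied into the struct via memmove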
class MjrRectWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def left(self):
return self._wrapped.contents.left
@left.setter
def left(self, value):
self._wrapped.contents.left = value
@property
def bottom(self):
return self._wrapped.contents.bottom
@bottom.setter
def bottom(self, value):
self._wrapped.contents.bottom = value
@property
def width(self):
return self._wrapped.contents.width
@width.setter
def width(self, value):
self._wrapped.contents.width = value
@property
def height(self):
return self._wrapped.contents.height
@height.setter
def height(self, value):
self._wrapped.contents.height = value
class MjvCameraPoseWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def head_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.head_pos, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@head_pos.setter
def head_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.head_pos, val_ptr, 3 * sizeof(c_double))
@property
def head_right(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.head_right, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@head_right.setter
def head_right(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.head_right, val_ptr, 3 * sizeof(c_double))
@property
def window_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_pos, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@window_pos.setter
def window_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_pos, val_ptr, 3 * sizeof(c_double))
@property
def window_right(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_right, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@window_right.setter
def window_right(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_right, val_ptr, 3 * sizeof(c_double))
@property
def window_up(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_up, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@window_up.setter
def window_up(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_up, val_ptr, 3 * sizeof(c_double))
@property
def window_normal(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_normal, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@window_normal.setter
def window_normal(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_normal, val_ptr, 3 * sizeof(c_double))
@property
def window_size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_size, dtype=np.double, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@window_size.setter
def window_size(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_size, val_ptr, 2 * sizeof(c_double))
@property
def scale(self):
return self._wrapped.contents.scale
@scale.setter
def scale(self, value):
self._wrapped.contents.scale = value
@property
def ipd(self):
return self._wrapped.contents.ipd
@ipd.setter
def ipd(self, value):
self._wrapped.contents.ipd = value
class MjrOptionWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def stereo(self):
return self._wrapped.contents.stereo
@stereo.setter
def stereo(self, value):
self._wrapped.contents.stereo = value
@property
def flags(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.flags, dtype=np.uint8, count=(6)), (6, ))
arr.setflags(write=False)
return arr
@flags.setter
def flags(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.flags, val_ptr, 6 * sizeof(c_ubyte))
class MjrContextWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def linewidth(self):
return self._wrapped.contents.linewidth
@linewidth.setter
def linewidth(self, value):
self._wrapped.contents.linewidth = value
@property
def znear(self):
return self._wrapped.contents.znear
@znear.setter
def znear(self, value):
self._wrapped.contents.znear = value
@property
def zfar(self):
return self._wrapped.contents.zfar
@zfar.setter
def zfar(self, value):
self._wrapped.contents.zfar = value
@property
def shadowclip(self):
return self._wrapped.contents.shadowclip
@shadowclip.setter
def shadowclip(self, value):
self._wrapped.contents.shadowclip = value
@property
def shadowscale(self):
return self._wrapped.contents.shadowscale
@shadowscale.setter
def shadowscale(self, value):
self._wrapped.contents.shadowscale = value
@property
def shadowsize(self):
return self._wrapped.contents.shadowsize
@shadowsize.setter
def shadowsize(self, value):
self._wrapped.contents.shadowsize = value
@property
def offwidth(self):
return self._wrapped.contents.offwidth
@offwidth.setter
def offwidth(self, value):
self._wrapped.contents.offwidth = value
@property
def offheight(self):
return self._wrapped.contents.offheight
@offheight.setter
def offheight(self, value):
self._wrapped.contents.offheight = value
@property
def offFBO(self):
return self._wrapped.contents.offFBO
@offFBO.setter
def offFBO(self, value):
self._wrapped.contents.offFBO = value
@property
def offColor(self):
return self._wrapped.contents.offColor
@offColor.setter
def offColor(self, value):
self._wrapped.contents.offColor = value
@property
def offDepthStencil(self):
return self._wrapped.contents.offDepthStencil
@offDepthStencil.setter
def offDepthStencil(self, value):
self._wrapped.contents.offDepthStencil = value
@property
def shadowFBO(self):
return self._wrapped.contents.shadowFBO
@shadowFBO.setter
def shadowFBO(self, value):
self._wrapped.contents.shadowFBO = value
@property
def shadowTex(self):
return self._wrapped.contents.shadowTex
@shadowTex.setter
def shadowTex(self, value):
self._wrapped.contents.shadowTex = value
@property
def ntexture(self):
return self._wrapped.contents.ntexture
@ntexture.setter
def ntexture(self, value):
self._wrapped.contents.ntexture = value
@property
def texture(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.texture, dtype=np.int, count=(100)), (100, ))
arr.setflags(write=False)
return arr
@texture.setter
def texture(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.texture, val_ptr, 100 * sizeof(c_int))
@property
def textureType(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.textureType, dtype=np.int, count=(100)), (100, ))
arr.setflags(write=False)
return arr
@textureType.setter
def textureType(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.textureType, val_ptr, 100 * sizeof(c_int))
@property
def basePlane(self):
return self._wrapped.contents.basePlane
@basePlane.setter
def basePlane(self, value):
self._wrapped.contents.basePlane = value
@property
def baseMesh(self):
return self._wrapped.contents.baseMesh
@baseMesh.setter
def baseMesh(self, value):
self._wrapped.contents.baseMesh = value
@property
def baseHField(self):
return self._wrapped.contents.baseHField
@baseHField.setter
def baseHField(self, value):
self._wrapped.contents.baseHField = value
@property
def baseBuiltin(self):
return self._wrapped.contents.baseBuiltin
@baseBuiltin.setter
def baseBuiltin(self, value):
self._wrapped.contents.baseBuiltin = value
@property
def baseFontNormal(self):
return self._wrapped.contents.baseFontNormal
@baseFontNormal.setter
def baseFontNormal(self, value):
self._wrapped.contents.baseFontNormal = value
@property
def baseFontBack(self):
return self._wrapped.contents.baseFontBack
@baseFontBack.setter
def baseFontBack(self, value):
self._wrapped.contents.baseFontBack = value
@property
def baseFontBig(self):
return self._wrapped.contents.baseFontBig
@baseFontBig.setter
def baseFontBig(self, value):
self._wrapped.contents.baseFontBig = value
@property
def rangePlane(self):
return self._wrapped.contents.rangePlane
@rangePlane.setter
def rangePlane(self, value):
self._wrapped.contents.rangePlane = value
@property
def rangeMesh(self):
return self._wrapped.contents.rangeMesh
@rangeMesh.setter
def rangeMesh(self, value):
self._wrapped.contents.rangeMesh = value
@property
def rangeHField(self):
return self._wrapped.contents.rangeHField
@rangeHField.setter
def rangeHField(self, value):
self._wrapped.contents.rangeHField = value
@property
def rangeBuiltin(self):
return self._wrapped.contents.rangeBuiltin
@rangeBuiltin.setter
def rangeBuiltin(self, value):
self._wrapped.contents.rangeBuiltin = value
@property
def rangeFont(self):
return self._wrapped.contents.rangeFont
@rangeFont.setter
def rangeFont(self, value):
self._wrapped.contents.rangeFont = value
@property
def charWidth(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.charWidth, dtype=np.int, count=(127)), (127, ))
arr.setflags(write=False)
return arr
@charWidth.setter
def charWidth(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.charWidth, val_ptr, 127 * sizeof(c_int))
@property
def charWidthBig(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.charWidthBig, dtype=np.int, count=(127)), (127, ))
arr.setflags(write=False)
return arr
@charWidthBig.setter
def charWidthBig(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.charWidthBig, val_ptr, 127 * sizeof(c_int))
@property
def charHeight(self):
return self._wrapped.contents.charHeight
@charHeight.setter
def charHeight(self, value):
self._wrapped.contents.charHeight = value
@property
def charHeightBig(self):
return self._wrapped.contents.charHeightBig
@charHeightBig.setter
def charHeightBig(self, value):
self._wrapped.contents.charHeightBig = value
@property
def glewInitialized(self):
return self._wrapped.contents.glewInitialized
@glewInitialized.setter
def glewInitialized(self, value):
self._wrapped.contents.glewInitialized = value
class MjvCameraWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def fovy(self):
return self._wrapped.contents.fovy
@fovy.setter
def fovy(self, value):
self._wrapped.contents.fovy = value
@property
def camid(self):
return self._wrapped.contents.camid
@camid.setter
def camid(self, value):
self._wrapped.contents.camid = value
@property
def trackbodyid(self):
return self._wrapped.contents.trackbodyid
@trackbodyid.setter
def trackbodyid(self, value):
self._wrapped.contents.trackbodyid = value
@property
def lookat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.lookat, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@lookat.setter
def lookat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.lookat, val_ptr, 3 * sizeof(c_double))
@property
def azimuth(self):
return self._wrapped.contents.azimuth
@azimuth.setter
def azimuth(self, value):
self._wrapped.contents.azimuth = value
@property
def elevation(self):
return self._wrapped.contents.elevation
@elevation.setter
def elevation(self, value):
self._wrapped.contents.elevation = value
@property
def distance(self):
return self._wrapped.contents.distance
@distance.setter
def distance(self, value):
self._wrapped.contents.distance = value
@property
def pose(self):
return self._wrapped.contents.pose
@pose.setter
def pose(self, value):
self._wrapped.contents.pose = value
@property
def VR(self):
return self._wrapped.contents.VR
@VR.setter
def VR(self, value):
self._wrapped.contents.VR = value
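# Wrapper for MuJoCo's visualization options (mjvOption): label/frame modes,
# geom/site group visibility masks and rendering flags.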
class MjvOptionWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def label(self):
return self._wrapped.contents.label
@label.setter
def label(self, value):
self._wrapped.contents.label = value
@property
def frame(self):
return self._wrapped.contents.frame
@frame.setter
def frame(self, value):
self._wrapped.contents.frame = value
@property
def geomgroup(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geomgroup, dtype=np.uint8, count=(5)), (5, ))
arr.setflags(write=False)
return arr
@geomgroup.setter
def geomgroup(self, value):
val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.geomgroup, val_ptr, 5 * sizeof(c_ubyte))
@property
def sitegroup(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sitegroup, dtype=np.uint8, count=(5)), (5, ))
arr.setflags(write=False)
return arr
@sitegroup.setter
def sitegroup(self, value):
val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.sitegroup, val_ptr, 5 * sizeof(c_ubyte))
@property
def flags(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.flags, dtype=np.uint8, count=(18)), (18, ))
arr.setflags(write=False)
return arr
@flags.setter
def flags(self, value):
val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.flags, val_ptr, 18 * sizeof(c_ubyte))
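# Wrapper for a single abstract visualization geom (mjvGeom): type, ids, pose,
# size, material properties and label used by the renderer.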
class MjvGeomWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def type(self):
return self._wrapped.contents.type
@type.setter
def type(self, value):
self._wrapped.contents.type = value
@property
def dataid(self):
return self._wrapped.contents.dataid
@dataid.setter
def dataid(self, value):
self._wrapped.contents.dataid = value
@property
def objtype(self):
return self._wrapped.contents.objtype
@objtype.setter
def objtype(self, value):
self._wrapped.contents.objtype = value
@property
def objid(self):
return self._wrapped.contents.objid
@objid.setter
def objid(self, value):
self._wrapped.contents.objid = value
@property
def category(self):
return self._wrapped.contents.category
@category.setter
def category(self, value):
self._wrapped.contents.category = value
@property
def texid(self):
return self._wrapped.contents.texid
@texid.setter
def texid(self, value):
self._wrapped.contents.texid = value
@property
def texuniform(self):
return self._wrapped.contents.texuniform
@texuniform.setter
def texuniform(self, value):
self._wrapped.contents.texuniform = value
@property
def texrepeat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.texrepeat, dtype=np.float, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@texrepeat.setter
def texrepeat(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.texrepeat, val_ptr, 2 * sizeof(c_float))
@property
def size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.size, dtype=np.float, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@size.setter
def size(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.size, val_ptr, 3 * sizeof(c_float))
@property
def pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pos, dtype=np.float, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@pos.setter
def pos(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.pos, val_ptr, 3 * sizeof(c_float))
@property
def mat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat, dtype=np.float, count=(9)), (9, ))
arr.setflags(write=False)
return arr
@mat.setter
def mat(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mat, val_ptr, 9 * sizeof(c_float))
@property
def rgba(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.rgba, dtype=np.float, count=(4)), (4, ))
arr.setflags(write=False)
return arr
@rgba.setter
def rgba(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.rgba, val_ptr, 4 * sizeof(c_float))
@property
def emission(self):
return self._wrapped.contents.emission
@emission.setter
def emission(self, value):
self._wrapped.contents.emission = value
@property
def specular(self):
return self._wrapped.contents.specular
@specular.setter
def specular(self, value):
self._wrapped.contents.specular = value
@property
def shininess(self):
return self._wrapped.contents.shininess
@shininess.setter
def shininess(self, value):
self._wrapped.contents.shininess = value
@property
def reflectance(self):
return self._wrapped.contents.reflectance
@reflectance.setter
def reflectance(self, value):
self._wrapped.contents.reflectance = value
@property
def label(self):
return self._wrapped.contents.label
@label.setter
def label(self, value):
self._wrapped.contents.label = value
@property
def camdist(self):
return self._wrapped.contents.camdist
@camdist.setter
def camdist(self, value):
self._wrapped.contents.camdist = value
@property
def rbound(self):
return self._wrapped.contents.rbound
@rbound.setter
def rbound(self, value):
self._wrapped.contents.rbound = value
@property
def transparent(self):
return self._wrapped.contents.transparent
@transparent.setter
def transparent(self, value):
self._wrapped.contents.transparent = value
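# Wrapper for an abstract visualization light (mjvLight): position, direction,
# attenuation and ambient/diffuse/specular color terms.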
class MjvLightWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pos, dtype=np.float, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@pos.setter
def pos(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.pos, val_ptr, 3 * sizeof(c_float))
@property
def dir(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dir, dtype=np.float, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@dir.setter
def dir(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.dir, val_ptr, 3 * sizeof(c_float))
@property
def attenuation(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.attenuation, dtype=np.float, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@attenuation.setter
def attenuation(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.attenuation, val_ptr, 3 * sizeof(c_float))
@property
def cutoff(self):
return self._wrapped.contents.cutoff
@cutoff.setter
def cutoff(self, value):
self._wrapped.contents.cutoff = value
@property
def exponent(self):
return self._wrapped.contents.exponent
@exponent.setter
def exponent(self, value):
self._wrapped.contents.exponent = value
@property
def ambient(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ambient, dtype=np.float, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@ambient.setter
def ambient(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.ambient, val_ptr, 3 * sizeof(c_float))
@property
def diffuse(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.diffuse, dtype=np.float, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@diffuse.setter
def diffuse(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.diffuse, val_ptr, 3 * sizeof(c_float))
@property
def specular(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.specular, dtype=np.float, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@specular.setter
def specular(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.specular, val_ptr, 3 * sizeof(c_float))
@property
def headlight(self):
return self._wrapped.contents.headlight
@headlight.setter
def headlight(self, value):
self._wrapped.contents.headlight = value
@property
def directional(self):
return self._wrapped.contents.directional
@directional.setter
def directional(self, value):
self._wrapped.contents.directional = value
@property
def castshadow(self):
return self._wrapped.contents.castshadow
@castshadow.setter
def castshadow(self, value):
self._wrapped.contents.castshadow = value
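# Wrapper for the collection of abstract geoms and lights assembled for
# rendering (mjvObjects).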
class MjvObjectsWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def nlight(self):
return self._wrapped.contents.nlight
@nlight.setter
def nlight(self, value):
self._wrapped.contents.nlight = value
@property
def ngeom(self):
return self._wrapped.contents.ngeom
@ngeom.setter
def ngeom(self, value):
self._wrapped.contents.ngeom = value
@property
def maxgeom(self):
return self._wrapped.contents.maxgeom
@maxgeom.setter
def maxgeom(self, value):
self._wrapped.contents.maxgeom = value
@property
def lights(self):
return self._wrapped.contents.lights
@lights.setter
def lights(self, value):
self._wrapped.contents.lights = value
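# Wrapper for the physics options (mjOption): timestep, solver and collision
# settings, gravity, wind, magnetic field and medium properties.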
class MjOptionWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def timestep(self):
return self._wrapped.contents.timestep
@timestep.setter
def timestep(self, value):
self._wrapped.contents.timestep = value
@property
def apirate(self):
return self._wrapped.contents.apirate
@apirate.setter
def apirate(self, value):
self._wrapped.contents.apirate = value
@property
def tolerance(self):
return self._wrapped.contents.tolerance
@tolerance.setter
def tolerance(self, value):
self._wrapped.contents.tolerance = value
@property
def impratio(self):
return self._wrapped.contents.impratio
@impratio.setter
def impratio(self, value):
self._wrapped.contents.impratio = value
@property
def gravity(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.gravity, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@gravity.setter
def gravity(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.gravity, val_ptr, 3 * sizeof(c_double))
@property
def wind(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wind, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@wind.setter
def wind(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.wind, val_ptr, 3 * sizeof(c_double))
@property
def magnetic(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.magnetic, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@magnetic.setter
def magnetic(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.magnetic, val_ptr, 3 * sizeof(c_double))
@property
def density(self):
return self._wrapped.contents.density
@density.setter
def density(self, value):
self._wrapped.contents.density = value
@property
def viscosity(self):
return self._wrapped.contents.viscosity
@viscosity.setter
def viscosity(self, value):
self._wrapped.contents.viscosity = value
@property
def o_margin(self):
return self._wrapped.contents.o_margin
@o_margin.setter
def o_margin(self, value):
self._wrapped.contents.o_margin = value
@property
def o_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.o_solref, dtype=np.double, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@o_solref.setter
def o_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.o_solref, val_ptr, 2 * sizeof(c_double))
@property
def o_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.o_solimp, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@o_solimp.setter
def o_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.o_solimp, val_ptr, 3 * sizeof(c_double))
@property
def mpr_tolerance(self):
return self._wrapped.contents.mpr_tolerance
@mpr_tolerance.setter
def mpr_tolerance(self, value):
self._wrapped.contents.mpr_tolerance = value
@property
def mpr_iterations(self):
return self._wrapped.contents.mpr_iterations
@mpr_iterations.setter
def mpr_iterations(self, value):
self._wrapped.contents.mpr_iterations = value
@property
def integrator(self):
return self._wrapped.contents.integrator
@integrator.setter
def integrator(self, value):
self._wrapped.contents.integrator = value
@property
def collision(self):
return self._wrapped.contents.collision
@collision.setter
def collision(self, value):
self._wrapped.contents.collision = value
@property
def impedance(self):
return self._wrapped.contents.impedance
@impedance.setter
def impedance(self, value):
self._wrapped.contents.impedance = value
@property
def reference(self):
return self._wrapped.contents.reference
@reference.setter
def reference(self, value):
self._wrapped.contents.reference = value
@property
def solver(self):
return self._wrapped.contents.solver
@solver.setter
def solver(self, value):
self._wrapped.contents.solver = value
@property
def iterations(self):
return self._wrapped.contents.iterations
@iterations.setter
def iterations(self, value):
self._wrapped.contents.iterations = value
@property
def disableflags(self):
return self._wrapped.contents.disableflags
@disableflags.setter
def disableflags(self, value):
self._wrapped.contents.disableflags = value
@property
def enableflags(self):
return self._wrapped.contents.enableflags
@enableflags.setter
def enableflags(self, value):
self._wrapped.contents.enableflags = value
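# Wrapper for the visualization parameters (mjVisual); its sub-structs
# (global_, quality, headlight, map_, scale, rgba) are proxied as-is.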
class MjVisualWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def global_(self):
return self._wrapped.contents.global_
@global_.setter
def global_(self, value):
self._wrapped.contents.global_ = value
@property
def quality(self):
return self._wrapped.contents.quality
@quality.setter
def quality(self, value):
self._wrapped.contents.quality = value
@property
def headlight(self):
return self._wrapped.contents.headlight
@headlight.setter
def headlight(self, value):
self._wrapped.contents.headlight = value
@property
def map_(self):
return self._wrapped.contents.map_
@map_.setter
def map_(self, value):
self._wrapped.contents.map_ = value
@property
def scale(self):
return self._wrapped.contents.scale
@scale.setter
def scale(self, value):
self._wrapped.contents.scale = value
@property
def rgba(self):
return self._wrapped.contents.rgba
@rgba.setter
def rgba(self, value):
self._wrapped.contents.rgba = value
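# Wrapper for the compiled model statistics (mjStatistic): mean mass/size,
# extent and center, used mainly for visualization scaling.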
class MjStatisticWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def meanmass(self):
return self._wrapped.contents.meanmass
@meanmass.setter
def meanmass(self, value):
self._wrapped.contents.meanmass = value
@property
def meansize(self):
return self._wrapped.contents.meansize
@meansize.setter
def meansize(self, value):
self._wrapped.contents.meansize = value
@property
def extent(self):
return self._wrapped.contents.extent
@extent.setter
def extent(self, value):
self._wrapped.contents.extent = value
@property
def center(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.center, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@center.setter
def center(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.center, val_ptr, 3 * sizeof(c_double))
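# Wrapper for the dynamic simulation state (mjData). Array shapes depend on the
# model, so sizes are read from _size_src, an object exposing the model size
# constants (nq, nv, nbody, ...).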
class MjDataWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def nstack(self):
return self._wrapped.contents.nstack
@nstack.setter
def nstack(self, value):
self._wrapped.contents.nstack = value
@property
def nbuffer(self):
return self._wrapped.contents.nbuffer
@nbuffer.setter
def nbuffer(self, value):
self._wrapped.contents.nbuffer = value
@property
def pstack(self):
return self._wrapped.contents.pstack
@pstack.setter
def pstack(self, value):
self._wrapped.contents.pstack = value
@property
def maxstackuse(self):
return self._wrapped.contents.maxstackuse
@maxstackuse.setter
def maxstackuse(self, value):
self._wrapped.contents.maxstackuse = value
@property
def ne(self):
return self._wrapped.contents.ne
@ne.setter
def ne(self, value):
self._wrapped.contents.ne = value
@property
def nf(self):
return self._wrapped.contents.nf
@nf.setter
def nf(self, value):
self._wrapped.contents.nf = value
@property
def nefc(self):
return self._wrapped.contents.nefc
@nefc.setter
def nefc(self, value):
self._wrapped.contents.nefc = value
@property
def ncon(self):
return self._wrapped.contents.ncon
@ncon.setter
def ncon(self, value):
self._wrapped.contents.ncon = value
@property
def nwarning(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.nwarning, dtype=np.int, count=(8)), (8, ))
arr.setflags(write=False)
return arr
@nwarning.setter
def nwarning(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.nwarning, val_ptr, 8 * sizeof(c_int))
@property
def warning_info(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.warning_info, dtype=np.int, count=(8)), (8, ))
arr.setflags(write=False)
return arr
@warning_info.setter
def warning_info(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.warning_info, val_ptr, 8 * sizeof(c_int))
@property
def timer_duration(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.timer_duration, dtype=np.double, count=(14)), (14, ))
arr.setflags(write=False)
return arr
@timer_duration.setter
def timer_duration(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.timer_duration, val_ptr, 14 * sizeof(c_double))
@property
def timer_ncall(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.timer_ncall, dtype=np.double, count=(14)), (14, ))
arr.setflags(write=False)
return arr
@timer_ncall.setter
def timer_ncall(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.timer_ncall, val_ptr, 14 * sizeof(c_double))
@property
def mocaptime(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mocaptime, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@mocaptime.setter
def mocaptime(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.mocaptime, val_ptr, 3 * sizeof(c_double))
@property
def time(self):
return self._wrapped.contents.time
@time.setter
def time(self, value):
self._wrapped.contents.time = value
@property
def energy(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.energy, dtype=np.double, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@energy.setter
def energy(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.energy, val_ptr, 2 * sizeof(c_double))
@property
def solverstat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.solverstat, dtype=np.double, count=(4)), (4, ))
arr.setflags(write=False)
return arr
@solverstat.setter
def solverstat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.solverstat, val_ptr, 4 * sizeof(c_double))
@property
def solvertrace(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.solvertrace, dtype=np.double, count=(200)), (200, ))
arr.setflags(write=False)
return arr
@solvertrace.setter
def solvertrace(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.solvertrace, val_ptr, 200 * sizeof(c_double))
@property
def buffer(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.buffer, dtype=np.uint8, count=(self.nbuffer)), (self.nbuffer, ))
arr.setflags(write=False)
return arr
@buffer.setter
def buffer(self, value):
val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.buffer, val_ptr, self.nbuffer * sizeof(c_ubyte))
@property
def stack(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.stack, dtype=np.double, count=(self.nstack)), (self.nstack, ))
arr.setflags(write=False)
return arr
@stack.setter
def stack(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.stack, val_ptr, self.nstack * sizeof(c_double))
@property
def qpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qpos, dtype=np.double, count=(self._size_src.nq*1)), (self._size_src.nq, 1, ))
arr.setflags(write=False)
return arr
@qpos.setter
def qpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qpos, val_ptr, self._size_src.nq*1 * sizeof(c_double))
@property
def qvel(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qvel, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qvel.setter
def qvel(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qvel, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def act(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.act, dtype=np.double, count=(self._size_src.na*1)), (self._size_src.na, 1, ))
arr.setflags(write=False)
return arr
@act.setter
def act(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.act, val_ptr, self._size_src.na*1 * sizeof(c_double))
@property
def ctrl(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ctrl, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
arr.setflags(write=False)
return arr
@ctrl.setter
def ctrl(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ctrl, val_ptr, self._size_src.nu*1 * sizeof(c_double))
@property
def qfrc_applied(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_applied, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_applied.setter
def qfrc_applied(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_applied, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def xfrc_applied(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xfrc_applied, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@xfrc_applied.setter
def xfrc_applied(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xfrc_applied, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
@property
def qacc(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qacc, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qacc.setter
def qacc(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qacc, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def act_dot(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.act_dot, dtype=np.double, count=(self._size_src.na*1)), (self._size_src.na, 1, ))
arr.setflags(write=False)
return arr
@act_dot.setter
def act_dot(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.act_dot, val_ptr, self._size_src.na*1 * sizeof(c_double))
@property
def mocap_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mocap_pos, dtype=np.double, count=(self._size_src.nmocap*3)), (self._size_src.nmocap, 3, ))
arr.setflags(write=False)
return arr
@mocap_pos.setter
def mocap_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.mocap_pos, val_ptr, self._size_src.nmocap*3 * sizeof(c_double))
@property
def mocap_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mocap_quat, dtype=np.double, count=(self._size_src.nmocap*4)), (self._size_src.nmocap, 4, ))
arr.setflags(write=False)
return arr
@mocap_quat.setter
def mocap_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.mocap_quat, val_ptr, self._size_src.nmocap*4 * sizeof(c_double))
@property
def userdata(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.userdata, dtype=np.double, count=(self._size_src.nuserdata*1)), (self._size_src.nuserdata, 1, ))
arr.setflags(write=False)
return arr
@userdata.setter
def userdata(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.userdata, val_ptr, self._size_src.nuserdata*1 * sizeof(c_double))
@property
def sensordata(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sensordata, dtype=np.double, count=(self._size_src.nsensordata*1)), (self._size_src.nsensordata, 1, ))
arr.setflags(write=False)
return arr
@sensordata.setter
def sensordata(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.sensordata, val_ptr, self._size_src.nsensordata*1 * sizeof(c_double))
@property
def xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xpos, dtype=np.double, count=(self._size_src.nbody*3)), (self._size_src.nbody, 3, ))
arr.setflags(write=False)
return arr
@xpos.setter
def xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xpos, val_ptr, self._size_src.nbody*3 * sizeof(c_double))
@property
def xquat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xquat, dtype=np.double, count=(self._size_src.nbody*4)), (self._size_src.nbody, 4, ))
arr.setflags(write=False)
return arr
@xquat.setter
def xquat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xquat, val_ptr, self._size_src.nbody*4 * sizeof(c_double))
@property
def xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xmat, dtype=np.double, count=(self._size_src.nbody*9)), (self._size_src.nbody, 9, ))
arr.setflags(write=False)
return arr
@xmat.setter
def xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xmat, val_ptr, self._size_src.nbody*9 * sizeof(c_double))
@property
def xipos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xipos, dtype=np.double, count=(self._size_src.nbody*3)), (self._size_src.nbody, 3, ))
arr.setflags(write=False)
return arr
@xipos.setter
def xipos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xipos, val_ptr, self._size_src.nbody*3 * sizeof(c_double))
@property
def ximat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ximat, dtype=np.double, count=(self._size_src.nbody*9)), (self._size_src.nbody, 9, ))
arr.setflags(write=False)
return arr
@ximat.setter
def ximat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ximat, val_ptr, self._size_src.nbody*9 * sizeof(c_double))
@property
def xanchor(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xanchor, dtype=np.double, count=(self._size_src.njnt*3)), (self._size_src.njnt, 3, ))
arr.setflags(write=False)
return arr
@xanchor.setter
def xanchor(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xanchor, val_ptr, self._size_src.njnt*3 * sizeof(c_double))
@property
def xaxis(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xaxis, dtype=np.double, count=(self._size_src.njnt*3)), (self._size_src.njnt, 3, ))
arr.setflags(write=False)
return arr
@xaxis.setter
def xaxis(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xaxis, val_ptr, self._size_src.njnt*3 * sizeof(c_double))
@property
def geom_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_xpos, dtype=np.double, count=(self._size_src.ngeom*3)), (self._size_src.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_xpos.setter
def geom_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_xpos, val_ptr, self._size_src.ngeom*3 * sizeof(c_double))
@property
def geom_xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_xmat, dtype=np.double, count=(self._size_src.ngeom*9)), (self._size_src.ngeom, 9, ))
arr.setflags(write=False)
return arr
@geom_xmat.setter
def geom_xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_xmat, val_ptr, self._size_src.ngeom*9 * sizeof(c_double))
@property
def site_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_xpos, dtype=np.double, count=(self._size_src.nsite*3)), (self._size_src.nsite, 3, ))
arr.setflags(write=False)
return arr
@site_xpos.setter
def site_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_xpos, val_ptr, self._size_src.nsite*3 * sizeof(c_double))
@property
def site_xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_xmat, dtype=np.double, count=(self._size_src.nsite*9)), (self._size_src.nsite, 9, ))
arr.setflags(write=False)
return arr
@site_xmat.setter
def site_xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_xmat, val_ptr, self._size_src.nsite*9 * sizeof(c_double))
@property
def cam_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_xpos, dtype=np.double, count=(self._size_src.ncam*3)), (self._size_src.ncam, 3, ))
arr.setflags(write=False)
return arr
@cam_xpos.setter
def cam_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_xpos, val_ptr, self._size_src.ncam*3 * sizeof(c_double))
@property
def cam_xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_xmat, dtype=np.double, count=(self._size_src.ncam*9)), (self._size_src.ncam, 9, ))
arr.setflags(write=False)
return arr
@cam_xmat.setter
def cam_xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_xmat, val_ptr, self._size_src.ncam*9 * sizeof(c_double))
@property
def light_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_xpos, dtype=np.double, count=(self._size_src.nlight*3)), (self._size_src.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_xpos.setter
def light_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_xpos, val_ptr, self._size_src.nlight*3 * sizeof(c_double))
@property
def light_xdir(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_xdir, dtype=np.double, count=(self._size_src.nlight*3)), (self._size_src.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_xdir.setter
def light_xdir(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_xdir, val_ptr, self._size_src.nlight*3 * sizeof(c_double))
@property
def com_subtree(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.com_subtree, dtype=np.double, count=(self._size_src.nbody*3)), (self._size_src.nbody, 3, ))
arr.setflags(write=False)
return arr
@com_subtree.setter
def com_subtree(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.com_subtree, val_ptr, self._size_src.nbody*3 * sizeof(c_double))
@property
def cdof(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cdof, dtype=np.double, count=(self._size_src.nv*6)), (self._size_src.nv, 6, ))
arr.setflags(write=False)
return arr
@cdof.setter
def cdof(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cdof, val_ptr, self._size_src.nv*6 * sizeof(c_double))
@property
def cinert(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cinert, dtype=np.double, count=(self._size_src.nbody*10)), (self._size_src.nbody, 10, ))
arr.setflags(write=False)
return arr
@cinert.setter
def cinert(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cinert, val_ptr, self._size_src.nbody*10 * sizeof(c_double))
@property
def ten_wrapadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_wrapadr, dtype=np.int, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_wrapadr.setter
def ten_wrapadr(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.ten_wrapadr, val_ptr, self._size_src.ntendon*1 * sizeof(c_int))
@property
def ten_wrapnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_wrapnum, dtype=np.int, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_wrapnum.setter
def ten_wrapnum(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.ten_wrapnum, val_ptr, self._size_src.ntendon*1 * sizeof(c_int))
@property
def ten_length(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_length, dtype=np.double, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_length.setter
def ten_length(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ten_length, val_ptr, self._size_src.ntendon*1 * sizeof(c_double))
@property
def ten_moment(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_moment, dtype=np.double, count=(self._size_src.ntendon*self._size_src.nv)), (self._size_src.ntendon, self._size_src.nv, ))
arr.setflags(write=False)
return arr
@ten_moment.setter
def ten_moment(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ten_moment, val_ptr, self._size_src.ntendon*self._size_src.nv * sizeof(c_double))
@property
def wrap_obj(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wrap_obj, dtype=np.int, count=(self._size_src.nwrap*2)), (self._size_src.nwrap, 2, ))
arr.setflags(write=False)
return arr
@wrap_obj.setter
def wrap_obj(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.wrap_obj, val_ptr, self._size_src.nwrap*2 * sizeof(c_int))
@property
def wrap_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wrap_xpos, dtype=np.double, count=(self._size_src.nwrap*6)), (self._size_src.nwrap, 6, ))
arr.setflags(write=False)
return arr
@wrap_xpos.setter
def wrap_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.wrap_xpos, val_ptr, self._size_src.nwrap*6 * sizeof(c_double))
@property
def actuator_length(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_length, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_length.setter
def actuator_length(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_length, val_ptr, self._size_src.nu*1 * sizeof(c_double))
@property
def actuator_moment(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_moment, dtype=np.double, count=(self._size_src.nu*self._size_src.nv)), (self._size_src.nu, self._size_src.nv, ))
arr.setflags(write=False)
return arr
@actuator_moment.setter
def actuator_moment(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_moment, val_ptr, self._size_src.nu*self._size_src.nv * sizeof(c_double))
@property
def crb(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.crb, dtype=np.double, count=(self._size_src.nbody*10)), (self._size_src.nbody, 10, ))
arr.setflags(write=False)
return arr
@crb.setter
def crb(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.crb, val_ptr, self._size_src.nbody*10 * sizeof(c_double))
@property
def qM(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qM, dtype=np.double, count=(self._size_src.nM*1)), (self._size_src.nM, 1, ))
arr.setflags(write=False)
return arr
@qM.setter
def qM(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qM, val_ptr, self._size_src.nM*1 * sizeof(c_double))
@property
def qLD(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qLD, dtype=np.double, count=(self._size_src.nM*1)), (self._size_src.nM, 1, ))
arr.setflags(write=False)
return arr
@qLD.setter
def qLD(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qLD, val_ptr, self._size_src.nM*1 * sizeof(c_double))
@property
def qLDiagInv(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qLDiagInv, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qLDiagInv.setter
def qLDiagInv(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qLDiagInv, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qLDiagSqrtInv(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qLDiagSqrtInv, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qLDiagSqrtInv.setter
def qLDiagSqrtInv(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qLDiagSqrtInv, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def efc_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_type, dtype=np.int, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_type.setter
def efc_type(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_type, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_id(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_id, dtype=np.int, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_id.setter
def efc_id(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_id, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_rownnz(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rownnz, dtype=np.int, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_rownnz.setter
def efc_rownnz(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rownnz, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_rowadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rowadr, dtype=np.int, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_rowadr.setter
def efc_rowadr(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rowadr, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_colind(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_colind, dtype=np.int, count=(self._size_src.njmax*self._size_src.nv)), (self._size_src.njmax, self._size_src.nv, ))
arr.setflags(write=False)
return arr
@efc_colind.setter
def efc_colind(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_colind, val_ptr, self._size_src.njmax*self._size_src.nv * sizeof(c_int))
@property
def efc_rownnz_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rownnz_T, dtype=np.int, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@efc_rownnz_T.setter
def efc_rownnz_T(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rownnz_T, val_ptr, self._size_src.nv*1 * sizeof(c_int))
@property
def efc_rowadr_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rowadr_T, dtype=np.int, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@efc_rowadr_T.setter
def efc_rowadr_T(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rowadr_T, val_ptr, self._size_src.nv*1 * sizeof(c_int))
@property
def efc_colind_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_colind_T, dtype=np.int, count=(self._size_src.nv*self._size_src.njmax)), (self._size_src.nv, self._size_src.njmax, ))
arr.setflags(write=False)
return arr
@efc_colind_T.setter
def efc_colind_T(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_colind_T, val_ptr, self._size_src.nv*self._size_src.njmax * sizeof(c_int))
@property
def efc_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_solref, dtype=np.double, count=(self._size_src.njmax*2)), (self._size_src.njmax, 2, ))
arr.setflags(write=False)
return arr
@efc_solref.setter
def efc_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_solref, val_ptr, self._size_src.njmax*2 * sizeof(c_double))
@property
def efc_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_solimp, dtype=np.double, count=(self._size_src.njmax*3)), (self._size_src.njmax, 3, ))
arr.setflags(write=False)
return arr
@efc_solimp.setter
def efc_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_solimp, val_ptr, self._size_src.njmax*3 * sizeof(c_double))
@property
def efc_margin(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_margin, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_margin.setter
def efc_margin(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_margin, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_frictionloss(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_frictionloss, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_frictionloss.setter
def efc_frictionloss(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_frictionloss, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_pos, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_pos.setter
def efc_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_pos, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_J(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_J, dtype=np.double, count=(self._size_src.njmax*self._size_src.nv)), (self._size_src.njmax, self._size_src.nv, ))
arr.setflags(write=False)
return arr
@efc_J.setter
def efc_J(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_J, val_ptr, self._size_src.njmax*self._size_src.nv * sizeof(c_double))
@property
def efc_J_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_J_T, dtype=np.double, count=(self._size_src.nv*self._size_src.njmax)), (self._size_src.nv, self._size_src.njmax, ))
arr.setflags(write=False)
return arr
@efc_J_T.setter
def efc_J_T(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_J_T, val_ptr, self._size_src.nv*self._size_src.njmax * sizeof(c_double))
@property
def efc_diagApprox(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_diagApprox, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_diagApprox.setter
def efc_diagApprox(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_diagApprox, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_D(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_D, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_D.setter
def efc_D(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_D, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_R(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_R, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_R.setter
def efc_R(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_R, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_AR(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_AR, dtype=np.double, count=(self._size_src.njmax*self._size_src.njmax)), (self._size_src.njmax, self._size_src.njmax, ))
arr.setflags(write=False)
return arr
@efc_AR.setter
def efc_AR(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_AR, val_ptr, self._size_src.njmax*self._size_src.njmax * sizeof(c_double))
@property
def e_ARchol(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.e_ARchol, dtype=np.double, count=(self._size_src.nemax*self._size_src.nemax)), (self._size_src.nemax, self._size_src.nemax, ))
arr.setflags(write=False)
return arr
@e_ARchol.setter
def e_ARchol(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.e_ARchol, val_ptr, self._size_src.nemax*self._size_src.nemax * sizeof(c_double))
@property
def fc_e_rect(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.fc_e_rect, dtype=np.double, count=(self._size_src.njmax*self._size_src.nemax)), (self._size_src.njmax, self._size_src.nemax, ))
arr.setflags(write=False)
return arr
@fc_e_rect.setter
def fc_e_rect(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.fc_e_rect, val_ptr, self._size_src.njmax*self._size_src.nemax * sizeof(c_double))
@property
def fc_AR(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.fc_AR, dtype=np.double, count=(self._size_src.njmax*self._size_src.njmax)), (self._size_src.njmax, self._size_src.njmax, ))
arr.setflags(write=False)
return arr
@fc_AR.setter
def fc_AR(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.fc_AR, val_ptr, self._size_src.njmax*self._size_src.njmax * sizeof(c_double))
@property
def ten_velocity(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_velocity, dtype=np.double, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_velocity.setter
def ten_velocity(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ten_velocity, val_ptr, self._size_src.ntendon*1 * sizeof(c_double))
@property
def actuator_velocity(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_velocity, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_velocity.setter
def actuator_velocity(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_velocity, val_ptr, self._size_src.nu*1 * sizeof(c_double))
@property
def cvel(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cvel, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@cvel.setter
def cvel(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cvel, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
@property
def cdof_dot(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cdof_dot, dtype=np.double, count=(self._size_src.nv*6)), (self._size_src.nv, 6, ))
arr.setflags(write=False)
return arr
@cdof_dot.setter
def cdof_dot(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cdof_dot, val_ptr, self._size_src.nv*6 * sizeof(c_double))
@property
def qfrc_bias(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_bias, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_bias.setter
def qfrc_bias(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_bias, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qfrc_passive(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_passive, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_passive.setter
def qfrc_passive(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_passive, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def efc_vel(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_vel, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_vel.setter
def efc_vel(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_vel, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_aref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_aref, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_aref.setter
def efc_aref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_aref, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def actuator_force(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_force, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_force.setter
def actuator_force(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_force, val_ptr, self._size_src.nu*1 * sizeof(c_double))
@property
def qfrc_actuator(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_actuator, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_actuator.setter
def qfrc_actuator(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_actuator, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qfrc_unc(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_unc, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_unc.setter
def qfrc_unc(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_unc, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qacc_unc(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qacc_unc, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qacc_unc.setter
def qacc_unc(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qacc_unc, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def efc_b(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_b, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_b.setter
def efc_b(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_b, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def fc_b(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.fc_b, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@fc_b.setter
def fc_b(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.fc_b, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_force(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_force, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_force.setter
def efc_force(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_force, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def qfrc_constraint(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_constraint, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_constraint.setter
def qfrc_constraint(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_constraint, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qfrc_inverse(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_inverse, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_inverse.setter
def qfrc_inverse(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_inverse, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def cacc(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cacc, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@cacc.setter
def cacc(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cacc, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
@property
def cfrc_int(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cfrc_int, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@cfrc_int.setter
def cfrc_int(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cfrc_int, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
@property
def cfrc_ext(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cfrc_ext, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@cfrc_ext.setter
def cfrc_ext(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cfrc_ext, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
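# MjModelWrapper below follows the same generated getter/setter pattern as the data
# wrapper above; the only difference is that the array sizes are read from the model's
# own size fields (self.nq, self.nv, self.nbody, ...) instead of from a separate
# _size_src object.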
class MjModelWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def nq(self):
return self._wrapped.contents.nq
@nq.setter
def nq(self, value):
self._wrapped.contents.nq = value
@property
def nv(self):
return self._wrapped.contents.nv
@nv.setter
def nv(self, value):
self._wrapped.contents.nv = value
@property
def nu(self):
return self._wrapped.contents.nu
@nu.setter
def nu(self, value):
self._wrapped.contents.nu = value
@property
def na(self):
return self._wrapped.contents.na
@na.setter
def na(self, value):
self._wrapped.contents.na = value
@property
def nbody(self):
return self._wrapped.contents.nbody
@nbody.setter
def nbody(self, value):
self._wrapped.contents.nbody = value
@property
def njnt(self):
return self._wrapped.contents.njnt
@njnt.setter
def njnt(self, value):
self._wrapped.contents.njnt = value
@property
def ngeom(self):
return self._wrapped.contents.ngeom
@ngeom.setter
def ngeom(self, value):
self._wrapped.contents.ngeom = value
@property
def nsite(self):
return self._wrapped.contents.nsite
@nsite.setter
def nsite(self, value):
self._wrapped.contents.nsite = value
@property
def ncam(self):
return self._wrapped.contents.ncam
@ncam.setter
def ncam(self, value):
self._wrapped.contents.ncam = value
@property
def nlight(self):
return self._wrapped.contents.nlight
@nlight.setter
def nlight(self, value):
self._wrapped.contents.nlight = value
@property
def nmesh(self):
return self._wrapped.contents.nmesh
@nmesh.setter
def nmesh(self, value):
self._wrapped.contents.nmesh = value
@property
def nmeshvert(self):
return self._wrapped.contents.nmeshvert
@nmeshvert.setter
def nmeshvert(self, value):
self._wrapped.contents.nmeshvert = value
@property
def nmeshface(self):
return self._wrapped.contents.nmeshface
@nmeshface.setter
def nmeshface(self, value):
self._wrapped.contents.nmeshface = value
@property
def nmeshgraph(self):
return self._wrapped.contents.nmeshgraph
@nmeshgraph.setter
def nmeshgraph(self, value):
self._wrapped.contents.nmeshgraph = value
@property
def nhfield(self):
return self._wrapped.contents.nhfield
@nhfield.setter
def nhfield(self, value):
self._wrapped.contents.nhfield = value
@property
def nhfielddata(self):
return self._wrapped.contents.nhfielddata
@nhfielddata.setter
def nhfielddata(self, value):
self._wrapped.contents.nhfielddata = value
@property
def ntex(self):
return self._wrapped.contents.ntex
@ntex.setter
def ntex(self, value):
self._wrapped.contents.ntex = value
@property
def ntexdata(self):
return self._wrapped.contents.ntexdata
@ntexdata.setter
def ntexdata(self, value):
self._wrapped.contents.ntexdata = value
@property
def nmat(self):
return self._wrapped.contents.nmat
@nmat.setter
def nmat(self, value):
self._wrapped.contents.nmat = value
@property
def npair(self):
return self._wrapped.contents.npair
@npair.setter
def npair(self, value):
self._wrapped.contents.npair = value
@property
def nexclude(self):
return self._wrapped.contents.nexclude
@nexclude.setter
def nexclude(self, value):
self._wrapped.contents.nexclude = value
@property
def neq(self):
return self._wrapped.contents.neq
@neq.setter
def neq(self, value):
self._wrapped.contents.neq = value
@property
def ntendon(self):
return self._wrapped.contents.ntendon
@ntendon.setter
def ntendon(self, value):
self._wrapped.contents.ntendon = value
@property
def nwrap(self):
return self._wrapped.contents.nwrap
@nwrap.setter
def nwrap(self, value):
self._wrapped.contents.nwrap = value
@property
def nsensor(self):
return self._wrapped.contents.nsensor
@nsensor.setter
def nsensor(self, value):
self._wrapped.contents.nsensor = value
@property
def nnumeric(self):
return self._wrapped.contents.nnumeric
@nnumeric.setter
def nnumeric(self, value):
self._wrapped.contents.nnumeric = value
@property
def nnumericdata(self):
return self._wrapped.contents.nnumericdata
@nnumericdata.setter
def nnumericdata(self, value):
self._wrapped.contents.nnumericdata = value
@property
def ntext(self):
return self._wrapped.contents.ntext
@ntext.setter
def ntext(self, value):
self._wrapped.contents.ntext = value
@property
def ntextdata(self):
return self._wrapped.contents.ntextdata
@ntextdata.setter
def ntextdata(self, value):
self._wrapped.contents.ntextdata = value
@property
def nkey(self):
return self._wrapped.contents.nkey
@nkey.setter
def nkey(self, value):
self._wrapped.contents.nkey = value
@property
def nuser_body(self):
return self._wrapped.contents.nuser_body
@nuser_body.setter
def nuser_body(self, value):
self._wrapped.contents.nuser_body = value
@property
def nuser_jnt(self):
return self._wrapped.contents.nuser_jnt
@nuser_jnt.setter
def nuser_jnt(self, value):
self._wrapped.contents.nuser_jnt = value
@property
def nuser_geom(self):
return self._wrapped.contents.nuser_geom
@nuser_geom.setter
def nuser_geom(self, value):
self._wrapped.contents.nuser_geom = value
@property
def nuser_site(self):
return self._wrapped.contents.nuser_site
@nuser_site.setter
def nuser_site(self, value):
self._wrapped.contents.nuser_site = value
@property
def nuser_tendon(self):
return self._wrapped.contents.nuser_tendon
@nuser_tendon.setter
def nuser_tendon(self, value):
self._wrapped.contents.nuser_tendon = value
@property
def nuser_actuator(self):
return self._wrapped.contents.nuser_actuator
@nuser_actuator.setter
def nuser_actuator(self, value):
self._wrapped.contents.nuser_actuator = value
@property
def nuser_sensor(self):
return self._wrapped.contents.nuser_sensor
@nuser_sensor.setter
def nuser_sensor(self, value):
self._wrapped.contents.nuser_sensor = value
@property
def nnames(self):
return self._wrapped.contents.nnames
@nnames.setter
def nnames(self, value):
self._wrapped.contents.nnames = value
@property
def nM(self):
return self._wrapped.contents.nM
@nM.setter
def nM(self, value):
self._wrapped.contents.nM = value
@property
def nemax(self):
return self._wrapped.contents.nemax
@nemax.setter
def nemax(self, value):
self._wrapped.contents.nemax = value
@property
def njmax(self):
return self._wrapped.contents.njmax
@njmax.setter
def njmax(self, value):
self._wrapped.contents.njmax = value
@property
def nconmax(self):
return self._wrapped.contents.nconmax
@nconmax.setter
def nconmax(self, value):
self._wrapped.contents.nconmax = value
@property
def nstack(self):
return self._wrapped.contents.nstack
@nstack.setter
def nstack(self, value):
self._wrapped.contents.nstack = value
@property
def nuserdata(self):
return self._wrapped.contents.nuserdata
@nuserdata.setter
def nuserdata(self, value):
self._wrapped.contents.nuserdata = value
@property
def nmocap(self):
return self._wrapped.contents.nmocap
@nmocap.setter
def nmocap(self, value):
self._wrapped.contents.nmocap = value
@property
def nsensordata(self):
return self._wrapped.contents.nsensordata
@nsensordata.setter
def nsensordata(self, value):
self._wrapped.contents.nsensordata = value
@property
def nbuffer(self):
return self._wrapped.contents.nbuffer
@nbuffer.setter
def nbuffer(self, value):
self._wrapped.contents.nbuffer = value
@property
def opt(self):
return self._wrapped.contents.opt
@opt.setter
def opt(self, value):
self._wrapped.contents.opt = value
@property
def vis(self):
return self._wrapped.contents.vis
@vis.setter
def vis(self, value):
self._wrapped.contents.vis = value
@property
def stat(self):
return self._wrapped.contents.stat
@stat.setter
def stat(self, value):
self._wrapped.contents.stat = value
@property
def buffer(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.buffer, dtype=np.uint8, count=(self.nbuffer)), (self.nbuffer, ))
arr.setflags(write=False)
return arr
@buffer.setter
def buffer(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.buffer, val_ptr, self.nbuffer * sizeof(c_ubyte))
@property
def qpos0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qpos0, dtype=np.double, count=(self.nq*1)), (self.nq, 1, ))
arr.setflags(write=False)
return arr
@qpos0.setter
def qpos0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qpos0, val_ptr, self.nq*1 * sizeof(c_double))
@property
def qpos_spring(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qpos_spring, dtype=np.double, count=(self.nq*1)), (self.nq, 1, ))
arr.setflags(write=False)
return arr
@qpos_spring.setter
def qpos_spring(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qpos_spring, val_ptr, self.nq*1 * sizeof(c_double))
@property
def body_parentid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_parentid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_parentid.setter
def body_parentid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_parentid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_rootid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_rootid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_rootid.setter
def body_rootid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_rootid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_weldid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_weldid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_weldid.setter
def body_weldid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_weldid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_mocapid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_mocapid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_mocapid.setter
def body_mocapid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_mocapid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_jntnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_jntnum, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_jntnum.setter
def body_jntnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_jntnum, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_jntadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_jntadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_jntadr.setter
def body_jntadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_jntadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_dofnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_dofnum, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_dofnum.setter
def body_dofnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_dofnum, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_dofadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_dofadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_dofadr.setter
def body_dofadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_dofadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_geomnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_geomnum, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_geomnum.setter
def body_geomnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_geomnum, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_geomadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_geomadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_geomadr.setter
def body_geomadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_geomadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_pos, dtype=np.double, count=(self.nbody*3)), (self.nbody, 3, ))
arr.setflags(write=False)
return arr
@body_pos.setter
def body_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_pos, val_ptr, self.nbody*3 * sizeof(c_double))
@property
def body_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_quat, dtype=np.double, count=(self.nbody*4)), (self.nbody, 4, ))
arr.setflags(write=False)
return arr
@body_quat.setter
def body_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_quat, val_ptr, self.nbody*4 * sizeof(c_double))
@property
def body_ipos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_ipos, dtype=np.double, count=(self.nbody*3)), (self.nbody, 3, ))
arr.setflags(write=False)
return arr
@body_ipos.setter
def body_ipos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_ipos, val_ptr, self.nbody*3 * sizeof(c_double))
@property
def body_iquat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_iquat, dtype=np.double, count=(self.nbody*4)), (self.nbody, 4, ))
arr.setflags(write=False)
return arr
@body_iquat.setter
def body_iquat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_iquat, val_ptr, self.nbody*4 * sizeof(c_double))
@property
def body_mass(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_mass, dtype=np.double, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_mass.setter
def body_mass(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_mass, val_ptr, self.nbody*1 * sizeof(c_double))
@property
def body_inertia(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_inertia, dtype=np.double, count=(self.nbody*3)), (self.nbody, 3, ))
arr.setflags(write=False)
return arr
@body_inertia.setter
def body_inertia(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_inertia, val_ptr, self.nbody*3 * sizeof(c_double))
@property
def body_invweight0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_invweight0, dtype=np.double, count=(self.nbody*2)), (self.nbody, 2, ))
arr.setflags(write=False)
return arr
@body_invweight0.setter
def body_invweight0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_invweight0, val_ptr, self.nbody*2 * sizeof(c_double))
@property
def body_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_user, dtype=np.double, count=(self.nbody*self.nuser_body)), (self.nbody, self.nuser_body, ))
arr.setflags(write=False)
return arr
@body_user.setter
def body_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_user, val_ptr, self.nbody*self.nuser_body * sizeof(c_double))
@property
def jnt_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_type, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_type.setter
def jnt_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_type, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_qposadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_qposadr, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_qposadr.setter
def jnt_qposadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_qposadr, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_dofadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_dofadr, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_dofadr.setter
def jnt_dofadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_dofadr, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_bodyid, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_bodyid.setter
def jnt_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_bodyid, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_limited(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_limited, dtype=np.uint8, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_limited.setter
def jnt_limited(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.jnt_limited, val_ptr, self.njnt*1 * sizeof(c_ubyte))
@property
def jnt_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_solref, dtype=np.double, count=(self.njnt*2)), (self.njnt, 2, ))
arr.setflags(write=False)
return arr
@jnt_solref.setter
def jnt_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_solref, val_ptr, self.njnt*2 * sizeof(c_double))
@property
def jnt_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_solimp, dtype=np.double, count=(self.njnt*3)), (self.njnt, 3, ))
arr.setflags(write=False)
return arr
@jnt_solimp.setter
def jnt_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_solimp, val_ptr, self.njnt*3 * sizeof(c_double))
@property
def jnt_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_pos, dtype=np.double, count=(self.njnt*3)), (self.njnt, 3, ))
arr.setflags(write=False)
return arr
@jnt_pos.setter
def jnt_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_pos, val_ptr, self.njnt*3 * sizeof(c_double))
@property
def jnt_axis(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_axis, dtype=np.double, count=(self.njnt*3)), (self.njnt, 3, ))
arr.setflags(write=False)
return arr
@jnt_axis.setter
def jnt_axis(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_axis, val_ptr, self.njnt*3 * sizeof(c_double))
@property
def jnt_stiffness(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_stiffness, dtype=np.double, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_stiffness.setter
def jnt_stiffness(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_stiffness, val_ptr, self.njnt*1 * sizeof(c_double))
@property
def jnt_range(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_range, dtype=np.double, count=(self.njnt*2)), (self.njnt, 2, ))
arr.setflags(write=False)
return arr
@jnt_range.setter
def jnt_range(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_range, val_ptr, self.njnt*2 * sizeof(c_double))
@property
def jnt_margin(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_margin, dtype=np.double, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_margin.setter
def jnt_margin(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_margin, val_ptr, self.njnt*1 * sizeof(c_double))
@property
def jnt_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_user, dtype=np.double, count=(self.njnt*self.nuser_jnt)), (self.njnt, self.nuser_jnt, ))
arr.setflags(write=False)
return arr
@jnt_user.setter
def jnt_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_user, val_ptr, self.njnt*self.nuser_jnt * sizeof(c_double))
@property
def dof_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_bodyid, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_bodyid.setter
def dof_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_bodyid, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_jntid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_jntid, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_jntid.setter
def dof_jntid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_jntid, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_parentid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_parentid, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_parentid.setter
def dof_parentid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_parentid, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_Madr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_Madr, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_Madr.setter
def dof_Madr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_Madr, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_frictional(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_frictional, dtype=np.uint8, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_frictional.setter
def dof_frictional(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.dof_frictional, val_ptr, self.nv*1 * sizeof(c_ubyte))
@property
def dof_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_solref, dtype=np.double, count=(self.nv*2)), (self.nv, 2, ))
arr.setflags(write=False)
return arr
@dof_solref.setter
def dof_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_solref, val_ptr, self.nv*2 * sizeof(c_double))
@property
def dof_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_solimp, dtype=np.double, count=(self.nv*3)), (self.nv, 3, ))
arr.setflags(write=False)
return arr
@dof_solimp.setter
def dof_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_solimp, val_ptr, self.nv*3 * sizeof(c_double))
@property
def dof_frictionloss(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_frictionloss, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_frictionloss.setter
def dof_frictionloss(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_frictionloss, val_ptr, self.nv*1 * sizeof(c_double))
@property
def dof_armature(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_armature, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_armature.setter
def dof_armature(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_armature, val_ptr, self.nv*1 * sizeof(c_double))
@property
def dof_damping(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_damping, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_damping.setter
def dof_damping(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_damping, val_ptr, self.nv*1 * sizeof(c_double))
@property
def dof_invweight0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_invweight0, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_invweight0.setter
def dof_invweight0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_invweight0, val_ptr, self.nv*1 * sizeof(c_double))
@property
def geom_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_type, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_type.setter
def geom_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_type, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_contype(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_contype, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_contype.setter
def geom_contype(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_contype, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_conaffinity(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_conaffinity, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_conaffinity.setter
def geom_conaffinity(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_conaffinity, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_condim(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_condim, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_condim.setter
def geom_condim(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_condim, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_bodyid, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_bodyid.setter
def geom_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_bodyid, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_dataid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_dataid, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_dataid.setter
def geom_dataid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_dataid, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_matid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_matid, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_matid.setter
def geom_matid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_matid, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_group(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_group, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_group.setter
def geom_group(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_group, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_solmix(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_solmix, dtype=np.double, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_solmix.setter
def geom_solmix(self, value):
val_ptr =
| np.array(value, dtype=np.float64) | numpy.array |
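Every getter/setter pair in the MjData and MjModel wrappers above repeats one pattern: the getter copies a C array into a read-only numpy array with np.fromiter, and the setter copies a numpy buffer back into the struct with ctypes.memmove. The following is a minimal standalone sketch of that pattern, not part of the mujoco-py API; DemoStruct, DemoWrapper and their field names are invented purely to illustrate the mechanism.

import numpy as np
from ctypes import Structure, POINTER, c_double, c_int, pointer, memmove, sizeof

class DemoStruct(Structure):
    # hypothetical C struct standing in for mjData / mjModel
    _fields_ = [("n", c_int), ("vals", c_double * 3)]

class DemoWrapper(object):
    def __init__(self, wrapped):
        self._wrapped = wrapped  # a ctypes pointer, as in the wrappers above

    @property
    def vals(self):
        # copy out of the C array into a read-only numpy array
        arr = np.fromiter(self._wrapped.contents.vals, dtype=np.double,
                          count=self._wrapped.contents.n)
        arr.setflags(write=False)
        return arr

    @vals.setter
    def vals(self, value):
        # copy a numpy buffer back into the C array
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.vals, val_ptr,
                self._wrapped.contents.n * sizeof(c_double))

s = DemoStruct(n=3, vals=(c_double * 3)(1.0, 2.0, 3.0))
w = DemoWrapper(pointer(s))
w.vals = [4.0, 5.0, 6.0]
print(w.vals)  # [4. 5. 6.]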
import numpy as np
class Layer:
"""
Contains attributes and methods for a single layer
Attributes:
Layer info: units, prev_units, type, activation
caches: W, b, Z, A, A_prev
gradients: dW, db
"""
# Initialize object for hidden and output layer
def __init__(self, units, activation="relu", type = "hidden"):
self.units = units
self.activation = activation
self.type = type
# Getting the number of units of previous layer
def add_previous(self, prev_units):
self.prev_units = prev_units
# Initialise weights and biases using He initialisation
def initialise_weights(self):
np.random.seed(1)
self.W = np.random.randn(self.units, self.prev_units) * np.sqrt(2/self.prev_units)
self.b = np.zeros((self.units, 1))
# Method to propagate through the neural network
# Input: A_prev
# Output: A
def forward_prop(self, A_prev):
self.A_prev = A_prev
self.Z =
| np.dot(self.W, self.A_prev) | numpy.dot |
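The Layer snippet above stops at the linear term that the completion supplies (np.dot(self.W, self.A_prev)). A common continuation adds the bias and applies the layer's activation; the following is only a hedged standalone sketch of that step, not necessarily the author's exact forward_prop code.

import numpy as np

def relu(Z):
    return np.maximum(0, Z)

np.random.seed(1)
prev_units, units, batch = 3, 4, 5
W = np.random.randn(units, prev_units) * np.sqrt(2 / prev_units)  # He initialisation, as in initialise_weights
b = np.zeros((units, 1))
A_prev = np.random.randn(prev_units, batch)   # 3 input features, batch of 5 examples

Z = np.dot(W, A_prev) + b   # linear step; the completion above supplies the np.dot(...) part
A = relu(Z)                 # activation="relu" is the Layer default
print(Z.shape, A.shape)     # (4, 5) (4, 5)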
import os
import tensorflow as tf
import numpy as np
from tensorflow.contrib.tensorboard.plugins import projector
import argparse
def create_embeddings(sess, log_dir, embedding_file='', tensor_name='embedding'):
""" Add the embeddings to input TensorFlow session and writes a metadata_file containing the words in the vocabulary
:param sess: TF session
:param log_dir: destination directory for the model and metadata (the one to which TensorBoard points)
:param embedding_file: embedding file
:param tensor_name: tensor name
:return:
"""
embedding = None
embedding_dimensions = 0
vocab_size = 0
# write labels
with open(os.path.join(log_dir, tensor_name + '_' + 'metadata.tsv'), 'w') as metadata_file:
with open(embedding_file, 'r') as inputfile:
for i, line in enumerate(inputfile):
line = line.rstrip()
values = line.split()
# the first line is always the header based on what we produce in the embeddings_knn.py
if i == 0:
vocab_size = int(values[0])
embedding_dimensions = int(values[1])
embedding =
| np.empty((vocab_size, embedding_dimensions), dtype=np.float32) | numpy.empty |
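The loop in create_embeddings above reads a word2vec-style text file: the header line gives the vocabulary size and dimensionality (the size that the np.empty completion allocates), and every following line is a word followed by its vector. A small self-contained sketch of that parsing, using a hypothetical in-memory file instead of a real embedding_file, looks like this:

import io
import numpy as np

# hypothetical two-word embedding file in the format the loop above expects
fake_file = io.StringIO("2 3\nhello 0.1 0.2 0.3\nworld 0.4 0.5 0.6\n")

embedding = None
words = []
for i, line in enumerate(fake_file):
    values = line.rstrip().split()
    if i == 0:  # header: "<vocab_size> <embedding_dimensions>"
        vocab_size, embedding_dimensions = int(values[0]), int(values[1])
        embedding = np.empty((vocab_size, embedding_dimensions), dtype=np.float32)
    else:
        words.append(values[0])                               # would go into metadata.tsv
        embedding[i - 1] = np.asarray(values[1:], dtype=np.float32)

print(words, embedding.shape)  # ['hello', 'world'] (2, 3)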
from utils.ontf import Online_NTF
import numpy as np
from sklearn.decomposition import SparseCoder
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']
import matplotlib.font_manager as font_manager
import covid_dataprocess
import itertools
DEBUG = False
class ONMF_timeseries_reconstructor():
def __init__(self,
path,
source,
data_source,
country_list=None,
state_list_test=None,
state_list_train=None,
n_components=100, # number of dictionary elements -- rank
ONMF_iterations=50, # number of iterations for the ONMF algorithm
ONMF_sub_iterations=20, # number of i.i.d. subsampling for each iteration of ONTF
ONMF_batch_size=20, # number of patches used in i.i.d. subsampling
num_patches_perbatch=1000, # number of patches that ONTF algorithm learns from at each iteration
patch_size=7, # length of sliding window
patches_file='',
learn_joint_dict=False,
prediction_length=1,
learnevery=5,
learning_window_cap=None, # if not none, learn from the past "learning_window_cap" days to predict
alpha=None,
beta=None,
subsample=False,
if_covidactnow=False,
if_onlynewcases=False,
if_moving_avg_data=False,
if_log_scale=False):
'''
batch_size = number of patches used for training dictionaries per ONTF iteration
sources: array of filenames to make patches out of
patches_array_filename: numpy array file which contains already read-in images
'''
self.path = path
self.source = source
self.data_source = data_source
self.state_list = state_list_test
self.country_list = country_list
self.n_components = n_components
self.ONMF_iterations = ONMF_iterations
self.ONMF_sub_iterations = ONMF_sub_iterations
self.num_patches_perbatch = num_patches_perbatch
self.ONMF_batch_size = ONMF_batch_size
self.patch_size = patch_size
self.patches_file = patches_file
self.learn_joint_dict = learn_joint_dict
self.prediction_length = prediction_length
self.code = np.zeros(shape=(n_components, num_patches_perbatch))
self.learnevery = learnevery
self.alpha = alpha
self.beta = beta
self.subsample = subsample
self.if_onlynewcases = if_onlynewcases
self.if_moving_avg_data = if_moving_avg_data
self.if_log_scale = if_log_scale
self.input_variable_list = []
self.result_dict = {}
self.state_list_train = state_list_train
self.learning_window_cap = learning_window_cap
input_variable_list = []
if data_source == 'COVID_ACT_NOW':
print('LOADING.. COVID_ACT_NOW')
self.input_variable_list = ['input_hospitalBedsRequired',
'input_ICUBedsInUse',
'input_ventilatorsInUse',
'input_Deaths',
'input_Infected']
self.df = covid_dataprocess.read_data_COVIDactnow_NYT()
self.df = self.truncate_NAN_DataFrame()
self.df = self.moving_avg_log_scale()
self.data_test = self.extract_ndarray_from_DataFrame()
self.result_dict.update({'Data source': 'COVID_ACT_NOW'})
self.result_dict.update({'Full DataFrame': self.df})
self.result_dict.update({'Data array': self.data_test})
self.result_dict.update({'List_states': self.state_list})
self.result_dict.update({'List_variables': self.input_variable_list})
elif data_source == 'COVID_TRACKING_PROJECT':
print('LOADING.. COVID_TRACKING_PROJECT')
self.input_variable_list = ['input_hospitalized_Currently',
'input_inICU_Currently',
'input_daily_test_positive_rate',
'input_daily_cases',
'input_daily_deaths']
# 'input_daily_cases_pct_change']
self.df = covid_dataprocess.read_data_COVIDtrackingProject()
self.df = self.truncate_NAN_DataFrame()
self.df = self.moving_avg_log_scale()
self.extract_ndarray_from_DataFrame()
self.result_dict.update({'Full DataFrame': self.df})
self.result_dict.update({'Data array': self.data_test})
self.result_dict.update({'List_states': self.state_list})
self.result_dict.update({'List_variables': self.input_variable_list})
self.result_dict.update({'Data array (train)': self.data_train})
self.result_dict.update({'List_states (train)': self.state_list_train})
else: ### JHU data
print('LOADING.. JHU Data')
self.data_test = self.combine_data(self.source)
print('data', self.data_test)
print('data.shape', self.data_test.shape)
self.ntf = Online_NTF(self.data_test, self.n_components,
iterations=self.ONMF_sub_iterations,
learn_joint_dict=True,
mode=3,
ini_dict=None,
ini_A=None,
ini_B=None,
batch_size=self.ONMF_batch_size)
self.W = np.zeros(shape=(self.data_test.shape[0] * self.data_test.shape[2] * patch_size, n_components))
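# After __init__, the data lives in a few places: self.df is a dict of per-state
# DataFrames, self.data_test (and, for COVID_TRACKING_PROJECT, self.data_train) is an
# ndarray of shape (# states) x (# days) x (# variables), while the JHU branch instead
# builds (# countries) x (# days) x (# source files) via combine_data().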
def moving_avg_log_scale(self):
df = self.df
if self.if_moving_avg_data:
for state in self.state_list:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = df2.rolling(window=5, win_type=None).sum() / 5 ### moving average with backward window size 5
df2 = df2.fillna(0)
df1[self.input_variable_list] = df2
df.update({state: df1})
if self.if_log_scale:
for state in self.state_list:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = np.log(df2 + 1)
df1[self.input_variable_list] = df2
df.update({state: df1})
return df
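# Note on moving_avg_log_scale: rolling(window=5).sum() / 5 is a 5-day trailing mean;
# the first four days have no full window, become NaN, and are then zero-filled by
# fillna(0). The log scale uses log(x + 1) so that zero counts map to zero.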
def truncate_NAN_DataFrame(self):
df = self.df.copy()
### Take the maximal sub-dataframe that does not contain NAN
### If some state has all NANs for some variable, that variable is dropped from input_list_variable
start_dates = []
end_dates = []
input_variable_list_noNAN = self.input_variable_list.copy()
for column in self.input_variable_list:  # iterate over the original list so removals from the copy cannot skip columns
for state in self.state_list:
df1 = df.get(state)
if df1[column].isnull().all() and column in input_variable_list_noNAN:
input_variable_list_noNAN.remove(column)
self.input_variable_list = input_variable_list_noNAN
print('!!! New input_variable_list', self.input_variable_list)
state_list_combined = self.state_list
state_list_combined = list(set(state_list_combined))
if self.state_list_train is not None:
for state in self.state_list_train:
state_list_combined.append(state)
for state in state_list_combined:
df1 = df.get(state)
for column in self.input_variable_list:
l_min = df1[column][df1[column].notnull()].index[0]
l_max = df1[column][df1[column].notnull()].index[-1]
start_dates.append(l_min)
end_dates.append(l_max)
max_min_date = max(start_dates)
min_max_date = min(end_dates)
for state in state_list_combined:
df1 = df.get(state)
df1 = df1[max_min_date:min_max_date]
print('!!! If any value is NAN:', df1.isnull().values.any())
df.update({state: df1})
return df
def extract_ndarray_from_DataFrame(self):
## Make numpy array of shape States x Days x variables
data_combined = []
print('!!! self.state_list', self.state_list)
df = self.df
if self.state_list is None:
self.state_list = sorted([i for i in set(df.keys())])
data_test = []
data_train = []
print('!!! self.state_list', self.state_list)
for state in self.state_list:
df1 = df.get(state)
data_combined = df1[self.input_variable_list].values ## shape Days x variables
data_test.append(data_combined)
for state in self.state_list_train:
df2 = df.get(state)
data_combined = df2[self.input_variable_list].values ## shape Days x variables
data_train.append(data_combined)
data_test = np.asarray(data_test)
self.data_test = np.nan_to_num(data_test, copy=True, nan=0, posinf=1, neginf=0)
print('!!!! data_test.shape', data_test.shape)
data_train = np.asarray(data_train)
self.data_train = np.nan_to_num(data_train, copy=True, nan=0, posinf=1, neginf=0)
print('!!!! data_train.shape', data_train.shape)
def read_data_as_array_countrywise(self, path):
'''
Read input time series as an ndarray
'''
data_full = pd.read_csv(path, delimiter=',').T
data = data_full.values[1:, :]
data = np.delete(data, [1, 2], 0) # delete latitude & longitude
if self.country_list == None:
country_list = [i for i in set(data[0, :])]
country_list = sorted(country_list) # whole countries in alphabetical order
else:
country_list = self.country_list
### merge data according to country
data_new = np.zeros(shape=(data.shape[0] - 1, len(country_list)))
for i in np.arange(len(country_list)):
idx = np.where(data[0, :] == country_list[i])
data_sub = data[1:, idx]
data_sub = data_sub[:, 0, :]
data_sub = np.sum(data_sub, axis=1)
data_new[:, i] = data_sub
data_new = data_new.astype(int)
if self.country_list == None:
idx = np.where(data_new[-1, :] > 1000)
data_new = data_new[:, idx]
data_new = data_new[:, 0, :]
# data_new[:,1] = np.zeros(data_new.shape[0])
print('data_new', data_new)
country_list = [country_list[idx[0][i]] for i in range(len(idx[0]))]
print('country_list', country_list)
if self.if_onlynewcases:
data_new = np.diff(data_new, axis=0)
if self.if_moving_avg_data:
for i in np.arange(5, data_new.T.shape[1]):
data_new.T[:, i] = (data_new.T[:, i] + data_new.T[:, i - 1] + data_new.T[:, i - 2] + data_new.T[:, i - 3] + data_new.T[:, i - 4]) / 5
# A_recons[:, i] = (A_recons[:, i] + A_recons[:, i-1]) / 2
if self.if_log_scale:
data_new = np.log(data_new + 1)
return data_new.T, country_list
def read_data_COVIDactnow(self, path, use_NYT_cases=False):
'''
Read input time series data as a dictionary of pandas dataframe
COVIDACTNOW provides SYNTHETIC (model-projected) data!
That's why the cases and deaths are off from the real data, especially for the NO_INTERVENTION case
'''
data = pd.read_csv(path, delimiter=',')
df = {}
data_NYT = pd.read_csv("Data/NYT_us-states.csv", delimiter=',')
if self.state_list == None:
self.state_list = sorted([i for i in set([i for i in data['stateName']])])
### Find earliest starting date of the data
start_dates = []
for state in self.state_list:
df1 = data.loc[data['stateName'] == state]
start_dates.append(min(df1['date']))
max_min_date = max(start_dates)
# print('!!! min_dates', max_min_date)
for state in self.state_list:
df1 = data.loc[data['stateName'] == state].set_index('date')
lastUpdatedDate = df1['lastUpdatedDate'].iloc[0]
df1 = df1[max_min_date:lastUpdatedDate]
df1['input_hospitalBedsRequired'] = df1['hospitalBedsRequired']
df1['input_ICUBedsInUse'] = df1['ICUBedsInUse']
df1['input_ventilatorsInUse'] = df1['ventilatorsInUse']
if not use_NYT_cases:
df1['input_Deaths'] = df1['cumulativeDeaths']
df1['input_Infected'] = df1['cumulativeInfected']
else:
df_NYT1 = data_NYT.loc[data_NYT['state'] == state].set_index('date')
df1['input_Deaths'] = df_NYT1['deaths']
print('!!! df_NYT1', df_NYT1['deaths'])
df1['input_Infected'] = df_NYT1['cases']
print('!!! df_NYT1_cases', df_NYT1['cases'])
### Take the maximal sub-dataframe that does not contain NAN
max_index = []
for column in self.input_variable_list:
l = df1[column][df1[column].notnull()].index[-1]
max_index.append(l)
max_index = min(max_index)
print('!!! max_index', max_index)
df1 = df1[:max_index]
print('!!! If any value is NAN:', df1.isnull().values.any())
df.update({state: df1})
if self.if_onlynewcases:
for state in self.state_list:
df1 = df.get(state)
# df1[input_variable_list] contains 153 rows and 5 columns
df1['input_Infected'] = df1['input_Infected'].diff()
df1['input_Deaths'] = df1['input_Deaths'].diff()
df1 = df1.fillna(0)
df.update({state: df1})
if self.if_moving_avg_data:
for state in self.state_list:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = df2.rolling(window=5, win_type=None).sum() / 5 ### moving average with backward window size 5
df2 = df2.fillna(0)
df1[self.input_variable_list] = df2
df.update({state: df1})
if self.if_log_scale:
for state in self.state_list:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = np.log(df2 + 1)
df1[self.input_variable_list] = df2
df.update({state: df1})
self.df = df
## Make numpy array of shape States x Days x variables
data_combined = []
for state in self.state_list:
df1 = df.get(state)
if state == self.state_list[0]:
data_combined = df1[self.input_variable_list].values ## shape Days x variables
data_combined = np.expand_dims(data_combined, axis=0)
print('!!!Data_combined.shape', data_combined.shape)
else:
data_new = df1[self.input_variable_list].values ## shape Days x variables
data_new = np.expand_dims(data_new, axis=0)
print('!!! Data_new.shape', data_new.shape)
data_combined = np.append(data_combined, data_new, axis=0)
self.data_test = data_combined
return df, data_combined
def read_data_COVIDtrackingProject(self, path):
'''
Read input time series data as a dictionary of pandas dataframe
'''
data = pd.read_csv(path, delimiter=',').sort_values(by="date")
### Convert the format of dates from string to datetime
data['date'] = pd.to_datetime(data['date'], format='%Y%m%d', utc=False)
df = {}
if self.state_list == None:
self.state_list = sorted([i for i in set([i for i in data['state']])])
### Find earliest starting date of the data
start_dates = []
for state in self.state_list:
df1 = data.loc[data['state'] == state]
start_dates.append(min(df1['date']).strftime("%Y-%m-%d"))
max_min_date = max(start_dates)
print('!!! min_dates', max_min_date)
for state in self.state_list:
df1 = data.loc[data['state'] == state].set_index('date')
# lastUpdatedDate = df1['lastUpdateEt'].iloc[0]
df1 = df1[max_min_date:]
### making new columns to process columns of interest and preserve the original data
df1['input_onVentilator_Increase'] = df1['onVentilatorCumulative']
df1['input_inICU_Increase'] = df1['inIcuCumulative']
df1['input_test_positive_rate'] = df1['positiveTestsViral'] / df1['totalTestsViral']
df1['input_case_Increase'] = df1['positiveIncrease']
df1['input_death_Increase'] = df1['deathIncrease']
df.update({state: df1})
if self.if_moving_avg_data:
for state in self.state_list:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = df2.rolling(window=5, win_type=None).sum() / 5 ### moving average with backward window size 5
df2 = df2.fillna(0)
df1[self.input_variable_list] = df2
df.update({state: df1})
if self.if_log_scale:
for state in self.state_list:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = np.log(df2 + 1)
df1[self.input_variable_list] = df2
df.update({state: df1})
self.df = df
## Make numpy array of shape States x Days x variables
data_combined = []
for state in self.state_list:
df1 = df.get(state)
if state == self.state_list[0]:
data_combined = df1[self.input_variable_list].values ## shape Days x variables
data_combined = np.expand_dims(data_combined, axis=0)
print('!!!Data_combined.shape', data_combined.shape)
else:
data_new = df1[self.input_variable_list].values ## shape Days x variables
data_new = np.expand_dims(data_new, axis=0)
print('!!! Data_new.shape', data_new.shape)
data_combined = np.append(data_combined, data_new, axis=0)
self.data_test = data_combined
return df, data_combined
def read_data_as_array_citywise(self, path):
'''
Read input time series as an array
'''
data_full = pd.read_csv(path, delimiter=',').T
data = data_full.values
data = np.delete(data, [2, 3], 0) # delete latitude & longitude
idx = np.where((data[1, :] == 'Korea, South') | (data[1, :] == 'Japan'))
data_sub = data[:, idx]
data_sub = data_sub[:, 0, :]
data_new = data_sub[2:, :].astype(int)
idx = np.where(data_new[-1, :] > 0)
data_new = data_new[:, idx]
data_new = data_new[:, 0, :]
# data_new[:,1] = np.zeros(data_new.shape[0])
city_list = data_sub[0, idx][0]
print('city_list', city_list)
return data_new.T, city_list
def combine_data(self, source):
if len(source) == 1:
for path in source:
data, self.country_list = self.read_data_as_array_countrywise(path)
data_combined = np.expand_dims(data, axis=2)
else:
path = source[0]
data, self.country_list = self.read_data_as_array_countrywise(path)
data_combined = np.empty(shape=[data.shape[0], data.shape[1], 1])
for path in source:
data_new = self.read_data_as_array_countrywise(path)[0]
data_new = np.expand_dims(data_new, axis=2)
# print('data_new.shape', data_new.shape)
min_length = np.minimum(data_combined.shape[1], data_new.shape[1])
data_combined = np.append(data_combined[:, 0:min_length, :], data_new[:, 0:min_length, :], axis=2)
data_combined = data_combined[:, :, 1:]
print('data_combined.shape', data_combined.shape)
# data_full.replace(np.nan, 0) ### replace all NANs with 0
### Replace all NANs in data_combined with 0
where_are_NaNs = np.isnan(data_combined)
data_combined[where_are_NaNs] = 0
return data_combined
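# combine_data stacks one (# countries) x (# days) matrix per source file along a
# third axis, truncating every source to the shortest common time span, so the JHU
# result has shape (# countries) x (# common days) x (# source files)
# (e.g. confirmed / deaths / recovered).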
def extract_random_patches(self, batch_size=None, time_interval_initial=None, A=None):
'''
Extract 'num_patches_perbatch' random patches (sliding-window segments), each of length 'patch_size'
'''
x = self.data_test.shape # shape = (# states) x (# days) x (# variables)
k = self.patch_size
if batch_size is None:
num_patches_perbatch = self.num_patches_perbatch
else:
num_patches_perbatch = batch_size
X = np.zeros(shape=(x[0], k, x[2], 1)) # (# states) x (window length) x (# variables) x 1; grown along the last axis below
for i in np.arange(num_patches_perbatch):
if time_interval_initial is None:
a = np.random.choice(x[1] - k) # starting time of a window patch of length k
else:
a = time_interval_initial + i
if A is None:
Y = self.data_train[:, a:a + k, :] # shape 2 * k * x[2]
else:
Y = A[:, a:a + k, :] # shape 2 * k * x[2]
Y = Y[:, :, :, np.newaxis]
# print('Y.shape', Y.shape)
if i == 0:
X = Y
else:
X = np.append(X, Y, axis=3) # x is class ndarray
return X # X.shape = (# states, patch_size, # variables, num_patches_perbatch)
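# Shape walk-through with illustrative numbers (not taken from the data): with
# 10 states, 5 variables and patch_size k = 7, each Y above is (10, 7, 5, 1);
# appending num_patches_perbatch = 1000 of them along the last axis yields
# X of shape (10, 7, 5, 1000).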
def extract_patches_interval(self, time_interval_initial, time_interval_terminal, A=None):
'''
Extract a given number of patches (segments) of size 'patch_size' during the given interval
X.shape = (# states) x (# window length) x (# variables) x (num_patches_perbatch)
'''
x = self.data_test.shape # shape = (# states) x (# days) x (# variables)
k = self.patch_size # num of consecutive days to form a single patch = window length
X = np.zeros(
shape=(x[0], k, x[2], 1)) # (# states) x (# window length) x (# variables) x (num_patches_perbatch)
for i in np.arange(self.num_patches_perbatch):
a = np.random.choice(np.arange(time_interval_initial, time_interval_terminal - k + 1))
if A is None:
Y = self.data_train[:, a:a + k, :] # shape 2 * k * x[2]
else:
Y = A[:, a:a + k, :] # shape 2 * k * x[2]
Y = Y[:, :, :, np.newaxis]
# print('Y.shape', Y.shape)
if i == 0:
X = Y
else:
X = np.append(X, Y, axis=3) # x is class ndarray
return X
def data_to_patches(self):
'''
args:
path (string): Path and filename of input time series data
patch_size (int): length of sliding window we are extracting from the time series (data)
returns:
'''
if DEBUG:
print(np.asarray(self.data_test))
patches = self.extract_random_patches()
print('patches.shape=', patches.shape)
return patches
def display_dictionary(self, W, cases, if_show, if_save, foldername, filename=None, custom_code4ordering=None):
k = self.patch_size
x = self.data_test.shape
rows = np.floor(np.sqrt(self.n_components)).astype(int)
cols = np.ceil(np.sqrt(self.n_components)).astype(int)
'''
fig, axs = plt.subplots(nrows=4, ncols=3, figsize=(4, 3.5),
subplot_kw={'xticks': [], 'yticks': []})
'''
fig, axs = plt.subplots(nrows=6, ncols=4, figsize=(4, 4.5),
subplot_kw={'xticks': [], 'yticks': []})
print('W.shape', W.shape)
code = self.code
# print('code', code)
importance = np.sum(code, axis=1) / sum(sum(code))
if self.if_log_scale:
W = np.exp(W) - 1
if custom_code4ordering is None:
idx = np.argsort(importance)
idx = np.flip(idx)
else:
custom_importance = np.sum(custom_code4ordering, axis=1) / sum(sum(custom_code4ordering))
idx = np.argsort(custom_importance)
idx = np.flip(idx)
# print('W', W)
if cases == 'confirmed':
c = 0
elif cases == 'death':
c = 1
else:
c = 2
for axs, i in zip(axs.flat, range(self.n_components)):
dict = W[:, idx[i]].reshape(x[0], k, x[2])
# print('x.shape', x)
for j in np.arange(dict.shape[0]):
country_name = self.country_list[j]
marker = ''
if country_name == 'Korea, South':
marker = '*'
elif country_name == 'China':
marker = 'x'
elif country_name == 'US':
marker = '^'
axs.plot(np.arange(k), dict[j, :, c], marker=marker, label='' + str(country_name))
axs.set_xlabel('%1.2f' % importance[idx[i]], fontsize=13) # get the largest first
axs.xaxis.set_label_coords(0.5, -0.05) # adjust location of importance appearing beneath patches
handles, labels = axs.get_legend_handles_labels()
fig.legend(handles, labels, loc='center right') ## bbox_to_anchor=(0,0)
# plt.suptitle(cases + '-Temporal Dictionary of size %d'% k, fontsize=16)
# plt.subplots_adjust(left=0.01, right=0.55, bottom=0.05, top=0.99, wspace=0.1, hspace=0.4) # for 24 atoms
plt.subplots_adjust(left=0.01, right=0.62, bottom=0.1, top=0.99, wspace=0.1, hspace=0.4) # for 12 atoms
# plt.tight_layout()
if if_save:
if filename is None:
plt.savefig('Time_series_dictionary/' + str(foldername) + '/Dict-' + cases + '.png')
else:
plt.savefig(
'Time_series_dictionary/' + str(foldername) + '/Dict-' + cases + '_' + str(filename) + '.png')
if if_show:
plt.show()
def display_dictionary_Hospital(self, W, state_name, if_show, if_save, foldername, filename=None,
custom_code4ordering=None):
k = self.patch_size
x = self.data_test.shape
rows = np.floor(np.sqrt(self.n_components)).astype(int)
cols = np.ceil(np.sqrt(self.n_components)).astype(int)
fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=(6, 6),
subplot_kw={'xticks': [], 'yticks': []})
print('W.shape', W.shape)
code = self.code
# print('code', code)
importance = np.sum(code, axis=1) / sum(sum(code))
if self.if_log_scale:
W = np.exp(W) - 1
if custom_code4ordering is None:
idx = np.argsort(importance)
idx = np.flip(idx)
else:
custom_importance = np.sum(custom_code4ordering, axis=1) / sum(sum(custom_code4ordering))
idx = np.argsort(custom_importance)
idx = np.flip(idx)
for axs, i in zip(axs.flat, range(self.n_components)):
dict = W[:, idx[i]].reshape(x[0], k, x[2])
# print('x.shape', x)
j = self.state_list.index(state_name)
marker_list = itertools.cycle(('*', 'x', '^', 'o', '|', '+'))
for c in np.arange(dict.shape[2]):
variable_name = self.input_variable_list[c]
variable_name = variable_name.replace('input_', '')
axs.plot(np.arange(k), dict[j, :, c], marker=next(marker_list), label=variable_name)
axs.set_xlabel('%1.2f' % importance[idx[i]], fontsize=13) # get the largest first
axs.xaxis.set_label_coords(0.5, -0.05) # adjust location of importance appearing beneath patches
handles, labels = axs.get_legend_handles_labels()
fig.legend(handles, labels, loc='center right') ## bbox_to_anchor=(0,0)
plt.suptitle(str(state_name) + '-Temporal Dictionary of size %d' % k, fontsize=16)
# plt.subplots_adjust(left=0.01, right=0.55, bottom=0.05, top=0.99, wspace=0.1, hspace=0.4) # for 24 atoms
plt.subplots_adjust(left=0.01, right=0.62, bottom=0.1, top=0.8, wspace=0.1, hspace=0.4) # for 12 atoms
# plt.tight_layout()
if if_save:
if filename is None:
plt.savefig('Time_series_dictionary/' + str(foldername) + '/Dict-' + str(state_name) + '.png')
else:
plt.savefig('Time_series_dictionary/' + str(foldername) + '/Dict-' + str(state_name) + '_' + str(
filename) + '.png')
if if_show:
plt.show()
def display_dictionary_single(self, W, if_show, if_save, foldername, filename, custom_code4ordering=None):
k = self.patch_size
x = self.data_test.shape
code = self.code
# print('code', code)
importance = np.sum(code, axis=1) / sum(sum(code))
if self.if_log_scale:
W = np.exp(W) - 1
if custom_code4ordering is None:
idx = np.argsort(importance)
idx = np.flip(idx)
else:
custom_importance = np.sum(custom_code4ordering, axis=1) / sum(sum(custom_code4ordering))
idx = np.argsort(custom_importance)
idx = np.flip(idx)
# rows = np.floor(np.sqrt(self.n_components)).astype(int)
# cols = np.ceil(np.sqrt(self.n_components)).astype(int)
fig, axs = plt.subplots(nrows=4, ncols=3, figsize=(4, 3),
subplot_kw={'xticks': [], 'yticks': []})
print('W.shape', W.shape)
# print('W', W)
for axs, i in zip(axs.flat, range(self.n_components)):
for c in np.arange(x[2]):
if c == 0:
cases = 'confirmed'
elif c == 1:
cases = 'death'
else:
cases = 'recovered'
dict = W[:, idx[i]].reshape(x[0], k, x[2]) ### atoms with highest importance appears first
for j in np.arange(dict.shape[0]):
if c == 0:
marker = '*'
elif c == 1:
marker = 'x'
else:
marker = 's'
axs.plot(np.arange(k), dict[j, :, c], marker=marker, label='' + str(cases))
axs.set_xlabel('%1.2f' % importance[idx[i]], fontsize=14) # get the largest first
axs.xaxis.set_label_coords(0.5, -0.05) # adjust location of importance appearing beneath patches
handles, labels = axs.get_legend_handles_labels()
fig.legend(handles, labels, loc='lower center') ## bbox_to_anchor=(0,0)
# plt.suptitle(str(self.country_list[0]) + '-Temporal Dictionary of size %d'% k, fontsize=16)
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.3, top=0.99, wspace=0.1, hspace=0.4)
# plt.tight_layout()
if if_save:
plt.savefig('Time_series_dictionary/' + str(foldername) + '/Dict-' + str(self.country_list[0]) + '_' + str(
filename) + '.png')
if if_show:
plt.show()
def display_prediction_evaluation(self, prediction, if_show, if_save, foldername, filename, if_errorbar=True,
if_evaluation=False, title=None):
A = self.data_test
k = self.patch_size
A_recons = prediction
print('!!!!!!!A_recons.shape', A_recons.shape)
A_predict = A_recons.copy()
if if_evaluation:
A_predict1 = A_recons.copy()
### A_recons.shape = (# trials) x (# days) x (# states) x (Future Extrapolation Length) x (# variables)
A_recons1 = np.zeros(shape=(A_predict1.shape[0], A.shape[1] + A_predict1.shape[3], A.shape[0], A.shape[2]))
A_recons1 = A_predict1[:, :, :, -1, :]
# for i in np.arange(0, A_predict1.shape[2]):
# A_recons1[:,i + A_predict1.shape[3],:,:] = A_predict1[:,i,:, -1,:]
# We are making d-days ahead prediction where d = (Future Extrapolation Length) + (prediction_length) -1
a = np.zeros(shape=(A_predict1.shape[0], A_predict1.shape[3] + self.prediction_length, A.shape[0], A.shape[2]))
A_recons1 = np.append(a, A_recons1, axis=1) # Shift A_recons1 by d in time
print('!!!! A.shape[1]+A_predict1.shape[3]', A.shape[1] + A_predict1.shape[3] + self.prediction_length)
print('!!!! A_recons1.shape', A_recons1.shape)
A_recons1 = np.swapaxes(A_recons1, axis1=1, axis2=2)
# fill in first d entries of prediction by the raw data
for trial in np.arange(0, A_predict1.shape[0]):
for j in np.arange(0, A_predict1.shape[3] + self.prediction_length):
A_recons1[trial, :, j, :] = A[:, j, :]
A_recons = A_recons1
A_predict = A_recons.copy()
print('!!!!!!!! A_recons', A_recons)
if self.if_log_scale:
A = np.exp(A) - 1
A_recons = np.exp(A_recons) - 1
if if_errorbar:
# print('!!!', A_predict.shape) # trials x states x days x variables
A_predict = np.sum(A_recons, axis=0) / A_recons.shape[0] ### axis-0 : trials
A_std = np.std(A_recons, axis=0)
print('!!! A_std', A_std)
### Make gridspec
fig1 = plt.figure(figsize=(15, 10), constrained_layout=False)
gs1 = fig1.add_gridspec(nrows=A_predict.shape[2], ncols=A_predict.shape[0], wspace=0.2, hspace=0.2)
# font = font_manager.FontProperties(family="Times New Roman", size=11)
for i in range(A_predict.shape[0]):
for c in range(A_predict.shape[2]):
ax = fig1.add_subplot(gs1[c, i])
variable_name = self.input_variable_list[c]
variable_name = variable_name.replace('input_', '')
### get days xticks
start_day = self.df.get(self.state_list[0]).index[0]
x_data = pd.date_range(start_day, periods=A.shape[1], freq='D')
x_data_recons = pd.date_range(start_day, periods=A_predict.shape[1] - self.patch_size, freq='D')
x_data_recons += pd.DateOffset(self.patch_size)
### plot axs
ax.plot(x_data, A[i, :, c], 'b-', marker='o', markevery=5, label='Original-' + str(variable_name))
if not if_errorbar:
ax.plot(x_data_recons, A_predict[i, self.patch_size:A_predict.shape[1], c],
'r-', marker='x', markevery=5, label='Prediction-' + str(variable_name))
else:
markers, caps, bars = ax.errorbar(x_data_recons,
A_predict[i, self.patch_size:A_predict.shape[1], c],
yerr=A_std[i, self.patch_size:A_predict.shape[1], c],
fmt='r-', label='Prediction-' + str(variable_name), errorevery=1)
[bar.set_alpha(0.5) for bar in bars]
# [cap.set_alpha(0.5) for cap in caps]
ax.set_ylim(0, np.maximum(np.max(A[i, :, c]), np.max(A_predict[i, :, c] + A_std[i, :, c])) * 1.1)
if c == 0:
if title is None:
ax.set_title(str(self.state_list[i]), fontsize=15)
else:
ax.set_title(title, fontsize=15)
ax.yaxis.set_label_position("left")
# ax.yaxis.set_label_coords(0, 2)
# ax.set_ylabel(str(list[j]), rotation=90)
ax.set_ylabel('population', fontsize=10) # get the largest first
ax.yaxis.set_label_position("left")
ax.legend()
fig1.autofmt_xdate()
# fig.suptitle('Plot of original and 1-step prediction -- ' + 'COVID-19 : '+ str(self.country_list[0]) +
# "\n seg. length = %i, # temp. dict. atoms = %i, learning exponent = %1.3f" % (self.patch_size, self.n_components, self.beta),
# fontsize=12, y=0.96)
# plt.tight_layout()
plt.subplots_adjust(left=0.1, right=0.9, bottom=0.1, top=0.95, wspace=0.08, hspace=0.23)
if if_save:
plt.savefig('Time_series_dictionary/' + str(foldername) + '/Plot-' + str(
filename) + '.pdf')
if if_show:
plt.show()
def display_prediction_single(self, source, prediction, if_show, if_save, foldername, filename):
A = self.combine_data(source)[0]
k = self.patch_size
A_predict = prediction
if self.if_log_scale:
A = np.exp(A) - 1
A_predict = np.exp(A_predict) - 1
fig, axs = plt.subplots(nrows=A.shape[2], ncols=1, figsize=(5, 5))
lims = [(np.datetime64('2020-01-21'), np.datetime64('2020-07-15')),
(np.datetime64('2020-01-21'), np.datetime64('2020-07-15')),
(np.datetime64('2020-01-21'), np.datetime64('2020-07-15'))]
if A.shape[2] == 1:
L = zip([axs], np.arange(A.shape[2]))
else:
L = zip(axs.flat, np.arange(A.shape[2]))
for axs, c in L:
if c == 0:
cases = 'confirmed'
elif c == 1:
cases = 'death'
else:
cases = 'recovered'
### get days xticks
x_data = pd.date_range('2020-01-21', periods=A.shape[1], freq='D')
x_data_recons = pd.date_range('2020-01-21', periods=A_predict.shape[1] - self.patch_size, freq='D')
x_data_recons += pd.DateOffset(self.patch_size)
### plot axs
axs.plot(x_data, A[0, :, c], 'b-', marker='o', markevery=5, label='Original-' + str(cases))
axs.plot(x_data_recons, A_predict[0, self.patch_size:A_predict.shape[1], c],
'r-', marker='x', markevery=5, label='Prediction-' + str(cases))
axs.set_ylim(0, np.max(A_predict[0, :, c]) + 10)
# ax.text(2, 0.65, str(list[j]))
axs.yaxis.set_label_position("right")
# ax.yaxis.set_label_coords(0, 2)
# ax.set_ylabel(str(list[j]), rotation=90)
axs.set_ylabel('log(population)', fontsize=10) # get the largest first
axs.yaxis.set_label_position("right")
axs.legend(fontsize=11)
fig.suptitle('Plot of original and 1-step prediction -- ' + 'COVID-19 : ' + str(self.country_list[0]) +
"\n seg. length = %i, # temp. dict. atoms = %i, learning exponent = %1.3f" % (
self.patch_size, self.n_components, self.beta),
fontsize=12, y=0.96)
plt.tight_layout(rect=[0, 0.03, 1, 0.9])
# plt.subplots_adjust(left=0.2, right=0.9, bottom=0.1, top=0.85, wspace=0.08, hspace=0.23)
if if_save:
plt.savefig('Time_series_dictionary/' + str(foldername) + '/Plot-' + str(self.country_list[0]) + '-' + str(
filename) + '.png')
if if_show:
plt.show()
def display_prediction(self, source, prediction, cases, if_show, if_save, foldername, if_errorbar=False):
A = self.combine_data(source)[0]
k = self.patch_size
A_recons = prediction
A_predict = prediction
if self.if_log_scale:
A = np.exp(A) - 1
A_recons = np.exp(A_recons) - 1
A_predict = np.exp(A_predict) - 1
A_std = np.zeros(shape=A_recons.shape)
if if_errorbar:
A_predict = np.sum(A_predict, axis=0) / A_predict.shape[0] ### axis-0 : trials
A_std = np.std(A_recons, axis=0)
# print('A_std', A_std)
L = len(self.country_list) # number of countries
rows = np.floor(np.sqrt(L)).astype(int)
cols = np.ceil(np.sqrt(L)).astype(int)
### get days xticks
x_data = pd.date_range('2020-01-21', periods=A.shape[1], freq='D')
x_data_recons = pd.date_range('2020-01-21', periods=A_predict.shape[1] - self.patch_size, freq='D')
x_data_recons += pd.DateOffset(self.patch_size)
if cases == 'confirmed':
c = 0
elif cases == 'death':
c = 1
else:
c = 2
fig, axs = plt.subplots(nrows=3, ncols=2, figsize=(8, 5))
for axs, j in zip(axs.flat, range(L)):
country_name = self.country_list[j]
if self.country_list[j] == 'Korea, South':
country_name = 'Korea, S.'
axs_empty = axs.plot([], [], ' ', label=str(country_name))
axs_original = axs.plot(x_data, A[j, :, c], 'b-', marker='o', markevery=5, label='Original')
if not if_errorbar:
axs_recons = axs.plot(x_data_recons, A_predict[j, self.patch_size:A_predict.shape[1], c],
'r-', marker='x', markevery=5, label='Prediction-' + str(country_name))
else:
y = A_predict[j, self.patch_size:A_predict.shape[1], c]
axs_recons = axs.errorbar(x_data_recons, y, yerr=A_std[j, self.patch_size:A_predict.shape[1], c],
fmt='r-.', label='Prediction', errorevery=2, )
axs.set_ylim(0, np.maximum(np.max(A[j, :, c]), np.max(A_predict[j, :, c] + A_std[j, :, c])) * 1.1)
# ax.text(2, 0.65, str(list[j]))
axs.yaxis.set_label_position("right")
# ax.yaxis.set_label_coords(0, 2)
# ax.set_ylabel(str(list[j]), rotation=90)
axs.legend(fontsize=9)
fig.autofmt_xdate()
fig.suptitle('Plot of original and 1-step prediction -- ' + 'COVID-19:' + cases +
"\n segment length = %i, # temporal dictionary atoms = %i" % (
self.patch_size, self.n_components),
fontsize=12, y=1)
plt.tight_layout(rect=[0, 0.03, 1, 0.9])
# plt.subplots_adjust(left=0.2, right=0.9, bottom=0.1, top=0.85, wspace=0.08, hspace=0.23)
if if_save:
plt.savefig('Time_series_dictionary/' + str(foldername) + '/Plot-' + cases + '.png')
if if_show:
plt.show()
def train_dict(self,
mode,
alpha,
beta,
learn_joint_dict,
foldername,
data_train = None,
iterations=None,
update_self=True,
if_save=True,
print_iter=False):
print('training dictionaries from patches along mode %i...' % mode)
'''
        Trains the dictionary on an i.i.d. sequence of minibatches of patches
mode = 0, 1, 2
learn_joint_dict = True or False parameter
'''
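        # Note (added): each pass below draws a fresh random batch of patches;
        # the first pass initializes Online_NTF, and later passes warm-start from
        # the aggregates (W, At, Bt) so the dictionary is refined across
        # minibatches rather than re-learned from scratch.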
W = self.W
At = []
Bt = []
code = self.code
if data_train is None:
data_train = self.data_train
if iterations is not None:
n_iter = iterations
else:
n_iter = self.ONMF_iterations
for t in np.arange(n_iter):
X = self.extract_random_patches(A=data_train) ## need to sample patches from self.data_test
if t == 0:
self.ntf = Online_NTF(X, self.n_components,
iterations=self.ONMF_sub_iterations,
learn_joint_dict=learn_joint_dict,
mode=mode,
alpha=alpha,
beta=beta,
batch_size=self.ONMF_batch_size) # max number of possible patches
W, At, Bt, H = self.ntf.train_dict_single()
code += H
else:
self.ntf = Online_NTF(X, self.n_components,
iterations=self.ONMF_sub_iterations,
batch_size=self.ONMF_batch_size,
ini_dict=W,
ini_A=At,
ini_B=Bt,
alpha=alpha,
beta=beta,
learn_joint_dict=learn_joint_dict,
mode=mode,
history=self.ntf.history)
# out of "sample_size" columns in the data matrix, sample "batch_size" randomly and train the dictionary
# for "iterations" iterations
W, At, Bt, H = self.ntf.train_dict_single()
code += H
if print_iter:
print('Current minibatch training iteration %i out of %i' % (t, self.ONMF_iterations))
if update_self:
self.W = W
self.code = code
# print('code_right_after_training', self.code)
if self.data_source != 'JHU':
list = self.state_list
else:
list = self.country_list
print('dict_shape:', W.shape)
print('code_shape:', code.shape)
if if_save:
np.save('Time_series_dictionary/' + str(foldername) + '/dict_learned_' + str(
mode) + '_' + 'pretraining' + '_' + str(list[0]), self.W)
np.save('Time_series_dictionary/' + str(foldername) + '/code_learned_' + str(
mode) + '_' + 'pretraining' + '_' + str(list[0]), self.code)
np.save('Time_series_dictionary/' + str(foldername) + '/At_' + str(mode) + '_' + 'pretraining' + '_' + str(
list[0]), At)
np.save('Time_series_dictionary/' + str(foldername) + '/Bt_' + str(mode) + '_' + 'pretraining' + '_' + str(
list[0]), Bt)
return W, At, Bt, code
def ONMF_predictor(self,
mode,
foldername,
data_test=None,
data_train=None,
learn_from_future2past=False,
prelearned_dict = None, # if not none, use this dictionary for prediction
ini_dict=None,
ini_A=None,
ini_B=None,
beta=1,
a1=0, # regularizer for the code in partial fitting
a2=0, # regularizer for the code in recursive prediction
future_extrapolation_length=0,
if_learn_online=True,
if_save=True,
# if_recons=True, # Reconstruct observed data using learned dictionary
learning_window_cap = None, # if not none, learn only from the past "learning_window_cap" days
minibatch_training_initialization=True,
minibatch_alpha=1,
minibatch_beta=1,
print_iter=False,
online_learning=True,
num_trials=1):
print('online learning and predicting from patches along mode %i...' % mode)
'''
        Trains the dictionary along a continuously sliding window over the data stream
        and predicts forthcoming data on the fly; the prediction could in turn be made to affect the learning rate
'''
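        # Shape conventions (added; inferred from the inline comments below):
        #   data_test / data_train : (# states) x (# days) x (# variables)
        #   patch batch X          : (# states) x (window length k) x (# variables) x (num_patches_perbatch)
        #   returned predictions   : (# trials) x (# states) x (# days + L) x (# variables)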
if data_test is None:
data_test = self.data_test.copy()
if data_train is None:
data_train = self.data_train.copy()
if learning_window_cap is None:
learning_window_cap = self.learning_window_cap
# print('!!!!!!!!!! A.shape', A.shape)
k = self.patch_size # Window length
L = self.prediction_length
# A_recons = np.zeros(shape=A.shape)
# print('A_recons.shape',A_recons.shape)
# W = self.W
# print('W.shape', self.W.shape)
At = []
Bt = []
H = []
# A_recons = np.zeros(shape=(A.shape[0], k+L-1, A.shape[2]))
list_full_predictions = []
A_recons = data_test.copy()
for trial in np.arange(num_trials):
### Initialize self parameters
self.W = ini_dict
# A_recons = A[:, 0:k + L - 1, :]
At = []
Bt = []
if prelearned_dict is not None:
self.W = prelearned_dict
else:
# Learn new dictionary to use for prediction
if minibatch_training_initialization:
# print('!!! self.W right before minibatch training', self.W)
self.W, At, Bt, H = self.train_dict(mode=3,
alpha=minibatch_alpha,
beta=minibatch_beta,
iterations=self.ONMF_iterations,
learn_joint_dict=True,
foldername=None,
update_self=True,
if_save=False)
# print('data.shape', self.data_test.shape)
# iter = np.floor(A.shape[1]/self.num_patches_perbatch).astype(int)
if online_learning:
T_start = k
if learning_window_cap is not None:
T_start = max(k, data_train.shape[1]-learning_window_cap)
for t in np.arange(T_start, data_train.shape[1]):
if not learn_from_future2past:
a = np.maximum(0, t - self.num_patches_perbatch)
X = self.extract_patches_interval(time_interval_initial=a,
time_interval_terminal=t,
A = data_train) # get patch from the past2future
else:
t1 = data_train.shape[1] - t
a = np.minimum(data_train.shape[1], t1 + self.num_patches_perbatch)
X = self.extract_patches_interval(time_interval_initial=t1,
time_interval_terminal=a,
A = data_train) # get patch from the future2past
# print('X.shape', X.shape)
# X.shape = (# states) x (# window length) x (# variables) x (num_patches_perbatch)
if t == k:
self.ntf = Online_NTF(X, self.n_components,
iterations=self.ONMF_sub_iterations,
learn_joint_dict=True,
mode=mode,
ini_dict=self.W,
ini_A=ini_A,
ini_B=ini_B,
batch_size=self.ONMF_batch_size,
subsample=self.subsample,
beta=beta)
self.W, At, Bt, H = self.ntf.train_dict_single()
self.code += H
"""
# reconstruction step
patch = A[:, t - k + L:t, :]
if learn_from_future2past:
patch_recons = self.predict_joint_single(patch, a1)
A_recons = np.append(patch_recons, A_recons, axis=1)
else:
A_recons = np.append(A_recons, patch_recons, axis=1)
"""
else:
if t % self.learnevery == 0 and if_learn_online: # do not learn from zero data (make np.sum(X)>0 for online learning)
self.ntf = Online_NTF(X, self.n_components,
iterations=self.ONMF_sub_iterations,
batch_size=self.ONMF_batch_size,
ini_dict=self.W,
ini_A=At,
ini_B=Bt,
learn_joint_dict=True,
mode=mode,
history=self.ntf.history,
subsample=self.subsample,
beta=beta)
self.W, At, Bt, H = self.ntf.train_dict_single()
# print('dictionary_updated')
self.code += H
"""
# reconstruction step
patch = A[:, t - k + L:t, :] ### in the original time orientation
if learn_from_future2past:
patch_recons = self.predict_joint_single(patch, a1)
# print('patch_recons', patch_recons)
A_recons = np.append(A_recons, patch_recons, axis=1)
else:
patch_recons = patch[:, -1, :]
patch_recons = patch_recons[:, np.newaxis, :]
A_recons = np.append(patch_recons, A_recons, axis=1)
"""
if print_iter:
print('Current (trial, day) for ONMF_predictor (%i, %i) out of (%i, %i)' % (
trial + 1, t, num_trials, data_train.shape[1] - 1))
# print('!!!!! A_recons.shape', A_recons.shape)
# concatenate state-wise dictionary to predict one state
# Assumes len(list_states)=1
self.W = np.concatenate(np.vsplit(self.W, len(self.state_list_train)), axis=1)
#### forward recursive prediction begins
for t in np.arange(data_test.shape[1], data_test.shape[1] + future_extrapolation_length):
patch = A_recons[:, t - k + L:t, :]
if t == data_test.shape[1]:
patch = data_test[:, t - k + L:t, :]
print('!!!!! patch.shape', patch.shape)
patch_recons = self.predict_joint_single(patch, a2)
print('!!!!! patch_recons.shape', patch_recons.shape)
A_recons = np.append(A_recons, patch_recons, axis=1)
print('new cases predicted final', A_recons[0, -1, 0])
### initial regulation
A_recons[:, 0:self.learnevery + L, :] = data_test[:, 0:self.learnevery + L, :]
### patch the two reconstructions
# A_recons = np.append(A_recons, A_recons[:,A.shape[1]:, :], axis=1)
print('!!!!! A_recons.shape', A_recons.shape)
list_full_predictions.append(A_recons.copy())
A_full_predictions_trials = np.asarray(
            list_full_predictions)  ## shape = (# trials) x (# states) x (# days + L) x (# variables)
self.result_dict.update({'Evaluation_num_trials': str(num_trials)})
self.result_dict.update({'Evaluation_A_full_predictions_trials': A_full_predictions_trials})
self.result_dict.update({'Evaluation_Dictionary': self.W})
self.result_dict.update({'Evaluation_Code': self.code})
if if_save:
if self.data_source != 'JHU':
list = self.state_list
else:
list = self.country_list
np.save('Time_series_dictionary/' + str(foldername) + '/full_results_' + 'num_trials_' + str(num_trials), self.result_dict)
"""
np.save('Time_series_dictionary/' + str(foldername) + '/dict_learned_tensor' + '_' + str(
list[0]) + '_' + 'afteronline' + str(self.beta), self.W)
np.save('Time_series_dictionary/' + str(foldername) + '/code_learned_tensor' + '_' + str(
list[0]) + '_' + 'afteronline' + str(self.beta), self.code)
np.save('Time_series_dictionary/' + str(foldername) + '/At_' + str(list[0]) + '_' + 'afteronline' + str(
self.beta), At)
np.save('Time_series_dictionary/' + str(foldername) + '/Bt_' + str(list[0]) + '_' + 'afteronline' + str(
self.beta), Bt)
np.save('Time_series_dictionary/' + str(foldername) + '/recons', A_recons)
"""
return A_full_predictions_trials, self.W, At, Bt, self.code
def ONMF_predictor_historic(self,
mode,
foldername,
prelearned_dict_seq = None, # if not none, use this seq of dict for prediction
learn_from_future2past=True,
ini_dict=None,
ini_A=None,
ini_B=None,
beta=1,
a1=0, # regularizer for the code in partial fitting
a2=0, # regularizer for the code in recursive prediction
future_extrapolation_length=0,
learning_window_cap = None,
if_save=True,
minibatch_training_initialization=False,
minibatch_alpha=1,
minibatch_beta=1,
online_learning=True,
num_trials=1): # take a number of trials to generate empirical confidence interval
print('Running ONMF_timeseries_predictor_historic along mode %i...' % mode)
'''
        Apply online_learning_and_prediction on the interval [0, t] for every 1 <= t <= T to make proper all-time predictions
        for evaluation
'''
A = self.data_test
# print('A.shape', A.shape)
k = self.patch_size
L = self.prediction_length
FEL = future_extrapolation_length
# A_recons = np.zeros(shape=A.shape)
# print('A_recons.shape',A_recons.shape)
# W = self.W
if learning_window_cap is None:
learning_window_cap = self.learning_window_cap
self.W = ini_dict
if ini_dict is None:
d = self.data_test.shape[0]*k*self.data_test.shape[2] #(# states) x (# window length) x (# variables)
self.W = np.random.rand(d, self.n_components)
# print('W.shape', self.W.shape)
# A_recons = np.zeros(shape=(A.shape[0], k+L-1, A.shape[2]))
# A_recons = A[:, 0:k + L - 1, :]
list_full_predictions = []
W_total_seq_trials = []
for trial in np.arange(num_trials):
W_total_seq = []
### A_total_prediction.shape = (# days) x (# states) x (FEL) x (# variables)
### W_total_seq.shape = (# days) x (# states * window length * # variables) x (n_components)
A_total_prediction = []
### fill in predictions for the first k days with the raw data
for i in np.arange(k + 1):
A_total_prediction.append(A[:, i:i + FEL, :])
W_total_seq.append(self.W.copy())
for t in np.arange(k + 1, A.shape[1]):
### Set self.data_test to the truncated one during [1,t]
prelearned_dict = None
if prelearned_dict_seq is not None:
prelearned_dict = prelearned_dict_seq[trial,t,:,:]
A_recons, W, At, Bt, code = self.ONMF_predictor(mode,
foldername,
data_test=self.data_test[:, :t, :],
data_train=self.data_train[:, :t, :],
prelearned_dict=prelearned_dict,
learn_from_future2past=learn_from_future2past,
ini_dict=ini_dict,
ini_A=ini_A,
ini_B=ini_B,
beta=beta,
a1=a1,
# regularizer for the code in partial fitting
a2=a2,
# regularizer for the code in recursive prediction
future_extrapolation_length=future_extrapolation_length,
learning_window_cap=learning_window_cap,
if_save=True,
minibatch_training_initialization=minibatch_training_initialization,
minibatch_alpha=minibatch_alpha,
minibatch_beta=minibatch_beta,
print_iter=False,
online_learning=online_learning,
num_trials=1)
A_recons = A_recons[0, :, :, :]
# print('!!!! A_recons.shape', A_recons.shape)
### A_recons.shape = (# states, t+FEL, # variables)
# print('!!!!! A_recons[:, -FEL:, :].shape', A_recons[:, -FEL:, :].shape)
A_total_prediction.append(A_recons[:, -FEL:, :])
W_total_seq.append(W.copy())
### A_recons.shape = (# states, t+FEL, # variables)
print('Current (trial, day) for ONMF_predictor_historic (%i, %i) out of (%i, %i)' % (
trial + 1, t - k, num_trials, A.shape[1] - k - 1))
A_total_prediction = np.asarray(A_total_prediction)
W_total_seq = np.asarray(W_total_seq)
print('W_total_seq.shape', W_total_seq.shape)
W_total_seq_trials.append(W_total_seq)
list_full_predictions.append(A_total_prediction)
W_total_seq_trials = np.asarray(W_total_seq_trials)
self.result_dict.update({'Evaluation_num_trials': str(num_trials)})
self.result_dict.update({'Evaluation_A_full_predictions_trials': np.asarray(list_full_predictions)})
self.result_dict.update({'Evaluation_Dictionary_seq_trials': W_total_seq_trials})
# sequence of dictionaries to be used for historic prediction : shape (trials, time, W.shape[0], W.shape[1])
A_full_predictions_trials = np.asarray(list_full_predictions)
print('!!! A_full_predictions_trials.shape', A_full_predictions_trials.shape)
if if_save:
np.save('Time_series_dictionary/' + str(foldername) + '/full_results_' + 'num_trials_' + str(
num_trials), self.result_dict)
"""
np.save('Time_series_dictionary/' + str(foldername) + '/dict_learned_tensor' + '_' + str(
list[0]) + '_' + 'afteronline' + str(self.beta), self.W)
np.save('Time_series_dictionary/' + str(foldername) + '/code_learned_tensor' + '_' + str(
list[0]) + '_' + 'afteronline' + str(self.beta), self.code)
np.save('Time_series_dictionary/' + str(foldername) + '/At' + str(list[0]) + '_' + 'afteronline' + str(
self.beta), At)
np.save('Time_series_dictionary/' + str(foldername) + '/Bt' + str(list[0]) + '_' + 'afteronline' + str(
self.beta), Bt)
np.save('Time_series_dictionary/' + str(foldername) + '/Full_prediction_trials_' + 'num_trials_' + str(
num_trials), A_full_predictions_trials)
np.save('Time_series_dictionary/' + str(foldername) + '/W_total_seq_' + 'num_trials_' + str(
num_trials), W_total_seq)
"""
return A_full_predictions_trials, W_total_seq_trials, code
def predict_joint_single(self, data, a1):
k = self.patch_size
L = self.prediction_length
A = data # A.shape = (self.data_test.shape[0], k-L, self.data_test.shape[2])
# A_recons = np.zeros(shape=(A.shape[0], k, A.shape[2]))
# W_tensor = self.W.reshape((k, A.shape[0], -1))
# print('A.shape', A.shape)
W_tensor = self.W.reshape((self.data_test.shape[0], k, self.data_test.shape[2], -1))
# print('W.shape', W_tensor.shape)
# for missing data, not needed for the COVID-19 data set
# extract only rows of nonnegative values (disregarding missing entries) (negative = N/A)
J = np.where(
completion: np.min(A, axis=(0, 1))
api: numpy.min
# License: BSD 3-clause
# Authors: <NAME>
# LTSD routine from jfsantos (<NAME>)
# Harvest, Cheaptrick, D4C, WORLD routines based on MATLAB code from <NAME>
# http://ml.cs.yamanashi.ac.jp/world/english/
# MGC code based on r9y9 (Ryuichi Yamamoto) MelGeneralizedCepstrums.jl
# Pieces also adapted from SPTK
from __future__ import division
import numpy as np
import scipy as sp
from numpy.lib.stride_tricks import as_strided
import scipy.signal as sg
from scipy.interpolate import interp1d
import wave
from scipy.cluster.vq import vq
from scipy import linalg, fftpack
from numpy.testing import assert_almost_equal
from scipy.linalg import svd
from scipy.io import wavfile
from scipy.signal import firwin
import zipfile
import tarfile
import os
import copy
import multiprocessing
from multiprocessing import Pool
import functools
import time
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib2 as urllib
def download(url, server_fname, local_fname=None, progress_update_percentage=5,
bypass_certificate_check=False):
"""
An internet download utility modified from
http://stackoverflow.com/questions/22676/
how-do-i-download-a-file-over-http-using-python/22776#22776
"""
if bypass_certificate_check:
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
u = urllib.urlopen(url, context=ctx)
else:
u = urllib.urlopen(url)
if local_fname is None:
local_fname = server_fname
full_path = local_fname
meta = u.info()
with open(full_path, 'wb') as f:
try:
file_size = int(meta.get("Content-Length"))
except TypeError:
print("WARNING: Cannot get file size, displaying bytes instead!")
file_size = 100
print("Downloading: %s Bytes: %s" % (server_fname, file_size))
file_size_dl = 0
block_sz = int(1E7)
p = 0
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if (file_size_dl * 100. / file_size) > p:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl *
100. / file_size)
print(status)
p += progress_update_percentage
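# Example usage (added, not executed at import); the URL is a placeholder:
#   download("https://example.com/sample.wav", "sample.wav")
# The fetch_* helpers below wrap this pattern for specific datasets.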
def fetch_sample_speech_tapestry():
url = "https://www.dropbox.com/s/qte66a7haqspq2g/tapestry.wav?dl=1"
wav_path = "tapestry.wav"
if not os.path.exists(wav_path):
download(url, wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo? - just choose one channel
return fs, d
def fetch_sample_file(wav_path):
if not os.path.exists(wav_path):
raise ValueError("Unable to find file at path %s" % wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo - just choose one channel
if len(d.shape) > 1:
d = d[:, 0]
return fs, d
def fetch_sample_music():
url = "http://www.music.helsinki.fi/tmt/opetus/uusmedia/esim/"
url += "a2002011001-e02-16kHz.wav"
wav_path = "test.wav"
if not os.path.exists(wav_path):
download(url, wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo - just choose one channel
d = d[:, 0]
return fs, d
def fetch_sample_speech_fruit(n_samples=None):
url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
wav_path = "audio.tar.gz"
if not os.path.exists(wav_path):
download(url, wav_path)
tf = tarfile.open(wav_path)
wav_names = [fname for fname in tf.getnames()
if ".wav" in fname.split(os.sep)[-1]]
speech = []
print("Loading speech files...")
for wav_name in wav_names[:n_samples]:
f = tf.extractfile(wav_name)
fs, d = wavfile.read(f)
d = d.astype('float32') / (2 ** 15)
speech.append(d)
return fs, speech
def fetch_sample_speech_eustace(n_samples=None):
"""
http://www.cstr.ed.ac.uk/projects/eustace/download.html
"""
# data
url = "http://www.cstr.ed.ac.uk/projects/eustace/down/eustace_wav.zip"
wav_path = "eustace_wav.zip"
if not os.path.exists(wav_path):
download(url, wav_path)
# labels
url = "http://www.cstr.ed.ac.uk/projects/eustace/down/eustace_labels.zip"
labels_path = "eustace_labels.zip"
if not os.path.exists(labels_path):
download(url, labels_path)
# Read wavfiles
# 16 kHz wav
zf = zipfile.ZipFile(wav_path, 'r')
wav_names = [fname for fname in zf.namelist()
if ".wav" in fname.split(os.sep)[-1]]
fs = 16000
speech = []
print("Loading speech files...")
for wav_name in wav_names[:n_samples]:
wav_str = zf.read(wav_name)
d = np.frombuffer(wav_str, dtype=np.int16)
d = d.astype('float32') / (2 ** 15)
speech.append(d)
zf = zipfile.ZipFile(labels_path, 'r')
label_names = [fname for fname in zf.namelist()
if ".lab" in fname.split(os.sep)[-1]]
labels = []
print("Loading label files...")
for label_name in label_names[:n_samples]:
label_file_str = zf.read(label_name)
labels.append(label_file_str)
return fs, speech
def stft(X, fftsize=128, step="half", mean_normalize=True, real=False,
compute_onesided=True):
"""
Compute STFT for 1D real valued input X
"""
if real:
local_fft = fftpack.rfft
cut = -1
else:
local_fft = fftpack.fft
cut = None
if compute_onesided:
cut = fftsize // 2 + 1
if mean_normalize:
X -= X.mean()
if step == "half":
X = halfoverlap(X, fftsize)
else:
X = overlap(X, fftsize, step)
size = fftsize
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
X = X * win[None]
X = local_fft(X)[:, :cut]
return X
def istft(X, fftsize=128, step="half", wsola=False, mean_normalize=True,
real=False, compute_onesided=True):
"""
Compute ISTFT for STFT transformed X
"""
if real:
local_ifft = fftpack.irfft
X_pad = np.zeros((X.shape[0], X.shape[1] + 1)) + 0j
X_pad[:, :-1] = X
X = X_pad
else:
local_ifft = fftpack.ifft
if compute_onesided:
X_pad = np.zeros((X.shape[0], 2 * X.shape[1])) + 0j
X_pad[:, :fftsize // 2 + 1] = X
X_pad[:, fftsize // 2 + 1:] = 0
X = X_pad
X = local_ifft(X).astype("float64")
if step == "half":
X = invert_halfoverlap(X)
else:
X = overlap_add(X, step, wsola=wsola)
if mean_normalize:
X -= np.mean(X)
return X
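# Hedged usage sketch (added, not part of the original API): a quick
# analysis/synthesis round trip with the stft/istft pair above. It assumes the
# module's overlap helpers (halfoverlap / invert_halfoverlap) are defined
# elsewhere in this file, since stft/istft call them.
def _demo_stft_roundtrip(fftsize=128):
    fs = 8000
    t = np.arange(2 * fs) / float(fs)
    d = np.sin(2 * np.pi * 440. * t)
    X = stft(d, fftsize=fftsize)             # complex frames, one per row
    d_rec = istft(X, fftsize=fftsize)        # back to a (mean-removed) waveform
    n = min(len(d), len(d_rec))
    return np.mean((d[:n] - d_rec[:n]) ** 2)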
def mdct_slow(X, dctsize=128):
M = dctsize
N = 2 * dctsize
N_0 = (M + 1) / 2
X = halfoverlap(X, N)
X = sine_window(X)
n, k = np.meshgrid(np.arange(N), np.arange(M))
# Use transpose due to "samples as rows" convention
tf = np.cos(np.pi * (n + N_0) * (k + 0.5) / M).T
return np.dot(X, tf)
def imdct_slow(X, dctsize=128):
M = dctsize
N = 2 * dctsize
N_0 = (M + 1) / 2
N_4 = N / 4
n, k = np.meshgrid(np.arange(N), np.arange(M))
# inverse *is not* transposed
tf = np.cos(np.pi * (n + N_0) * (k + 0.5) / M)
X_r = np.dot(X, tf) / N_4
X_r = sine_window(X_r)
X = invert_halfoverlap(X_r)
return X
def nsgcwin(fmin, fmax, n_bins, fs, signal_len, gamma):
"""
Nonstationary Gabor window calculation
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
# use a hanning window
# no fractional shifts
fftres = fs / signal_len
fmin = float(fmin)
fmax = float(fmax)
gamma = float(gamma)
nyq = fs / 2.
b = np.floor(n_bins * np.log2(fmax / fmin))
fbas = fmin * 2 ** (np.arange(b + 1) / float(n_bins))
Q = 2 ** (1. / n_bins) - 2 ** (-1. / n_bins)
cqtbw = Q * fbas + gamma
cqtbw = cqtbw.ravel()
maxidx = np.where(fbas + cqtbw / 2. > nyq)[0]
if len(maxidx) > 0:
# replicate bug in MATLAB version...
# or is it a feature
if sum(maxidx) == 0:
first = len(cqtbw) - 1
else:
first = maxidx[0]
fbas = fbas[:first]
cqtbw = cqtbw[:first]
minidx = np.where(fbas - cqtbw / 2. < 0)[0]
if len(minidx) > 0:
fbas = fbas[minidx[-1]+1:]
cqtbw = cqtbw[minidx[-1]+1:]
fbas_len = len(fbas)
fbas_new = np.zeros((2 * (len(fbas) + 1)))
fbas_new[1:len(fbas) + 1] = fbas
fbas = fbas_new
fbas[fbas_len + 1] = nyq
fbas[fbas_len + 2:] = fs - fbas[1:fbas_len + 1][::-1]
bw = np.zeros_like(fbas)
bw[0] = 2 * fmin
bw[1:len(cqtbw) + 1] = cqtbw
bw[len(cqtbw) + 1] = fbas[fbas_len + 2] - fbas[fbas_len]
bw[-len(cqtbw):] = cqtbw[::-1]
bw = bw / fftres
fbas = fbas / fftres
posit = np.zeros_like(fbas)
posit[:fbas_len + 2] = np.floor(fbas[:fbas_len + 2])
posit[fbas_len + 2:] = np.ceil(fbas[fbas_len + 2:])
base_shift = -posit[-1] % signal_len
shift = np.zeros_like(posit).astype("int32")
shift[1:] = (posit[1:] - posit[:-1]).astype("int32")
shift[0] = base_shift
bw = np.round(bw)
bwfac = 1
M = bw
min_win = 4
for ii in range(len(bw)):
if bw[ii] < min_win:
bw[ii] = min_win
M[ii] = bw[ii]
def _win(numel):
if numel % 2 == 0:
s1 = np.arange(0, .5, 1. / numel)
if len(s1) != numel // 2:
# edge case with small floating point numbers...
s1 = s1[:-1]
s2 = np.arange(-.5, 0, 1. / numel)
if len(s2) != numel // 2:
# edge case with small floating point numbers...
s2 = s2[:-1]
x = np.concatenate((s1, s2))
else:
s1 = np.arange(0, .5, 1. / numel)
s2 = np.arange(-.5 + .5 / numel, 0, 1. / numel)
if len(s2) != numel // 2: # assume integer truncate 27 // 2 = 13
s2 = s2[:-1]
x = np.concatenate((s1, s2))
assert len(x) == numel
g = .5 + .5 * np.cos(2 * np.pi * x)
return g
multiscale = [_win(bi) for bi in bw]
bw = bwfac * np.ceil(M / bwfac)
for kk in [0, fbas_len + 1]:
if M[kk] > M[kk + 1]:
            multiscale[kk] = np.ones(int(M[kk])).astype(multiscale[0].dtype)
            # cast to int so the slice below uses integer indices
            i1 = int(np.floor(M[kk] / 2) - np.floor(M[kk + 1] / 2))
            i2 = int(np.floor(M[kk] / 2) + np.ceil(M[kk + 1] / 2))
            # Very rarely, gets an off by 1 error? Seems to be at the end...
            # for now, slice
            multiscale[kk][i1:i2] = _win(M[kk + 1])
multiscale[kk] = multiscale[kk] / np.sqrt(M[kk])
return multiscale, shift, M
def nsgtf_real(X, multiscale, shift, window_lens):
"""
Nonstationary Gabor Transform for real values
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
# This will break with multchannel input
signal_len = len(X)
N = len(shift)
X_fft = np.fft.fft(X)
fill = np.sum(shift) - signal_len
if fill > 0:
        X_fft_tmp = np.zeros((signal_len + fill))  # pad up to the total shift length
X_fft_tmp[:len(X_fft)] = X_fft
X_fft = X_fft_tmp
posit = np.cumsum(shift) - shift[0]
scale_lens = np.array([len(m) for m in multiscale])
N = np.where(posit - np.floor(scale_lens) <= (signal_len + fill) / 2)[0][-1]
c = []
# c[0] is almost exact
for ii in range(N):
idx_l = np.arange(np.ceil(scale_lens[ii] / 2), scale_lens[ii])
idx_r = np.arange(np.ceil(scale_lens[ii] / 2))
idx = np.concatenate((idx_l, idx_r))
idx = idx.astype("int32")
subwin_range = posit[ii] + np.arange(-np.floor(scale_lens[ii] / 2),
np.ceil(scale_lens[ii] / 2))
win_range = subwin_range % (signal_len + fill)
win_range = win_range.astype("int32")
if window_lens[ii] < scale_lens[ii]:
raise ValueError("Not handling 'not enough channels' case")
else:
temp = np.zeros((window_lens[ii],)).astype(X_fft.dtype)
temp_idx_l = np.arange(len(temp) - np.floor(scale_lens[ii] / 2),
len(temp))
temp_idx_r = np.arange(np.ceil(scale_lens[ii] / 2))
temp_idx = np.concatenate((temp_idx_l, temp_idx_r))
temp_idx = temp_idx.astype("int32")
temp[temp_idx] = X_fft[win_range] * multiscale[ii][idx]
fs_new_bins = window_lens[ii]
fk_bins = posit[ii]
displace = fk_bins - np.floor(fk_bins / fs_new_bins) * fs_new_bins
displace = displace.astype("int32")
temp = np.roll(temp, displace)
c.append(np.fft.ifft(temp))
if 0:
# cell2mat concatenation
c = np.concatenate(c)
return c
def nsdual(multiscale, shift, window_lens):
"""
Calculation of nonstationary inverse gabor filters
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
N = len(shift)
posit = np.cumsum(shift)
seq_len = posit[-1]
posit = posit - shift[0]
diagonal = np.zeros((seq_len,))
win_range = []
for ii in range(N):
filt_len = len(multiscale[ii])
idx = np.arange(-np.floor(filt_len / 2), np.ceil(filt_len / 2))
win_range.append((posit[ii] + idx) % seq_len)
subdiag = window_lens[ii] * np.fft.fftshift(multiscale[ii]) ** 2
ind = win_range[ii].astype(np.int)
diagonal[ind] = diagonal[ind] + subdiag
dual_multiscale = multiscale
for ii in range(N):
ind = win_range[ii].astype(np.int)
dual_multiscale[ii] = np.fft.ifftshift(
np.fft.fftshift(dual_multiscale[ii]) / diagonal[ind])
return dual_multiscale
def nsgitf_real(c, c_dc, c_nyq, multiscale, shift):
"""
Nonstationary Inverse Gabor Transform on real valued signal
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
c_l = []
c_l.append(c_dc)
c_l.extend([ci for ci in c])
c_l.append(c_nyq)
posit = np.cumsum(shift)
seq_len = posit[-1]
posit -= shift[0]
out = np.zeros((seq_len,)).astype(c_l[1].dtype)
for ii in range(len(c_l)):
filt_len = len(multiscale[ii])
win_range = posit[ii] + np.arange(-np.floor(filt_len / 2),
np.ceil(filt_len / 2))
win_range = (win_range % seq_len).astype(np.int)
temp = np.fft.fft(c_l[ii]) * len(c_l[ii])
fs_new_bins = len(c_l[ii])
fk_bins = posit[ii]
displace = int(fk_bins - np.floor(fk_bins / fs_new_bins) * fs_new_bins)
temp = np.roll(temp, -displace)
l = np.arange(len(temp) - np.floor(filt_len / 2), len(temp))
r = np.arange(np.ceil(filt_len / 2))
temp_idx = (np.concatenate((l, r)) % len(temp)).astype(np.int)
temp = temp[temp_idx]
lf = np.arange(filt_len - np.floor(filt_len / 2), filt_len)
rf = np.arange(np.ceil(filt_len / 2))
filt_idx = np.concatenate((lf, rf)).astype(np.int)
m = multiscale[ii][filt_idx]
out[win_range] = out[win_range] + m * temp
    nyq_bin = int(np.floor(seq_len / 2) + 1)
out_idx = np.arange(
nyq_bin - np.abs(1 - seq_len % 2) - 1, 0, -1).astype(np.int)
out[nyq_bin:] = np.conj(out[out_idx])
t_out = np.real(np.fft.ifft(out)).astype(np.float64)
return t_out
def cqt(X, fs, n_bins=48, fmin=27.5, fmax="nyq", gamma=20):
"""
Constant Q Transform
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
if fmax == "nyq":
fmax = fs / 2.
multiscale, shift, window_lens = nsgcwin(fmin, fmax, n_bins, fs,
len(X), gamma)
fbas = fs * np.cumsum(shift[1:]) / len(X)
fbas = fbas[:len(window_lens) // 2 - 1]
bins = window_lens.shape[0] // 2 - 1
window_lens[1:bins + 1] = window_lens[bins + 2]
window_lens[bins + 2:] = window_lens[1:bins + 1][::-1]
norm = 2. * window_lens[:bins + 2] / float(len(X))
norm = np.concatenate((norm, norm[1:-1][::-1]))
multiscale = [norm[ii] * multiscale[ii] for ii in range(2 * (bins + 1))]
c = nsgtf_real(X, multiscale, shift, window_lens)
c_dc = c[0]
c_nyq = c[-1]
c_sub = c[1:-1]
c = np.vstack(c_sub)
return c, c_dc, c_nyq, multiscale, shift, window_lens
def icqt(X_cq, c_dc, c_nyq, multiscale, shift, window_lens):
"""
Inverse constant Q Transform
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
new_multiscale = nsdual(multiscale, shift, window_lens)
X = nsgitf_real(X_cq, c_dc, c_nyq, new_multiscale, shift)
return X
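# Hedged usage sketch (added, not part of the original API): constant-Q
# analysis with cqt followed by resynthesis with icqt on a short synthetic
# tone; purely a smoke test of the pipeline defined above.
def _demo_cqt_roundtrip(fs=8000):
    t = np.arange(fs) / float(fs)
    d = np.sin(2 * np.pi * 440. * t)
    c, c_dc, c_nyq, multiscale, shift, window_lens = cqt(d, fs)
    d_rec = icqt(c, c_dc, c_nyq, multiscale, shift, window_lens)
    n = min(len(d), len(d_rec))
    return np.mean((d[:n] - np.real(d_rec[:n])) ** 2)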
def rolling_mean(X, window_size):
w = 1.0 / window_size * np.ones((window_size))
return np.correlate(X, w, 'valid')
def rolling_window(X, window_size):
# for 1d data
shape = X.shape[:-1] + (X.shape[-1] - window_size + 1, window_size)
strides = X.strides + (X.strides[-1],)
return np.lib.stride_tricks.as_strided(X, shape=shape, strides=strides)
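# Tiny example (added): rolling_mean gives a 'valid'-mode moving average,
# rolling_window a strided view of overlapping frames of the same length.
def _demo_rolling(window_size=4):
    x = np.arange(10, dtype="float64")
    means = rolling_mean(x, window_size)      # shape (7,)
    frames = rolling_window(x, window_size)   # shape (7, 4)
    return means.shape, frames.shape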
def voiced_unvoiced(X, window_size=256, window_step=128, copy=True):
"""
Voiced unvoiced detection from a raw signal
Based on code from:
https://www.clear.rice.edu/elec532/PROJECTS96/lpc/code.html
Other references:
http://www.seas.ucla.edu/spapl/code/harmfreq_MOLRT_VAD.m
Parameters
----------
X : ndarray
Raw input signal
window_size : int, optional (default=256)
The window size to use, in samples.
window_step : int, optional (default=128)
How far the window steps after each calculation, in samples.
copy : bool, optional (default=True)
Whether to make a copy of the input array or allow in place changes.
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = n_points // window_step
# Padding
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], pad_sizes[0])), X,
np.zeros((X.shape[0], pad_sizes[1]))))
clipping_factor = 0.6
b, a = sg.butter(10, np.pi * 9 / 40)
voiced_unvoiced = np.zeros((n_windows, 1))
period = np.zeros((n_windows, 1))
for window in range(max(n_windows - 1, 1)):
XX = X.ravel()[window * window_step + np.arange(window_size)]
XX *= sg.hamming(len(XX))
XX = sg.lfilter(b, a, XX)
left_max = np.max(np.abs(XX[:len(XX) // 3]))
right_max = np.max(np.abs(XX[-len(XX) // 3:]))
clip_value = clipping_factor * np.min([left_max, right_max])
XX_clip = np.clip(XX, clip_value, -clip_value)
XX_corr = np.correlate(XX_clip, XX_clip, mode='full')
center = np.argmax(XX_corr)
right_XX_corr = XX_corr[center:]
prev_window = max([window - 1, 0])
if voiced_unvoiced[prev_window] > 0:
# Want it to be harder to turn off than turn on
strength_factor = .29
else:
strength_factor = .3
start = np.where(right_XX_corr < .3 * XX_corr[center])[0]
# 20 is hardcoded but should depend on samplerate?
try:
start = np.max([20, start[0]])
except IndexError:
start = 20
search_corr = right_XX_corr[start:]
index = np.argmax(search_corr)
second_max = search_corr[index]
if (second_max > strength_factor * XX_corr[center]):
voiced_unvoiced[window] = 1
period[window] = start + index - 1
else:
voiced_unvoiced[window] = 0
period[window] = 0
return np.array(voiced_unvoiced), np.array(period)
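# Illustrative sketch (added): run the detector on a synthetic signal whose
# first half is a tone and second half is noise; expect mostly 1s then mostly
# 0s in the per-window voiced/unvoiced decisions.
def _demo_voiced_unvoiced(fs=8000):
    t = np.arange(fs) / float(fs)
    sig = np.concatenate((np.sin(2 * np.pi * 220. * t),
                          0.1 * np.random.randn(fs)))
    vuv, period = voiced_unvoiced(sig, window_size=256, window_step=128)
    return vuv.ravel(), period.ravel()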
def lpc_analysis(X, order=8, window_step=128, window_size=2 * 128,
emphasis=0.9, voiced_start_threshold=.9,
voiced_stop_threshold=.6, truncate=False, copy=True):
"""
Extract LPC coefficients from a signal
Based on code from:
http://labrosa.ee.columbia.edu/matlab/sws/
    Parameters
----------
X : ndarray
Signals to extract LPC coefficients from
order : int, optional (default=8)
Order of the LPC coefficients. For speech, use the general rule that the
order is two times the expected number of formants plus 2.
This can be formulated as 2 + 2 * (fs // 2000). For approx. signals
with fs = 7000, this is 8 coefficients - 2 + 2 * (7000 // 2000).
window_step : int, optional (default=128)
The size (in samples) of the space between each window
window_size : int, optional (default=2 * 128)
The size of each window (in samples) to extract coefficients over
emphasis : float, optional (default=0.9)
The emphasis coefficient to use for filtering
voiced_start_threshold : float, optional (default=0.9)
Upper power threshold for estimating when speech has started
voiced_stop_threshold : float, optional (default=0.6)
Lower power threshold for estimating when speech has stopped
truncate : bool, optional (default=False)
Whether to cut the data at the last window or do zero padding.
copy : bool, optional (default=True)
Whether to copy the input X or modify in place
Returns
-------
lp_coefficients : ndarray
lp coefficients to describe the frame
per_frame_gain : ndarray
calculated gain for each frame
residual_excitation : ndarray
leftover energy which is not described by lp coefficents and gain
voiced_frames : ndarray
array of [0, 1] values which holds voiced/unvoiced decision for each
frame.
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = int(n_points // window_step)
if not truncate:
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], int(pad_sizes[0]))), X,
np.zeros((X.shape[0], int(pad_sizes[1])))))
else:
pad_sizes = [0, 0]
X = X[0, :n_windows * window_step]
lp_coefficients = np.zeros((n_windows, order + 1))
per_frame_gain = np.zeros((n_windows, 1))
residual_excitation = np.zeros(
int(((n_windows - 1) * window_step + window_size)))
# Pre-emphasis high-pass filter
X = sg.lfilter([1, -emphasis], 1, X)
# stride_tricks.as_strided?
autocorr_X = np.zeros((n_windows, int(2 * window_size - 1)))
for window in range(max(n_windows - 1, 1)):
wtws = int(window * window_step)
XX = X.ravel()[wtws + np.arange(window_size, dtype="int32")]
WXX = XX * sg.hanning(window_size)
autocorr_X[window] = np.correlate(WXX, WXX, mode='full')
center = np.argmax(autocorr_X[window])
RXX = autocorr_X[window,
np.arange(center, window_size + order, dtype="int32")]
R = linalg.toeplitz(RXX[:-1])
solved_R = linalg.pinv(R).dot(RXX[1:])
filter_coefs = np.hstack((1, -solved_R))
residual_signal = sg.lfilter(filter_coefs, 1, WXX)
gain = np.sqrt(np.mean(residual_signal ** 2))
lp_coefficients[window] = filter_coefs
per_frame_gain[window] = gain
assign_range = wtws + np.arange(window_size, dtype="int32")
residual_excitation[assign_range] += residual_signal / gain
# Throw away first part in overlap mode for proper synthesis
residual_excitation = residual_excitation[int(pad_sizes[0]):]
return lp_coefficients, per_frame_gain, residual_excitation
def lpc_to_frequency(lp_coefficients, per_frame_gain):
"""
Extract resonant frequencies and magnitudes from LPC coefficients and gains.
Parameters
----------
lp_coefficients : ndarray
LPC coefficients, such as those calculated by ``lpc_analysis``
per_frame_gain : ndarray
Gain calculated for each frame, such as those calculated
by ``lpc_analysis``
Returns
-------
frequencies : ndarray
Resonant frequencies calculated from LPC coefficients and gain. Returned
frequencies are from 0 to 2 * pi
magnitudes : ndarray
Magnitudes of resonant frequencies
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
n_windows, order = lp_coefficients.shape
frame_frequencies = np.zeros((n_windows, (order - 1) // 2))
frame_magnitudes = np.zeros_like(frame_frequencies)
for window in range(n_windows):
w_coefs = lp_coefficients[window]
g_coefs = per_frame_gain[window]
roots = np.roots(np.hstack(([1], w_coefs[1:])))
# Roots doesn't return the same thing as MATLAB... agh
frequencies, index = np.unique(
np.abs(np.angle(roots)), return_index=True)
# Make sure 0 doesn't show up...
gtz = np.where(frequencies > 0)[0]
frequencies = frequencies[gtz]
index = index[gtz]
magnitudes = g_coefs / (1. - np.abs(roots))
sort_index = np.argsort(frequencies)
frame_frequencies[window, :len(sort_index)] = frequencies[sort_index]
frame_magnitudes[window, :len(sort_index)] = magnitudes[sort_index]
return frame_frequencies, frame_magnitudes
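# Worked example (added): LPC analysis of a two-tone signal, then conversion
# of the per-frame coefficients to resonant frequencies in Hz. The order
# follows the 2 + 2 * (fs // 2000) rule of thumb from the lpc_analysis docstring.
def _demo_lpc(fs=8000):
    t = np.arange(2 * fs) / float(fs)
    sig = np.sin(2 * np.pi * 500. * t) + 0.5 * np.sin(2 * np.pi * 1500. * t)
    order = 2 + 2 * (fs // 2000)
    lp_coefficients, gain, _ = lpc_analysis(sig, order=order)
    freqs, mags = lpc_to_frequency(lp_coefficients, gain)
    return freqs * fs / (2 * np.pi)   # angular frequency (0..2*pi) -> Hz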
def lpc_to_lsf(all_lpc):
if len(all_lpc.shape) < 2:
all_lpc = all_lpc[None]
order = all_lpc.shape[1] - 1
all_lsf = np.zeros((len(all_lpc), order))
for i in range(len(all_lpc)):
lpc = all_lpc[i]
lpc1 = np.append(lpc, 0)
lpc2 = lpc1[::-1]
sum_filt = lpc1 + lpc2
diff_filt = lpc1 - lpc2
if order % 2 != 0:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, 0, -1])
deconv_sum = sum_filt
else:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, -1])
deconv_sum, _ = sg.deconvolve(sum_filt, [1, 1])
roots_diff = np.roots(deconv_diff)
roots_sum = np.roots(deconv_sum)
angle_diff = np.angle(roots_diff[::2])
angle_sum = np.angle(roots_sum[::2])
lsf = np.sort(np.hstack((angle_diff, angle_sum)))
if len(lsf) != 0:
all_lsf[i] = lsf
return np.squeeze(all_lsf)
def lsf_to_lpc(all_lsf):
if len(all_lsf.shape) < 2:
all_lsf = all_lsf[None]
order = all_lsf.shape[1]
all_lpc = np.zeros((len(all_lsf), order + 1))
for i in range(len(all_lsf)):
lsf = all_lsf[i]
zeros = np.exp(1j * lsf)
sum_zeros = zeros[::2]
diff_zeros = zeros[1::2]
sum_zeros = np.hstack((sum_zeros,
completion: np.conj(sum_zeros)
api: numpy.conj
"""
Utilities for MLP object.
Copyright (C) 2017, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import abc
import numpy as np
import theano
import theano.tensor as T
def _linear(x):
"""Linear activation. Do nothing on the input."""
return x
# Possible activation for the hidden units.
ACTIVATIONS = {
'softmax': T.nnet.softmax,
'sigmoid': T.nnet.sigmoid,
'tanh': T.tanh,
'relu': T.nnet.relu,
'linear': _linear
}
class MLPError(Exception):
"""Base class for exceptions in this module."""
pass
class UnkownActivationError(MLPError):
"""Raised when the given activation is not known."""
def __init__(self, activation):
self.activation = str(activation)
def __str__(self):
return '"' + self.activation + '" is not one of the pre-defined " \
"activations: "' + '", "'.join(ACTIVATIONS.keys()) + '"'
def _init_weights_matrix(dim_in, dim_out, activation, borrow=True):
val = np.sqrt(6. / (dim_in + dim_out))
if activation == 'sigmoid':
retval = 4 * np.random.uniform(low=-val, high=val,
size=(dim_in, dim_out))
elif activation == 'tanh':
retval = np.random.uniform(low=-val, high=val,
size=(dim_in, dim_out))
elif (activation == 'relu' or activation == 'linear' or
activation == 'softmax'):
retval = np.random.normal(0., 0.01, size=(dim_in, dim_out))
else:
raise UnkownActivationError(activation)
return theano.shared(np.asarray(retval, dtype=theano.config.floatX),
borrow=borrow)
def init_residual_weights_matrix(dim_in, dim_out, borrow=True):
"""Partial isometry initialization."""
if dim_out == dim_in:
weights = np.identity(dim_in)
else:
d = max(dim_in, dim_out)
weights = np.linalg.qr(np.random.randn(d,d))[0][:dim_in,:dim_out]
return theano.shared(np.asarray(weights, dtype=theano.config.floatX),
borrow=borrow)
def _init_bias(dim, borrow=True):
return theano.shared(np.zeros(dim, dtype=theano.config.floatX) + .01,
borrow=borrow)
class LogisticRegressionLayer(object):
def __init__(self, inputs, dim_in, dim_out, activation):
self.inputs = inputs
self.dim_in = dim_in
self.dim_out = dim_out
weights = _init_weights_matrix(dim_in, dim_out, activation)
bias = _init_bias(dim_out)
self.outputs = ACTIVATIONS[activation](T.dot(inputs, weights) + bias)
self.params = [weights, bias]
class StdLayer(object):
def __init__(self, inputs, dim_in, dim_out, activation):
self.inputs = inputs
self.dim_in = dim_in
self.dim_out = dim_out
self.activation = activation
weights = _init_weights_matrix(dim_in, dim_out, activation)
bias = _init_bias(dim_out)
self.outputs = ACTIVATIONS[activation](T.dot(inputs, weights) + bias)
self.params = [weights, bias]
class GaussianLayer(object):
def __init__(self, inputs, dim_in, dim_out, activation):
self.inputs = inputs
self.dim_in = dim_in
self.dim_out = dim_out
self.activation = activation
shared_layer = StdLayer(inputs, dim_in, 2 * dim_out, activation)
self.mean, raw_logvar = \
theano.tensor.split(shared_layer.outputs, [dim_out, dim_out], 2,
axis=-1)
#self.var = T.log(1 + T.exp(raw_logvar))
self.var = T.exp(raw_logvar)
self.params = shared_layer.params
self.outputs = self.mean
# Possible layer types.
LAYER_TYPES = {
'standard': StdLayer,
'gaussian': GaussianLayer
}
class NeuralNetwork(object):
def __init__(self, structure, residuals, inputs=None):
if inputs is None:
self.inputs = T.matrix(dtype=theano.config.floatX)
else:
self.inputs = inputs
# Build the neural network.
self.layers = []
self.params = []
current_inputs = self.inputs
for layer_type, dim_in, dim_out, activation in structure:
self.layers.append(LAYER_TYPES[layer_type](current_inputs, dim_in,
dim_out, activation))
self.params += self.layers[-1].params
current_inputs = self.layers[-1].outputs
# Add the residual connections.
for residual_in, residual_out in residuals:
dim_in = self.layers[residual_in].dim_in
dim_out = self.layers[residual_out].dim_out
weights = init_residual_weights_matrix(dim_in, dim_out)
self.params += [weights]
self.layers[residual_out].outputs += \
T.dot(self.layers[residual_in].inputs, weights)
self.outputs = self.layers[-1].outputs
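# Hedged usage sketch (added, not part of the original module): build a tiny
# network with one standard and one Gaussian layer and run a forward pass.
# Each 'structure' entry is a (layer_type, dim_in, dim_out, activation) tuple
# and 'residuals' lists (layer_in_index, layer_out_index) pairs (empty here).
def _demo_neural_network():
    net = NeuralNetwork(structure=[('standard', 10, 16, 'tanh'),
                                   ('gaussian', 16, 4, 'tanh')],
                        residuals=[])
    forward = theano.function(inputs=[net.inputs], outputs=net.outputs)
    batch = np.random.randn(8, 10).astype(theano.config.floatX)
    return forward(batch).shape  # (8, 4): the Gaussian layer's mean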
class MLP(NeuralNetwork):
def __init__(self, structure, residuals, inputs):
NeuralNetwork.__init__(self, structure, residuals, inputs)
self.log_pred = T.log(self.layers[-1].outputs)
# Build the functions.
self.forward = theano.function(
inputs=[self.inputs],
outputs=[self.log_pred]
)
class GaussianNeuralNetwork(NeuralNetwork):
def __init__(self, structure, residuals, inputs=None):
NeuralNetwork.__init__(self, structure, residuals, inputs)
self.mean = self.layers[-1].outputs
self.var = self.layers[-1].var
# Noise variable for the reparameterization trick.
if "gpu" in theano.config.device:
srng = theano.sandbox.cuda.rng_curand.CURAND_RandomStreams()
else:
srng = T.shared_randomstreams.RandomStreams()
self.eps = srng.normal(self.mean.shape)
# Latent variable.
self.sample = self.mean + T.sqrt(self.var) * self.eps
# Build the functions.
self.forward = theano.function(
inputs=[self.inputs],
outputs=[self.mean, self.var]
)
class MLPold(metaclass=abc.ABCMeta):
"""Base class for MLP objects."""
@staticmethod
def init_layer_params(dim_in, dim_out, scale):
"""Initialize a weights matrix and bias vector."""
#weights = np.random.randn(dim_in, dim_out)
weights = np.random.uniform(
low=-np.sqrt(6. / (dim_in + dim_out)),
high=np.sqrt(6. / (dim_in + dim_out)),
size=(dim_in, dim_out)
)
bias = np.zeros(dim_out)
return [scale * weights, bias]
@staticmethod
def forward(params, activation, data):
"""Forward an input matrix through the Gaussian residual MLP."""
inputs = data
for idx in range(0, len(params), 2):
weights = params[idx]
bias = params[idx + 1]
outputs = np.dot(inputs, weights) + bias
inputs = activation(outputs)
return inputs
class GaussianMLP(MLP):
"""Static implementation of a Gaussian residual MLP."""
@staticmethod
def create(dim_in, dim_out, dim_h, n_layers, scale, precision):
"""Create a Gaussian residual MLP."""
params = MLP.init_layer_params(dim_in, dim_h, scale)
for idx in range(n_layers - 1):
params += MLP.init_layer_params(dim_h, dim_h, scale)
params += MLP.init_layer_params(dim_h, 2 * dim_out, scale)
# Initialize the precision.
params[-1][dim_out:] -= np.log(precision)
return params
@staticmethod
def extract_params(params):
"""Extract the different part of the Gaussian MLP."""
return [params[-2:], params[:-2]]
@staticmethod
def forward(params, activation, data):
"""Forward an input matrix through the Gaussian residual MLP."""
linear_params, h_params = GaussianMLP.extract_params(params)
inputs = MLP.forward(h_params, activation, data)
outputs = np.dot(inputs, linear_params[0]) + linear_params[1]
mean, logvar = np.split(outputs, 2, axis=-1)
#var = np.log(1 + np.exp(logvar))
var = np.exp(logvar)
return mean, var
@staticmethod
def natural_params(mean, var):
np1 = - 1 / (2 * var)
np2 = mean / var
return np1, np2
@staticmethod
def std_params(np1, np2):
var = -1 / (2 * np1)
mean = np2 * var
return mean, var
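# Illustrative note (not in the original file): natural_params and std_params are
# mutual inverses for the Gaussian, e.g.
#   np1, np2 = GaussianMLP.natural_params(mean, var)
#   mean2, var2 = GaussianMLP.std_params(np1, np2)   # recovers mean and var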
class GaussianResidualMLP(GaussianMLP):
"""Static implementation of a Gaussian residual MLP."""
@staticmethod
def init_residual_params(dim_in, dim_out):
"""Partial isometry initialization."""
if dim_out == dim_in:
return [np.identity(dim_in)]
d = max(dim_in, dim_out)
weights = np.linalg.qr(np.random.randn(d,d))[0][:dim_in,:dim_out]
return [weights]
@staticmethod
def create(dim_in, dim_out, dim_h, n_layers, scale, precision):
"""Create a Gaussian residual MLP."""
params = GaussianMLP.create(dim_in, dim_out, dim_h, n_layers, scale,
precision)
#params += GaussianResidualMLP.init_residual_params(dim_in, dim_out)
return params
@staticmethod
def forward(params, activation, data):
"""Forward an input matrix through the Gaussian residual MLP."""
#gauss_params, res_params = params[:-1], params[-1]
gauss_params = params
mean, var = GaussianMLP.forward(gauss_params, activation, data)
#mean = mean + np.dot(data, res_params)
mean = mean + data
return mean, var
@staticmethod
def _kl_div(mean_post, var_post, mean_prior, var_prior):
"""KL divergence between the posterior and the prior."""
kl_div = (.5 * (mean_prior - mean_post)**2) / var_prior
ratio = var_post / var_prior
kl_div = kl_div + .5 * (ratio - 1 - np.log(ratio))
return
|
np.sum(kl_div, axis=1)
|
numpy.sum
|
import emcee
from multiprocessing import Pool
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
from pylcurve.lcurve import Lcurve
from pylcurve.modelling import Model
import pylcurve.mcmc_utils as m
import pylcurve.utils as utils
from pylcurve.filters import filters
"""
This script fits flux calibrated, multi-band primary eclipse photometry of
WD-WD/WD-dM binaries using an MCMC method to run Tom Marsh's LROCHE routine.
Using mass-radius relations it can determine stellar masses and effective
temperatures for both components.
All variables a user would typically modify are denoted by "ENTER" for ease.
"""
# ENTER filter system of observations
# cam = filters('sdss') ## SDSS throughputs
# cam = filters('ucam_sloan') ## ULTRACAM throughputs with standard sloan filters
cam = filters('ucam') ## ULTRACAM throughputs with super filters
# cam = filters('hcam') ## HiPERCAM throughputs with super filters
class EclipseLC(Model):
parameter_names = ('t1', 't2', 'm1', 'm2', 'incl', 't0', 'per', 'parallax')
def __init__(self, model_file, lightcurves, *args, **kwargs):
"""
A lightcurve model for an eclipsing WD-WD/WD-dM.
Parameters
----------
model_file: model containing LCURVE file with auxiliary (fixed) params
lightcurves: a dictionary of band: filename pairs
The remaining parameters are either passed in as a list of arguments
(in order) or specified as a dictionary:
t1, t2 : white dwarf/M-dwarf temp in K
m1, m2 : white dwarf/M-dwarf masses in solar masses
incl : inclination of system
t0 : mid-eclipse time of primary eclipse
per : orbital period of system
parallax : parallax of system
"""
super().__init__(*args, **kwargs)
self.lightcurves = lightcurves
self.model_file = model_file
def get_value(self, band):
"""
Calculate lightcurve
Parameters
----------
band : string
SDSS/HiPERCAM bandpass
Returns
-------
ym : np.ndarray
model values
"""
# setup LCURVE file for this band
lcurve_model = Lcurve(self.model_file)
lcurve_pars = dict()
q = self.m2/self.m1
# ENTER chosen mass-radius relations for both stars
self.r1 = utils.get_radius(self.m1, self.t1, star_type='CO')
self.r2 = utils.get_radius(self.m2, self.t2, star_type='MS')
log_g1 = utils.log_g(self.m1, self.r1)
log_g2 = utils.log_g(self.m2, self.r2)
a = utils.separation(self.m1, self.m2, self.per)
# ENTER interstellar reddening/extinction
ebv = 0.05
Av = 3.1 * ebv
scale_factor = utils.scalefactor(a, self.parallax, cam.eff_wl[band], Av)
lcurve_pars['t1'] = utils.get_Tbb(self.t1, log_g1, band, star_type='WD',
source='Bergeron')
lcurve_pars['t2'] = utils.get_Tbb(self.t2, log_g2, band, star_type='MS')
lcurve_pars['r1'] = self.r1/a # scale to separation units
lcurve_pars['r2'] = utils.Rva_to_Rl1(q, self.r2/a) # scale and correct
lcurve_pars['t0'] = self.t0
lcurve_pars['period'] = self.per
lcurve_pars['iangle'] = self.incl
lcurve_pars['q'] = q
lcurve_pars['wavelength'] = cam.eff_wl[band].to_value(u.nm)
lcurve_pars['phase1'] = np.arcsin(lcurve_pars['r1'] - lcurve_pars['r2']) / (2 * np.pi)
lcurve_pars['phase2'] = 0.5 - lcurve_pars['phase1']
lcurve_model.set(lcurve_pars)
lcurve_model.set(utils.get_ldcs(self.t1, logg_1=log_g1, band=band,
star_type_1='WD', teff_2=self.t2,
logg_2=log_g2, star_type_2='MS'))
if not lcurve_model.ok():
raise ValueError('invalid parameter combination')
_, _, _, ym = lcurve_model(self.lightcurves[band], scale_factor)
return ym
def log_prior(self):
"""
Prior probabilities
"""
# first call parent class log_prior -> checks params in bounds
val = super().log_prior()
if np.isinf(val):
return val
# ENTER prior on parallax from Gaia (NN Ser)
par_prior = m.Prior('gaussPos', 1.9166, 0.0980)
val += par_prior.ln_prob(self.parallax)
# ENTER prior on T0
prior = m.Prior('gauss', 55307.400302182999, 1.3524578e-06)
val += prior.ln_prob(self.t0)
# ENTER prior on period
prior = m.Prior('gauss', 0.13008017141, 0.00000000017)
val += prior.ln_prob(self.per)
return val
def plot(self, ax, band, params, style='whole', dcolor='k'):
"""
Plots data and model on axis
style is either whole, model or residuals.
'whole' plots the raw data and the full model (mean model + GP).
'model' plots the mean model and the data after subtraction
of the mean of the GP - i.e the data minus the pulsations
'residuals' plots the data minus the mean model, together with the
mean and range of the GP
"""
self.set_parameter_vector(params)
t, _, y, ye, _, _ = np.loadtxt(self.lightcurves[band]).T
ym = self.get_value(band)
toff = int(np.floor(
|
np.min(t)
|
numpy.min
|
from __future__ import print_function
from numpy.core.records import array
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import copy
import numpy as np
import math
from sparselearning.igq import get_igq_sparsities
import pdb
class CosineDecay(object):
def __init__(self, death_rate, T_max, eta_min=0.005, last_epoch=-1):
self.sgd = optim.SGD(torch.nn.ParameterList([torch.nn.Parameter(torch.zeros(1))]), lr=death_rate)
self.cosine_stepper = torch.optim.lr_scheduler.CosineAnnealingLR(self.sgd, T_max, eta_min, last_epoch)
def step(self):
self.cosine_stepper.step()
def get_dr(self, death_rate):
return self.sgd.param_groups[0]['lr']
class LinearDecay(object):
def __init__(self, death_rate, factor=0.99, frequency=600):
self.factor = factor
self.steps = 0
self.frequency = frequency
def step(self):
self.steps += 1
def get_dr(self, death_rate):
if self.steps > 0 and self.steps % self.frequency == 0:
return death_rate*self.factor
else:
return death_rate
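# Illustrative usage sketch (not in the original file): both schedules expose the
# same step()/get_dr() interface, so either can back `death_rate_decay` below.
#   decay = CosineDecay(death_rate=0.5, T_max=100)
#   for _ in range(10):
#       decay.step()
#   rate = decay.get_dr(0.5)   # cosine-annealed death rate after 10 steps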
class Masking(object):
def __init__(self, optimizer, death_rate=0.3, growth_death_ratio=1.0, death_rate_decay=None, death_mode='magnitude', growth_mode='momentum', redistribution_mode='momentum', args=None, spe_initial=None, train_loader=None):
growth_modes = ['random', 'momentum', 'momentum_neuron', 'gradient']
if growth_mode not in growth_modes:
print('Growth mode: {0} not supported!'.format(growth_mode))
print('Supported modes are:', str(growth_modes))
self.args = args
self.loader = train_loader
self.device = torch.device("cuda")
self.growth_mode = growth_mode
self.death_mode = death_mode
self.redistribution_mode = redistribution_mode
self.death_rate_decay = death_rate_decay
self.spe_initial = spe_initial # initial masks made by SNIP
self.snip_masks = None # masks made by SNIP during training
self.masks = {}
self.newly_masks = {}
self.survival = {}
self.pruned_number = {}
self.modules = []
self.names = []
self.optimizer = optimizer
# stats
self.name2zeros = {}
self.name2nonzeros = {}
self.death_rate = death_rate
self.name2death_rate = {}
self.steps = 0
#dst sparse
self.start_epoch_dst = 0
self.current_epoch = 0
self.prune_ratio = self.args.prune_ratio
self.growth_ratio = self.args.growth_ratio
self.lossinfo = []
self.train_val_diff = []
self.update_threshold = self.args.update_threshold
self.prune_epochs = []
self.growth_epochs = []
self.total_fired_weights = 0
self.init_mask = None
def init(self, mode='ER', density=0.05, erk_power_scale=1.0):
self.sparsity = density
if mode == 'uniform':
index = 0
for module in self.modules:
for name, weight in module.named_parameters():
name_cur = name + '_' + str(index)
index += 1
if name_cur not in self.masks: continue
self.masks[name_cur][:] = (torch.rand(weight.shape) < density).float().data.cuda()
elif mode == 'fixed_ERK':
print('initialize by fixed_ERK')
total_params = 0
for name, weight in self.masks.items():
total_params += weight.numel()
is_epsilon_valid = False
dense_layers = set()
while not is_epsilon_valid:
divisor = 0
rhs = 0
raw_probabilities = {}
for name, mask in self.masks.items():
n_param = np.prod(mask.shape)
n_zeros = n_param * (1 - density)
n_ones = n_param * density
if name in dense_layers:
# See `- default_sparsity * (N_3 + N_4)` part of the equation above.
rhs -= n_zeros
else:
# Corresponds to `(1 - default_sparsity) * (N_1 + N_2)` part of the
# equation above.
rhs += n_ones
# Erdos-Renyi probability: epsilon * (n_in + n_out) / (n_in * n_out).
raw_probabilities[name] = (
np.sum(mask.shape) / np.prod(mask.shape)
) ** erk_power_scale
# Note that raw_probabilities[mask] * n_param gives the individual
# elements of the divisor.
divisor += raw_probabilities[name] * n_param
# By multiplying individual probabilities with epsilon, we should get the
# number of parameters per layer correctly.
epsilon = rhs / divisor
# If epsilon * raw_probabilities[mask.name] > 1, we set the sparsity of that
# mask to 0. so it becomes part of the dense_layers set.
max_prob = np.max(list(raw_probabilities.values()))
max_prob_one = max_prob * epsilon
if max_prob_one > 1:
is_epsilon_valid = False
for mask_name, mask_raw_prob in raw_probabilities.items():
if mask_raw_prob == max_prob:
print(f"Sparsity of var:{mask_name} had to be set to 0.")
dense_layers.add(mask_name)
else:
is_epsilon_valid = True
density_dict = {}
total_nonzero = 0.0
# With the valid epsilon, we can set sparsities of the remaining layers.
for name, mask in self.masks.items():
n_param = np.prod(mask.shape)
if name in dense_layers:
density_dict[name] = 1.0
else:
probability_one = epsilon * raw_probabilities[name]
density_dict[name] = probability_one
print(
f"layer: {name}, shape: {mask.shape}, density: {density_dict[name]}"
)
self.masks[name][:] = (torch.rand(mask.shape) < density_dict[name]).float().data.cuda()
total_nonzero += density_dict[name] * mask.numel()
print(f"Overall sparsity {total_nonzero / total_params}")
elif mode == 'ER':
print('initialize by SET')
# initialization used in sparse evolutionary training
total_params = 0
index = 0
for module in self.modules:
for name, weight in module.named_parameters():
name_cur = name + '_' + str(index)
index += 1
if name_cur not in self.masks: continue
total_params += weight.numel()
target_params = total_params *density
tolerance = 5
current_params = 0
new_nonzeros = 0
epsilon = 10.0
growth_factor = 0.5
# searching for the right epsilon for a specific sparsity level
while not ((current_params+tolerance > target_params) and (current_params-tolerance < target_params)):
new_nonzeros = 0.0
index = 0
for name, weight in module.named_parameters():
name_cur = name + '_' + str(index)
index += 1
if name_cur not in self.masks: continue
# original SET formulation for fully connected weights: num_weights = epsilon * (noRows + noCols)
# we adapt the same formula for convolutional weights
growth = epsilon*sum(weight.shape)
new_nonzeros += growth
current_params = new_nonzeros
if current_params > target_params:
epsilon *= 1.0 - growth_factor
else:
epsilon *= 1.0 + growth_factor
growth_factor *= 0.95
index = 0
for name, weight in module.named_parameters():
name_cur = name + '_' + str(index)
index += 1
if name_cur not in self.masks: continue
growth = epsilon*sum(weight.shape)
prob = growth/np.prod(weight.shape)
self.masks[name_cur][:] = (torch.rand(weight.shape) < prob).float().data.cuda()
# random igq
elif mode == 'igq':
model = self.modules[0]
sparsities = get_igq_sparsities(model, density)
print(sparsities)
it = iter(sparsities)
index = 0
for name, weight in model.named_parameters():
name_cur = name + '_' + str(index)
index += 1
if name_cur not in self.masks: continue
mask = torch.ones(weight.shape)
ind = np.random.choice(range(np.prod(mask.shape)),
size=int(next(it)*np.prod(mask.shape)), replace=False)
mask.reshape(-1)[ind] = 0.
self.masks[name_cur][:] = mask.float().data.cuda()
self.apply_mask()
self.fired_masks = copy.deepcopy(self.masks) # used to track fired weights (over-parameterization)
self.init_death_rate(self.death_rate)
self.gather_statistics()
self.print_nonzero_counts()
total_size = 0
for name, weight in self.masks.items():
total_size += weight.numel()
print('Total Model parameters:', total_size)
sparse_size = 0
for name, weight in self.masks.items():
sparse_size += (weight != 0).sum().int().item()
print('Total parameters under sparsity level of {0}: {1}'.format(density, sparse_size / total_size))
def get_info_resume(self):
info = {
'lossinfo': self.lossinfo,
'train_val_diff': self.train_val_diff,
'prune_epochs': self.prune_epochs,
'growth_epochs': self.growth_epochs,
'masks': self.masks,
'name2death_rate': self.name2death_rate,
'decay': (self.death_rate_decay.sgd.state_dict(), self.death_rate_decay.cosine_stepper.state_dict()),
}
return info
def load_info_resume(self, info):
self.lossinfo = info['lossinfo']
self.train_val_diff = info['train_val_diff']
self.prune_epochs = info['prune_epochs']
self.growth_epochs = info['growth_epochs']
self.masks = info['masks']
self.name2death_rate = info['name2death_rate']
self.death_rate_decay.sgd.load_state_dict(info['decay'][0])
self.death_rate_decay.cosine_stepper.load_state_dict(info['decay'][1])
def init_death_rate(self, death_rate):
for name in self.masks:
self.name2death_rate[name] = death_rate
def at_end_of_epoch(self):
self.truncate_weights()
_, total_fired_weights = self.fired_masks_update()
self.total_fired_weights = total_fired_weights
self.print_nonzero_counts()
def step(self):
self.optimizer.step()
self.apply_mask()
self.death_rate_decay.step()
for name in self.masks:
if self.args.decay_schedule == 'cosine':
self.name2death_rate[name] = self.death_rate_decay.get_dr(self.name2death_rate[name])
elif self.args.decay_schedule == 'constant':
self.name2death_rate[name] = self.args.death_rate
self.death_rate = self.name2death_rate[name]
self.steps += 1
def add_module(self, module, density, sparse_init='ER'):
self.modules.append(module)
index = 0
for name, tensor in module.named_parameters():
name_cur = name + '_' + str(index)
index += 1
if len(tensor.size()) ==4 or len(tensor.size()) ==2:
self.names.append(name_cur)
self.masks[name_cur] = torch.zeros_like(tensor, dtype=torch.float32, requires_grad=False).cuda()
print('Removing biases...')
self.remove_weight_partial_name('bias')
self.init(mode=sparse_init, density=density)
def remove_weight(self, name, index):
if name in self.masks:
print('Removing {0} of size {1} = {2} parameters.'.format(name, self.masks[name].shape,
self.masks[name].numel()))
def remove_weight_partial_name(self, partial_name):
removed = set()
for name in list(self.masks.keys()):
if partial_name in name:
print('Removing {0} of size {1} with {2} parameters...'.format(name, self.masks[name].shape,
np.prod(self.masks[name].shape)))
removed.add(name)
self.masks.pop(name)
print('Removed {0} layers.'.format(len(removed)))
i = 0
while i < len(self.names):
name = self.names[i]
if name in removed:
self.names.pop(i)
else:
i += 1
def remove_type(self, nn_type):
index = 0
for module in self.modules:
for name, module in module.named_modules():
print(name)
if isinstance(module, nn_type):
self.remove_weight(name, index)
index += 1
def apply_mask(self):
index = 0
for module in self.modules:
for name, weight in module.named_parameters():
name_cur = name+'_'+str(index)
index += 1
if name_cur in self.masks:
weight.data = weight.data*self.masks[name_cur]
if 'momentum_buffer' in self.optimizer.state[weight]:
self.optimizer.state[weight]['momentum_buffer'] = self.optimizer.state[weight]['momentum_buffer']*self.masks[name_cur]
##################### Flying Bird+ sparse ################
def set_dst_start_epoch(self, epoch):
self.start_epoch_dst = epoch
def set_dst_current_epoch(self, epoch):
self.current_epoch = epoch
def update_loss_info(self, loss):
self.lossinfo.append(loss)
def update_train_val_diff(self, train_ra, val_ra):
self.train_val_diff.append(train_ra - val_ra)
# self.train_val_diff.append(val_ra)
def clear_dst_info(self):
l0 = self.lossinfo[-1]
l1 = self.train_val_diff[-1]
self.lossinfo.clear()
self.train_val_diff.clear()
self.lossinfo.append(l0)
self.train_val_diff.append(l1)
def get_dst_ratio(self):
r = self.args.epoch_range + 1
# growth_ratio
l0 = self.lossinfo[-r:][:-1]
l1 = self.lossinfo[-r:][1:]
diff1 = np.array(l1) -
|
np.array(l0)
|
numpy.array
|
import json
import os
import pathlib
import shutil
import subprocess
import numpy as np
import pytest
import yaml
from zntrack import zn
from zntrack.core.base import Node
@pytest.fixture
def proj_path(tmp_path):
shutil.copy(__file__, tmp_path)
os.chdir(tmp_path)
subprocess.check_call(["git", "init"])
subprocess.check_call(["dvc", "init"])
return tmp_path
class SingleNode(Node):
param1 = zn.params()
def __init__(self, param1=None, **kwargs):
super().__init__(**kwargs)
self.param1 = param1
def test_pathlib_param(proj_path):
"""Test serialized data as parameters"""
SingleNode(param1=pathlib.Path("test_file.json")).save()
assert (
yaml.safe_load(pathlib.Path("params.yaml").read_text())["SingleNode"]["param1"]
== "test_file.json"
)
assert json.loads(pathlib.Path("zntrack.json").read_text())["SingleNode"] == {
"param1": {"_type": "pathlib.Path"}
}
assert SingleNode.load().param1 == pathlib.Path("test_file.json")
def test_small_numpy_param(proj_path):
SingleNode(param1=np.arange(4)).save()
assert yaml.safe_load(pathlib.Path("params.yaml").read_text())["SingleNode"][
"param1"
] == [0, 1, 2, 3]
assert json.loads(pathlib.Path("zntrack.json").read_text())["SingleNode"] == {
"param1": {"_type": "np.ndarray_small"}
}
np.testing.assert_array_equal(SingleNode.load().param1,
|
np.arange(4)
|
numpy.arange
|
import numpy as np
# Gotran generated code for the "grandi" model
def init_state_values_single(**values):
"""
Initialize state values
"""
# Init values
# m=0.003793087414436, h=0.626221949492493, j=0.624553572490432,
# x_kr=0.0210022533039071, x_ks=0.00428016666258923,
# x_to_s=0.000440445885642567, y_to_s=0.785115828275182,
# x_to_f=0.000440438103758954, y_to_f=0.999995844038706,
# d=2.92407183949469e-06, f=0.995135796703515,
# f_Ca_Bj=0.0246760872105795, f_Ca_Bsl=0.0152723084239416,
# Ry_Rr=0.890806040818203, Ry_Ro=7.40481128853622e-07,
# Ry_Ri=9.07666168960848e-08, Na_Bj=3.4543773303328,
# Na_Bsl=0.753740951477775, Tn_CL=0.00893455096919132,
# Tn_CHc=0.117412025936615, Tn_CHm=0.0106160166692932,
# CaM=0.000295573424135051, Myo_c=0.00192322252438022,
# Myo_m=0.137560495022823, SRB=0.00217360235649355,
# SLL_j=0.00740524521680039, SLL_sl=0.00990339304377132,
# SLH_j=0.0735890020284214, SLH_sl=0.114583623436917,
# Csqn_b=1.19723145924432, Ca_sr=0.554760499828172,
# Na_j=8.40537012592918, Na_sl=8.40491910001025, Na_i=8.40513364344858,
# K_i=120, Ca_j=0.000175882395147342, Ca_sl=0.000106779509977354,
# Ca_i=8.72509677797499e-05, V_m=-81.4552030512661
init_values = np.array(
[
0.003793087414436,
0.626221949492493,
0.624553572490432,
0.0210022533039071,
0.00428016666258923,
0.000440445885642567,
0.785115828275182,
0.000440438103758954,
0.999995844038706,
2.92407183949469e-06,
0.995135796703515,
0.0246760872105795,
0.0152723084239416,
0.890806040818203,
7.40481128853622e-07,
9.07666168960848e-08,
3.4543773303328,
0.753740951477775,
0.00893455096919132,
0.117412025936615,
0.0106160166692932,
0.000295573424135051,
0.00192322252438022,
0.137560495022823,
0.00217360235649355,
0.00740524521680039,
0.00990339304377132,
0.0735890020284214,
0.114583623436917,
1.19723145924432,
0.554760499828172,
8.40537012592918,
8.40491910001025,
8.40513364344858,
120,
0.000175882395147342,
0.000106779509977354,
8.72509677797499e-05,
-81.4552030512661,
],
dtype=np.float_,
)
# State indices and limit checker
state_ind = dict(
[
("m", 0),
("h", 1),
("j", 2),
("x_kr", 3),
("x_ks", 4),
("x_to_s", 5),
("y_to_s", 6),
("x_to_f", 7),
("y_to_f", 8),
("d", 9),
("f", 10),
("f_Ca_Bj", 11),
("f_Ca_Bsl", 12),
("Ry_Rr", 13),
("Ry_Ro", 14),
("Ry_Ri", 15),
("Na_Bj", 16),
("Na_Bsl", 17),
("Tn_CL", 18),
("Tn_CHc", 19),
("Tn_CHm", 20),
("CaM", 21),
("Myo_c", 22),
("Myo_m", 23),
("SRB", 24),
("SLL_j", 25),
("SLL_sl", 26),
("SLH_j", 27),
("SLH_sl", 28),
("Csqn_b", 29),
("Ca_sr", 30),
("Na_j", 31),
("Na_sl", 32),
("Na_i", 33),
("K_i", 34),
("Ca_j", 35),
("Ca_sl", 36),
("Ca_i", 37),
("V_m", 38),
]
)
for state_name, value in values.items():
if state_name not in state_ind:
raise ValueError("{0} is not a state.".format(state_name))
ind = state_ind[state_name]
# Assign value
init_values[ind] = value
return init_values
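# Example (illustrative): init_state_values_single(V_m=-80.0) returns the default
# state vector with the membrane potential entry (index 38) set to -80.0.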
def init_parameter_values_single(**values):
"""
Initialize parameter values
"""
# Param values
# Fjunc=0.11, Fjunc_CaL=0.9, cellLength=100, cellRadius=10.25,
# distJuncSL=0.5, distSLcyto=0.45, junctionLength=0.16,
# junctionRadius=0.015, GNa=23, GNaB=0.000597, IbarNaK=1.8,
# KmKo=1.5, KmNaip=11, Q10KmNai=1.39, Q10NaK=1.63, GKr=0.035,
# GKp=0.002, GKs=0.0035, pNaK=0.01833, GK1=0.35, Gto=0.13, epi=1,
# GClB=0.009, GClCa=0.0548125, KdClCa=0.1, GCaL=0.5, Q10CaL=1.8,
# pCa=0.00054, pK=2.7e-07, pNa=1.5e-08, IbarNCX=4.5, Kdact=0.00015,
# KmCai=0.00359, KmCao=1.3, KmNai=12.29, KmNao=87.5, Q10NCX=1.57,
# ksat=0.32, nu=0.27, IbarSLCaP=0.0673, KmPCa=0.0005,
# Q10SLCaP=2.35, GCaB=0.0005513, Kmf=0.000246, Kmr=1.7, MaxSR=15,
# MinSR=1, Q10SRCaP=2.6, Vmax_SRCaP=0.0053114, ec50SR=0.45,
# hillSRCaP=1.787, kiCa=0.5, kim=0.005, koCa=10, kom=0.06, ks=25,
# Bmax_Naj=7.561, Bmax_Nasl=1.65, koff_na=0.001, kon_na=0.0001,
# Bmax_CaM=0.024, Bmax_SR=0.0171, Bmax_TnChigh=0.14,
# Bmax_TnClow=0.07, Bmax_myosin=0.14, koff_cam=0.238,
# koff_myoca=0.00046, koff_myomg=5.7e-05, koff_sr=0.06,
# koff_tnchca=3.2e-05, koff_tnchmg=0.00333, koff_tncl=0.0196,
# kon_cam=34, kon_myoca=13.8, kon_myomg=0.0157, kon_sr=100,
# kon_tnchca=2.37, kon_tnchmg=0.003, kon_tncl=32.7,
# Bmax_SLhighj0=0.000165, Bmax_SLhighsl0=0.0134, Bmax_SLlowj0=0.00046,
# Bmax_SLlowsl0=0.0374, koff_slh=0.03, koff_sll=1.3, kon_slh=100,
# kon_sll=100, Bmax_Csqn0=0.14, DcaJuncSL=1.64e-06,
# DcaSLcyto=1.22e-06, J_ca_juncsl=8.2413e-13, J_ca_slmyo=3.7243e-12,
# koff_csqn=65, kon_csqn=100, DnaJuncSL=1.09e-05, DnaSLcyto=1.79e-05,
# J_na_juncsl=1.8313e-14, J_na_slmyo=1.6386e-12, Nao=140, Ko=5.4,
# Cao=1.8, Cli=15, Clo=150, Mgi=1, Cmem=1.381e-10, Frdy=96485,
# R=8314, Temp=310, stim_amplitude=40.0, stim_duration=1.0,
# stim_period=1000.0, stim_start=0.0
init_values = np.array(
[
0.11,
0.9,
100,
10.25,
0.5,
0.45,
0.16,
0.015,
23,
0.000597,
1.8,
1.5,
11,
1.39,
1.63,
0.035,
0.002,
0.0035,
0.01833,
0.35,
0.13,
1,
0.009,
0.0548125,
0.1,
0.5,
1.8,
0.00054,
2.7e-07,
1.5e-08,
4.5,
0.00015,
0.00359,
1.3,
12.29,
87.5,
1.57,
0.32,
0.27,
0.0673,
0.0005,
2.35,
0.0005513,
0.000246,
1.7,
15,
1,
2.6,
0.0053114,
0.45,
1.787,
0.5,
0.005,
10,
0.06,
25,
7.561,
1.65,
0.001,
0.0001,
0.024,
0.0171,
0.14,
0.07,
0.14,
0.238,
0.00046,
5.7e-05,
0.06,
3.2e-05,
0.00333,
0.0196,
34,
13.8,
0.0157,
100,
2.37,
0.003,
32.7,
0.000165,
0.0134,
0.00046,
0.0374,
0.03,
1.3,
100,
100,
0.14,
1.64e-06,
1.22e-06,
8.2413e-13,
3.7243e-12,
65,
100,
1.09e-05,
1.79e-05,
1.8313e-14,
1.6386e-12,
140,
5.4,
1.8,
15,
150,
1,
1.381e-10,
96485,
8314,
310,
40.0,
1.0,
1000.0,
0.0,
],
dtype=np.float_,
)
# Parameter indices and limit checker
param_ind = dict(
[
("Fjunc", 0),
("Fjunc_CaL", 1),
("cellLength", 2),
("cellRadius", 3),
("distJuncSL", 4),
("distSLcyto", 5),
("junctionLength", 6),
("junctionRadius", 7),
("GNa", 8),
("GNaB", 9),
("IbarNaK", 10),
("KmKo", 11),
("KmNaip", 12),
("Q10KmNai", 13),
("Q10NaK", 14),
("GKr", 15),
("GKp", 16),
("GKs", 17),
("pNaK", 18),
("GK1", 19),
("Gto", 20),
("epi", 21),
("GClB", 22),
("GClCa", 23),
("KdClCa", 24),
("GCaL", 25),
("Q10CaL", 26),
("pCa", 27),
("pK", 28),
("pNa", 29),
("IbarNCX", 30),
("Kdact", 31),
("KmCai", 32),
("KmCao", 33),
("KmNai", 34),
("KmNao", 35),
("Q10NCX", 36),
("ksat", 37),
("nu", 38),
("IbarSLCaP", 39),
("KmPCa", 40),
("Q10SLCaP", 41),
("GCaB", 42),
("Kmf", 43),
("Kmr", 44),
("MaxSR", 45),
("MinSR", 46),
("Q10SRCaP", 47),
("Vmax_SRCaP", 48),
("ec50SR", 49),
("hillSRCaP", 50),
("kiCa", 51),
("kim", 52),
("koCa", 53),
("kom", 54),
("ks", 55),
("Bmax_Naj", 56),
("Bmax_Nasl", 57),
("koff_na", 58),
("kon_na", 59),
("Bmax_CaM", 60),
("Bmax_SR", 61),
("Bmax_TnChigh", 62),
("Bmax_TnClow", 63),
("Bmax_myosin", 64),
("koff_cam", 65),
("koff_myoca", 66),
("koff_myomg", 67),
("koff_sr", 68),
("koff_tnchca", 69),
("koff_tnchmg", 70),
("koff_tncl", 71),
("kon_cam", 72),
("kon_myoca", 73),
("kon_myomg", 74),
("kon_sr", 75),
("kon_tnchca", 76),
("kon_tnchmg", 77),
("kon_tncl", 78),
("Bmax_SLhighj0", 79),
("Bmax_SLhighsl0", 80),
("Bmax_SLlowj0", 81),
("Bmax_SLlowsl0", 82),
("koff_slh", 83),
("koff_sll", 84),
("kon_slh", 85),
("kon_sll", 86),
("Bmax_Csqn0", 87),
("DcaJuncSL", 88),
("DcaSLcyto", 89),
("J_ca_juncsl", 90),
("J_ca_slmyo", 91),
("koff_csqn", 92),
("kon_csqn", 93),
("DnaJuncSL", 94),
("DnaSLcyto", 95),
("J_na_juncsl", 96),
("J_na_slmyo", 97),
("Nao", 98),
("Ko", 99),
("Cao", 100),
("Cli", 101),
("Clo", 102),
("Mgi", 103),
("Cmem", 104),
("Frdy", 105),
("R", 106),
("Temp", 107),
("stim_amplitude", 108),
("stim_duration", 109),
("stim_period", 110),
("stim_start", 111),
]
)
for param_name, value in values.items():
if param_name not in param_ind:
raise ValueError("{0} is not a parameter.".format(param_name))
ind = param_ind[param_name]
# Assign value
init_values[ind] = value
return init_values
def state_indices(*states):
"""
State indices
"""
state_inds = dict(
[
("m", 0),
("h", 1),
("j", 2),
("x_kr", 3),
("x_ks", 4),
("x_to_s", 5),
("y_to_s", 6),
("x_to_f", 7),
("y_to_f", 8),
("d", 9),
("f", 10),
("f_Ca_Bj", 11),
("f_Ca_Bsl", 12),
("Ry_Rr", 13),
("Ry_Ro", 14),
("Ry_Ri", 15),
("Na_Bj", 16),
("Na_Bsl", 17),
("Tn_CL", 18),
("Tn_CHc", 19),
("Tn_CHm", 20),
("CaM", 21),
("Myo_c", 22),
("Myo_m", 23),
("SRB", 24),
("SLL_j", 25),
("SLL_sl", 26),
("SLH_j", 27),
("SLH_sl", 28),
("Csqn_b", 29),
("Ca_sr", 30),
("Na_j", 31),
("Na_sl", 32),
("Na_i", 33),
("K_i", 34),
("Ca_j", 35),
("Ca_sl", 36),
("Ca_i", 37),
("V_m", 38),
]
)
indices = []
for state in states:
if state not in state_inds:
raise ValueError("Unknown state: '{0}'".format(state))
indices.append(state_inds[state])
if len(indices) > 1:
return indices
else:
return indices[0]
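# Example (illustrative): state_indices("V_m") returns 38, while
# state_indices("Ca_i", "V_m") returns the list [37, 38].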
def parameter_indices(*params):
"""
Parameter indices
"""
param_inds = dict(
[
("Fjunc", 0),
("Fjunc_CaL", 1),
("cellLength", 2),
("cellRadius", 3),
("distJuncSL", 4),
("distSLcyto", 5),
("junctionLength", 6),
("junctionRadius", 7),
("GNa", 8),
("GNaB", 9),
("IbarNaK", 10),
("KmKo", 11),
("KmNaip", 12),
("Q10KmNai", 13),
("Q10NaK", 14),
("GKr", 15),
("GKp", 16),
("GKs", 17),
("pNaK", 18),
("GK1", 19),
("Gto", 20),
("epi", 21),
("GClB", 22),
("GClCa", 23),
("KdClCa", 24),
("GCaL", 25),
("Q10CaL", 26),
("pCa", 27),
("pK", 28),
("pNa", 29),
("IbarNCX", 30),
("Kdact", 31),
("KmCai", 32),
("KmCao", 33),
("KmNai", 34),
("KmNao", 35),
("Q10NCX", 36),
("ksat", 37),
("nu", 38),
("IbarSLCaP", 39),
("KmPCa", 40),
("Q10SLCaP", 41),
("GCaB", 42),
("Kmf", 43),
("Kmr", 44),
("MaxSR", 45),
("MinSR", 46),
("Q10SRCaP", 47),
("Vmax_SRCaP", 48),
("ec50SR", 49),
("hillSRCaP", 50),
("kiCa", 51),
("kim", 52),
("koCa", 53),
("kom", 54),
("ks", 55),
("Bmax_Naj", 56),
("Bmax_Nasl", 57),
("koff_na", 58),
("kon_na", 59),
("Bmax_CaM", 60),
("Bmax_SR", 61),
("Bmax_TnChigh", 62),
("Bmax_TnClow", 63),
("Bmax_myosin", 64),
("koff_cam", 65),
("koff_myoca", 66),
("koff_myomg", 67),
("koff_sr", 68),
("koff_tnchca", 69),
("koff_tnchmg", 70),
("koff_tncl", 71),
("kon_cam", 72),
("kon_myoca", 73),
("kon_myomg", 74),
("kon_sr", 75),
("kon_tnchca", 76),
("kon_tnchmg", 77),
("kon_tncl", 78),
("Bmax_SLhighj0", 79),
("Bmax_SLhighsl0", 80),
("Bmax_SLlowj0", 81),
("Bmax_SLlowsl0", 82),
("koff_slh", 83),
("koff_sll", 84),
("kon_slh", 85),
("kon_sll", 86),
("Bmax_Csqn0", 87),
("DcaJuncSL", 88),
("DcaSLcyto", 89),
("J_ca_juncsl", 90),
("J_ca_slmyo", 91),
("koff_csqn", 92),
("kon_csqn", 93),
("DnaJuncSL", 94),
("DnaSLcyto", 95),
("J_na_juncsl", 96),
("J_na_slmyo", 97),
("Nao", 98),
("Ko", 99),
("Cao", 100),
("Cli", 101),
("Clo", 102),
("Mgi", 103),
("Cmem", 104),
("Frdy", 105),
("R", 106),
("Temp", 107),
("stim_amplitude", 108),
("stim_duration", 109),
("stim_period", 110),
("stim_start", 111),
]
)
indices = []
for param in params:
if param not in param_inds:
raise ValueError("Unknown param: '{0}'".format(param))
indices.append(param_inds[param])
if len(indices) > 1:
return indices
else:
return indices[0]
def monitor_indices(*monitored):
"""
Monitor indices
"""
monitor_inds = dict(
[
("Vcell", 0),
("Vmyo", 1),
("Vsr", 2),
("Vsl", 3),
("Vjunc", 4),
("SAjunc", 5),
("SAsl", 6),
("Fsl", 7),
("Fsl_CaL", 8),
("mss", 9),
("taum", 10),
("ah", 11),
("bh", 12),
("tauh", 13),
("hss", 14),
("aj", 15),
("bj", 16),
("tauj", 17),
("jss", 18),
("I_Na_junc", 19),
("I_Na_sl", 20),
("I_Na", 21),
("I_nabk_junc", 22),
("I_nabk_sl", 23),
("I_nabk", 24),
("sigma", 25),
("fnak", 26),
("I_nak_junc", 27),
("I_nak_sl", 28),
("I_nak", 29),
("gkr", 30),
("xrss", 31),
("tauxr", 32),
("rkr", 33),
("I_kr", 34),
("kp_kp", 35),
("I_kp_junc", 36),
("I_kp_sl", 37),
("I_kp", 38),
("eks", 39),
("gks_junc", 40),
("gks_sl", 41),
("xsss", 42),
("tauxs", 43),
("I_ks_junc", 44),
("I_ks_sl", 45),
("I_ks", 46),
("GtoSlow", 47),
("GtoFast", 48),
("xtoss", 49),
("ytoss", 50),
("tauxtos", 51),
("tauytos", 52),
("I_tos", 53),
("tauxtof", 54),
("tauytof", 55),
("I_tof", 56),
("I_to", 57),
("I_ClCa_junc", 58),
("I_ClCa_sl", 59),
("I_ClCa", 60),
("I_Clbk", 61),
("fss", 62),
("dss", 63),
("taud", 64),
("tauf", 65),
("fcaCaMSL", 66),
("fcaCaj", 67),
("ibarca_j", 68),
("ibarca_sl", 69),
("ibark", 70),
("ibarna_j", 71),
("ibarna_sl", 72),
("I_Ca_junc", 73),
("I_Ca_sl", 74),
("I_Ca", 75),
("I_CaK", 76),
("I_CaNa_junc", 77),
("I_CaNa_sl", 78),
("I_CaNa", 79),
("I_Catot", 80),
("Ka_junc", 81),
("Ka_sl", 82),
("s1_junc", 83),
("s1_sl", 84),
("s2_junc", 85),
("s3_junc", 86),
("s2_sl", 87),
("s3_sl", 88),
("I_ncx_junc", 89),
("I_ncx_sl", 90),
("I_ncx", 91),
("I_pca_junc", 92),
("I_pca_sl", 93),
("I_pca", 94),
("I_cabk_junc", 95),
("I_cabk_sl", 96),
("I_cabk", 97),
("kCaSR", 98),
("koSRCa", 99),
("kiSRCa", 100),
("RI", 101),
("J_SRCarel", 102),
("J_serca", 103),
("J_SRleak", 104),
("J_CaB_cytosol", 105),
("Bmax_SLlowsl", 106),
("Bmax_SLlowj", 107),
("Bmax_SLhighsl", 108),
("Bmax_SLhighj", 109),
("J_CaB_junction", 110),
("J_CaB_sl", 111),
("Bmax_Csqn", 112),
("I_Na_tot_junc", 113),
("I_Na_tot_sl", 114),
("I_Na_tot_sl2", 115),
("I_Na_tot_junc2", 116),
("I_K_tot", 117),
("I_Ca_tot_junc", 118),
("I_Ca_tot_sl", 119),
("i_Stim", 120),
("I_Na_tot", 121),
("I_Cl_tot", 122),
("I_Ca_tot", 123),
("I_tot", 124),
("FoRT", 125),
("ena_junc", 126),
("ena_sl", 127),
("ek", 128),
("eca_junc", 129),
("eca_sl", 130),
("ecl", 131),
("Qpow", 132),
("aki", 133),
("bki", 134),
("kiss", 135),
("I_K1", 136),
("dm_dt", 137),
("dh_dt", 138),
("dj_dt", 139),
("dx_kr_dt", 140),
("dx_ks_dt", 141),
("dx_to_s_dt", 142),
("dy_to_s_dt", 143),
("dx_to_f_dt", 144),
("dy_to_f_dt", 145),
("dd_dt", 146),
("df_dt", 147),
("df_Ca_Bj_dt", 148),
("df_Ca_Bsl_dt", 149),
("dRy_Rr_dt", 150),
("dRy_Ro_dt", 151),
("dRy_Ri_dt", 152),
("dNa_Bj_dt", 153),
("dNa_Bsl_dt", 154),
("dTn_CL_dt", 155),
("dTn_CHc_dt", 156),
("dTn_CHm_dt", 157),
("dCaM_dt", 158),
("dMyo_c_dt", 159),
("dMyo_m_dt", 160),
("dSRB_dt", 161),
("dSLL_j_dt", 162),
("dSLL_sl_dt", 163),
("dSLH_j_dt", 164),
("dSLH_sl_dt", 165),
("dCsqn_b_dt", 166),
("dCa_sr_dt", 167),
("dNa_j_dt", 168),
("dNa_sl_dt", 169),
("dNa_i_dt", 170),
("dK_i_dt", 171),
("dCa_j_dt", 172),
("dCa_sl_dt", 173),
("dCa_i_dt", 174),
("dV_m_dt", 175),
]
)
indices = []
for monitor in monitored:
if monitor not in monitor_inds:
raise ValueError("Unknown monitored: '{0}'".format(monitor))
indices.append(monitor_inds[monitor])
if len(indices) > 1:
return indices
else:
return indices[0]
def rhs(states, t, parameters, values=None):
"""
Compute the right hand side of the grandi ODE
"""
# Assign states
assert len(states) == 39
(
m,
h,
j,
x_kr,
x_ks,
x_to_s,
y_to_s,
x_to_f,
y_to_f,
d,
f,
f_Ca_Bj,
f_Ca_Bsl,
Ry_Rr,
Ry_Ro,
Ry_Ri,
Na_Bj,
Na_Bsl,
Tn_CL,
Tn_CHc,
Tn_CHm,
CaM,
Myo_c,
Myo_m,
SRB,
SLL_j,
SLL_sl,
SLH_j,
SLH_sl,
Csqn_b,
Ca_sr,
Na_j,
Na_sl,
Na_i,
K_i,
Ca_j,
Ca_sl,
Ca_i,
V_m,
) = states
# Assign parameters
assert len(parameters) == 112
Fjunc = parameters[0]
Fjunc_CaL = parameters[1]
cellLength = parameters[2]
cellRadius = parameters[3]
GNa = parameters[8]
GNaB = parameters[9]
IbarNaK = parameters[10]
KmKo = parameters[11]
KmNaip = parameters[12]
GKr = parameters[15]
GKp = parameters[16]
GKs = parameters[17]
pNaK = parameters[18]
GK1 = parameters[19]
Gto = parameters[20]
epi = parameters[21]
GClB = parameters[22]
GClCa = parameters[23]
KdClCa = parameters[24]
GCaL = parameters[25]
Q10CaL = parameters[26]
pCa = parameters[27]
pK = parameters[28]
pNa = parameters[29]
IbarNCX = parameters[30]
Kdact = parameters[31]
KmCai = parameters[32]
KmCao = parameters[33]
KmNai = parameters[34]
KmNao = parameters[35]
Q10NCX = parameters[36]
ksat = parameters[37]
nu = parameters[38]
IbarSLCaP = parameters[39]
KmPCa = parameters[40]
Q10SLCaP = parameters[41]
GCaB = parameters[42]
Kmf = parameters[43]
Kmr = parameters[44]
MaxSR = parameters[45]
MinSR = parameters[46]
Q10SRCaP = parameters[47]
Vmax_SRCaP = parameters[48]
ec50SR = parameters[49]
hillSRCaP = parameters[50]
kiCa = parameters[51]
kim = parameters[52]
koCa = parameters[53]
kom = parameters[54]
ks = parameters[55]
Bmax_Naj = parameters[56]
Bmax_Nasl = parameters[57]
koff_na = parameters[58]
kon_na = parameters[59]
Bmax_CaM = parameters[60]
Bmax_SR = parameters[61]
Bmax_TnChigh = parameters[62]
Bmax_TnClow = parameters[63]
Bmax_myosin = parameters[64]
koff_cam = parameters[65]
koff_myoca = parameters[66]
koff_myomg = parameters[67]
koff_sr = parameters[68]
koff_tnchca = parameters[69]
koff_tnchmg = parameters[70]
koff_tncl = parameters[71]
kon_cam = parameters[72]
kon_myoca = parameters[73]
kon_myomg = parameters[74]
kon_sr = parameters[75]
kon_tnchca = parameters[76]
kon_tnchmg = parameters[77]
kon_tncl = parameters[78]
Bmax_SLhighj0 = parameters[79]
Bmax_SLhighsl0 = parameters[80]
Bmax_SLlowj0 = parameters[81]
Bmax_SLlowsl0 = parameters[82]
koff_slh = parameters[83]
koff_sll = parameters[84]
kon_slh = parameters[85]
kon_sll = parameters[86]
Bmax_Csqn0 = parameters[87]
J_ca_juncsl = parameters[90]
J_ca_slmyo = parameters[91]
koff_csqn = parameters[92]
kon_csqn = parameters[93]
J_na_juncsl = parameters[96]
J_na_slmyo = parameters[97]
Nao = parameters[98]
Ko = parameters[99]
Cao = parameters[100]
Cli = parameters[101]
Clo = parameters[102]
Mgi = parameters[103]
Cmem = parameters[104]
Frdy = parameters[105]
R = parameters[106]
Temp = parameters[107]
stim_amplitude = parameters[108]
stim_duration = parameters[109]
stim_period = parameters[110]
stim_start = parameters[111]
# Init return args
if values is None:
values = np.zeros_like(states)
else:
assert isinstance(values, np.ndarray) and values.shape == states.shape
# Expressions for the Geometry component
Vcell = 1e-15 * np.pi * cellLength * (cellRadius * cellRadius)
Vmyo = 0.65 * Vcell
Vsr = 0.035 * Vcell
Vsl = 0.02 * Vcell
Vjunc = 0.0005390000000000001 * Vcell
Fsl = 1 - Fjunc
Fsl_CaL = 1 - Fjunc_CaL
# Expressions for the Reversal potentials component
FoRT = Frdy / (R * Temp)
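# Nernst potentials: E_X = (R*T)/(z*F) * ln([X]_out / [X]_in); since FoRT = F/(R*T),
# the divisions below correspond to z = 1 for Na+/K+, z = 2 for Ca2+, and the
# inverted concentration ratio handles z = -1 for Cl-.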
ena_junc = np.log(Nao / Na_j) / FoRT
ena_sl = np.log(Nao / Na_sl) / FoRT
ek = np.log(Ko / K_i) / FoRT
eca_junc = np.log(Cao / Ca_j) / (2 * FoRT)
eca_sl = np.log(Cao / Ca_sl) / (2 * FoRT)
ecl = np.log(Cli / Clo) / FoRT
Qpow = -31 + Temp / 10
# Expressions for the I_Na component
mss = 1.0 / (
(1 + 0.0018422115811651339 * np.exp(-0.1107419712070875 * V_m))
* (1 + 0.0018422115811651339 * np.exp(-0.1107419712070875 * V_m))
)
taum = 0.1292 * np.exp(
-(
(2.9465894465894467 + 0.06435006435006435 * V_m)
* (2.9465894465894467 + 0.06435006435006435 * V_m)
)
) + 0.06487 * np.exp(
-(
(-0.09434663536776214 + 0.019561815336463225 * V_m)
* (-0.09434663536776214 + 0.019561815336463225 * V_m)
)
)
ah = np.where(
V_m >= -40, 0, 4.4312679295805147e-07 * np.exp(-0.14705882352941177 * V_m)
)
bh = np.where(
V_m >= -40,
0.77 / (0.13 + 0.049758141083938695 * np.exp(-0.0900900900900901 * V_m)),
310000.0 * np.exp(0.3485 * V_m) + 2.7 * np.exp(0.079 * V_m),
)
tauh = 1.0 / (ah + bh)
hss = 1.0 / (
(1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
* (1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
)
aj = np.where(
V_m >= -40,
0,
(37.78 + V_m)
* (-25428.0 * np.exp(0.2444 * V_m) - 6.948e-06 * np.exp(-0.04391 * V_m))
/ (1 + 50262745825.95399 * np.exp(0.311 * V_m)),
)
bj = np.where(
V_m >= -40,
0.6 * np.exp(0.057 * V_m) / (1 + 0.040762203978366204 * np.exp(-0.1 * V_m)),
0.02424
*
|
np.exp(-0.01052 * V_m)
|
numpy.exp
|
import numpy as np
from matplotlib import pyplot as plt
import keyboard
size = 15 # random map generator
mapa = [[list(np.random.uniform(0, 1, 3))] * size for i in range(size)]
mapah = [[1] * size for i in range(size)]
for i in range(size-2):
for j in range(size-2):
mapah[i+1][j+1] = np.random.choice([0.3, 0.4, 0.7, 1])
if np.random.uniform() > 0.33:
mapa[i+1][j+1] = 0
posx, posy, posz = (1, np.random.randint(1, size -1), 0.5)
rot, rot_v = (np.pi/4, 0)
x, y, z = (posx, posy, posz)
mapa[x][y] = 0
count = 0
while True:
testx, testy = (x, y)
if np.random.uniform() > 0.5:
testx = testx + np.random.choice([-1, 1])
else:
testy = testy + np.random.choice([-1, 1])
if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
if mapa[testx][testy] == 0 or count > 5:
count = 0
x, y = (testx, testy)
mapa[x][y] = 0
if x == size-2:
exitx, exity = (x, y)
break
else:
count = count+1
mod = 1 # resolution modifier
inc = 0.05/mod # ray increment
height = int(48*mod)
width = int(60*mod)
while True: #main game loop
pixels = []
for j in range(height): #vertical loop
pixels.append([])
rot_j = np.deg2rad(24 + rot_v - j/mod)
for i in range(width): #horizontal vision loop
rot_i = rot + np.deg2rad(i/mod - 30)
x, y, z = (posx, posy, posz)
sin, cos, = (inc*np.sin(rot_i), inc*np.cos(rot_i))
sinz = inc*np.sin(rot_j)
n = 0
while True: # ray loop
x, y, z = (x + cos, y + sin, z + sinz)
n = n+1
if mapa[int(x)][int(y)] != 0 and z <= mapah[int(x)][int(y)]:
h = np.clip(1/(inc * n), 0, 1)
c = np.asarray(mapa[int(x)][int(y)])*(0.3 + 0.7 * h**2)
pixels[j].append(c)
break
elif z > 1: # ceiling
h = 0.3 + 0.7*np.clip(1/(inc * n), 0, 1)**2
if int(x*5)%2 ==1:
pixels[j].append(np.asarray([.8,1,.9])*h)
else:
pixels[j].append(np.asarray([0.5,0.5,1])*h)
break
elif z < 0: # floor
h = 0.3 + 0.7*np.clip(1/(inc * n), 0, 1)**2
if int(x) == exitx and int(y) == exity:
pixels[j].append(np.asarray([0,0,1]))
elif int(x*2)%2 == int(y*2)%2:
pixels[j].append(np.asarray([.3,.1,.1])*h)
else:
pixels[j].append(np.asarray([.8,.8,.5])*h)
break
plt.imshow(pixels)
plt.axis('off'); plt.tight_layout()
plt.draw(); plt.pause(0.0001); plt.clf()
# player's movement
key = keyboard.read_key()
x, y = (posx, posy)
if key == 'up':
x, y = (x + 0.3*
|
np.cos(rot)
|
numpy.cos
|
from __future__ import print_function
from six.moves import range
import numpy as np
from collections import defaultdict
def state_lifetimes_counts(transition_count_matrix_l,
n, nwin):
"""
Calculate lifetimes in each of the states (for each run/window)
Parameters:
-----------
transition_count_matrix_l: list of arrays
List of arrays with transition count matrices. One array for
each run/window.
n: int
Number of (structural) states
nwin: int
Number of simulation runs/windows, i.e., how many umbrella
windows were run.
Returns:
--------
t_ar: array_like
Array, n x nwin, where n is number of states,
nwin is number of windows with aggregate lifetimes in the states.
"""
#n = len(transition_count_matrix_l[0][:,0])
#nwin = len(transition_count_matrix_l)
t_ar = np.zeros((n,nwin), dtype=np.float64)
for iwin, win in enumerate(transition_count_matrix_l):
# sum over the column gives all counts in a state
t_ar[:, iwin] = np.sum(win, axis=0)
return t_ar
def total_transition_counts(transition_count_matrix_l, n):
"""
Parameters:
-----------
transition_count_matrix_l: list of arrays
Returns:
--------
nn_ar: array_like, total transitions j->i
"""
#n = len(transition_count_matrix_l[0][:,0])
nn_ar = np.zeros((n,n))
# do j=1,n
# do i=1,n
# nn(i,j)=0.d0
# do iwin=1,nwin
# nn(i,j)=nn(i,j)+nij(i,j,iwin)
# enddo
# enddo
for j in range(n):
for i in range(n):
for iwin, win in enumerate(transition_count_matrix_l):
nn_ar[i,j] += win[i,j]
return nn_ar
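# Note (illustrative, not part of the original module): since every array in
# transition_count_matrix_l has the same n x n shape, the triple loop above is
# equivalent to summing the stacked matrices over the window axis:
#   nn_ar = np.sum(np.asarray(transition_count_matrix_l), axis=0)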
def counts_in_out(transition_count_matrix_l, n, nwin):
"""
Parameters:
-----------
transition_count_matrix_l: list of arrays
List of arrays with transition count matrices. One array for
each run/window.
n: int
Number of (structural) states
nwin: int
Number of simulation runs/windows, i.e., how many umbrella
windows were run.
Returns:
--------
n_in: array_like
Array length n. Total number of transitions into state i.
n_out: array_like
Array length n. Total number of transitions out of state j.
"""
n_in =
|
np.zeros(n)
|
numpy.zeros
|
"""
Module that extracts product values from pyclass results.
All functions assume the proleptic Gregorian ordinal,
where January 1 of year 1 has ordinal 1.
"""
import numpy as np
trans_class = 9
def sort_models(class_result):
"""
Sorts the classification results if order is in question.
Args:
class_result: list of dicts returned from pyclass classification
Returns:
sorted list
"""
if len(class_result) == 1:
return class_result
idxs = np.argsort([m['start_day'] for m in class_result])
return [class_result[i] for i in idxs]
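# Example (illustrative): for segments with start_day values [200, 100] this
# returns the two dicts reordered so the segment starting on day 100 comes first.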
def class_primary(class_result, ordinal):
"""
Identify the primary cover class for the given ordinal day. The days
between and around the segments identified by the classification
process also form valid segments here. They receive different values
depending on whether they fall at the beginning/end of the series or
between classified segments.
<- .... |--seg 1--| ...... |--seg 2--| .... ->
   0        val     trans_val     val       0
Args:
class_result: ordered list of dicts returned from pyclass classification
ordinal: ordinal day to calculate from
Returns:
int
"""
ret = 0
if ordinal > 0:
prev_end = 0
for segment in class_result:
if segment['start_day'] <= ordinal <= segment['end_day']:
ret = segment['class_vals'][
|
np.argmax(segment['class_probs'][0])
|
numpy.argmax
|
# -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import struct
import time
import datetime as dt
import numpy as np
import mne
from mne.io import RawArray
from mne import create_info
def read_sef(filename):
"""
Reads a file in the Simple EEG (.sef) format and returns an
mne.io.RawArray containing the data.
Parameters
----------
filename : str or file-like
The Simple EEG (.sef) file to read.
Returns
-------
raw : mne.io.RawArray
RawArray containing the EEG signals.
"""
f = open(filename, 'rb')
# Read fixed part of the header
version = f.read(4).decode('utf-8')
if version != 'SE01':
print(f'Version : {version} not supported')
raise ValueError()
n_channels, = struct.unpack('I', f.read(4))
num_aux_electrodes, = struct.unpack('I', f.read(4))
num_time_frames, = struct.unpack('I', f.read(4))
sfreq, = struct.unpack('f', f.read(4))
year, = struct.unpack('H', f.read(2))
month, = struct.unpack('H', f.read(2))
day, = struct.unpack('H', f.read(2))
hour, = struct.unpack('H', f.read(2))
minute, = struct.unpack('H', f.read(2))
second, = struct.unpack('H', f.read(2))
millisecond, = struct.unpack('H', f.read(2))
# Read variable part of the header
ch_names = []
for _ in range(n_channels):
name = [char for char in f.read(8).split(b'\x00')
if char != b''][0]
ch_names.append(name.decode('utf-8').strip())
# Read data
buffer = np.frombuffer(
f.read(n_channels * num_time_frames * 8),
dtype=np.float32,
count=n_channels * num_time_frames)
data =
|
np.reshape(buffer, (num_time_frames, n_channels))
|
numpy.reshape
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_math_ops
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x ** y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_NEG = lambda x: -x
_ABS = abs
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return tf.SparseTensor(
indices=x_indices, values=x_values, shape=x_shape), x_values
class UnaryOpTest(tf.test.TestCase):
def _compareCpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x)
if x.dtype in (np.float32, np.float64):
y = 1.1 * tf_func(inx)
np_ans *= 1.1
else:
y = tf_func(inx)
tf_cpu = y.eval()
self.assertShapeEqual(np_ans, y)
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(np_ans, tf_cpu)
if x.dtype in (np.complex64, np.complex128) and tf_func == tf.sign:
return # Return early
if x.dtype == np.float16:
s = list(np.shape(x))
jacob_t, _ = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
xf = x.astype(np.float)
inxf = tf.convert_to_tensor(xf)
yf = tf_func(inxf)
_, jacob_n = tf.test.compute_gradient(inxf,
s,
yf,
s,
x_init_value=xf)
jacob_n = jacob_n.astype(np.float16)
self.assertAllClose(jacob_t, jacob_n, rtol=5e-3, atol=5e-3)
elif x.dtype in (np.float32, np.complex64):
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype in (np.float64, np.complex128):
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _check(self, result_tensor, result_np, input_sp_t, tol):
self.assertTrue(isinstance(result_tensor, tf.SparseTensor))
self.assertTrue(isinstance(input_sp_t, tf.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
self.assertAllEqual(input_sp_t.shape.eval(), result_tensor.shape.eval())
if tol is None:
self.assertAllClose(result_np, result_tensor.values.eval())
else:
self.assertAllClose(result_np, result_tensor.values.eval(), rtol=tol,
atol=tol)
def _compareSparseCpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(use_gpu=False):
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareGpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=True):
result = tf_func(tf.convert_to_tensor(x))
tf_gpu = result.eval()
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(np_ans, tf_gpu)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareSparseGpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(use_gpu=True):
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareBoth(self, x, np_func, tf_func):
self._compareCpu(x, np_func, tf_func)
self._compareGpu(x, np_func, tf_func)
def _compareBothSparse(self, x, np_func, tf_func, tol=None):
self._compareSparseCpu(x, np_func, tf_func, tol)
self._compareSparseGpu(x, np_func, tf_func, tol)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _sigmoid(self, x):
return 1.0 / (1.0 + np.exp(-x))
def _replace_domain_error_with_inf(self, fn):
def func(x):
try:
return fn(x)
except ValueError as e:
if "domain error" in str(e):
return np.inf * np.ones_like(x)
else:
raise e
return func
def testFloatBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
y = (x + .5).astype(np.float32) # no zero
z = (x + 15.5).astype(np.float32) # all positive
k = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(k, np.arcsin, tf.asin)
self._compareBoth(k, np.arccos, tf.acos)
self._compareBoth(x, np.arctan, tf.atan)
self._compareBoth(x, np.tan, tf.tan)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(y, np.sign, tf.sign)
self._compareBothSparse(x, np.vectorize(math.erf), tf.erf)
def testFloatTanhEdge(self):
x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, tf.tanh)
x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, tf.tanh)
def testFloatEmpty(self):
x = np.empty((2, 0, 5), dtype=np.float32)
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(x, np.sqrt, tf.sqrt)
self._compareBoth(x, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(x, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(x, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
# Can't use vectorize below, so just use some arbitrary function
self._compareBoth(x, np.sign, tf.lgamma)
self._compareBoth(x, np.sign, tf.erf)
self._compareBoth(x, np.sign, tf.erfc)
self._compareBoth(x, np.tan, tf.tan)
self._compareBoth(x, np.arcsin, tf.asin)
self._compareBoth(x, np.arccos, tf.acos)
self._compareBoth(x, np.arctan, tf.atan)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(x, np.sign, tf.sign)
self._compareBothSparse(x, np.sign, tf.erf)
def testDoubleBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
y = (x + .5).astype(np.float64) # no zero
z = (x + 15.5).astype(np.float64) # all positive
k = np.arange(-0.90, 0.90, 0.35).reshape(1, 3, 2).astype(np.float64) # between -1 and 1
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
self._compareBoth(x, np.arctan, tf.atan)
self._compareBoth(k, np.arcsin, tf.asin)
self._compareBoth(k, np.arccos, tf.acos)
self._compareBoth(k, np.tan, tf.tan)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(y, np.sign, tf.sign)
self._compareBothSparse(x, np.vectorize(math.erf), tf.erf)
def testHalfBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float16)
y = (x + .5).astype(np.float16) # no zero
z = (x + 15.5).astype(np.float16) # all positive
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(y, np.sign, tf.sign)
self._compareBothSparse(x, np.vectorize(math.erf), tf.erf, tol=1e-3)
def testInt32Basic(self):
x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, np.square, tf.square)
self._compareCpu(x, np.sign, tf.sign)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sign, tf.sign)
def testInt64Basic(self):
x = np.arange(
-6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(x, np.sign, tf.sign)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sign, tf.sign)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
np.complex64)
y = x + 0.5 # no zeros
self._compareCpu(x, np.abs, tf.complex_abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(y, self._inv, tf.inv)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(y, np.sqrt, tf.sqrt)
self._compareCpu(y, self._rsqrt, tf.rsqrt)
self._compareCpu(x, np.exp, tf.exp)
self._compareCpu(y, np.log, tf.log)
self._compareCpu(x, np.tanh, tf.tanh)
self._compareCpu(x, self._sigmoid, tf.sigmoid)
self._compareCpu(x, np.sin, tf.sin)
self._compareCpu(x, np.cos, tf.cos)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
self._compareCpu(y, complex_sign, tf.sign)
self._compareBothSparse(y, complex_sign, tf.sign)
def testComplex128Basic(self):
x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
np.complex128)
y = x + 0.5 # no zeros
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(y, self._inv, tf.inv)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(y, np.sqrt, tf.sqrt)
self._compareCpu(y, self._rsqrt, tf.rsqrt)
self._compareCpu(x, np.exp, tf.exp)
self._compareCpu(y, np.log, tf.log)
self._compareCpu(x, np.tanh, tf.tanh)
self._compareCpu(x, self._sigmoid, tf.sigmoid)
self._compareCpu(x, np.sin, tf.sin)
self._compareCpu(x, np.cos, tf.cos)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
self._compareCpu(y, complex_sign, tf.sign)
self._compareBothSparse(y, complex_sign, tf.sign)
def testGradGrad(self):
np.random.seed(7)
shape = (5,)
dtype_tols = [(np.float32, 1e-3), (np.float64, 1e-6), (np.complex64, 1e-3),
(np.complex128, 1e-6)]
op_range = [(gen_math_ops._inv_grad, [-2, 2]),
(gen_math_ops._sigmoid_grad, [-2, 2]),
(gen_math_ops._sqrt_grad, [1, 3]),
(gen_math_ops._tanh_grad, [-2, 2]),]
def rand(dtype):
x = np.random.uniform(
real_range[0], real_range[1], size=shape[0]).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * np.random.uniform(-2, 2, size=shape[0]).astype(dtype)
return x
for op, real_range in op_range:
with self.test_session():
for dtype, tol in dtype_tols:
x = tf.constant(rand(dtype))
y = tf.constant(rand(dtype))
z = op(x, y)
error = tf.test.compute_gradient_error(
[x, y], [shape, shape],
z,
shape,
x_init_value=[rand(dtype), rand(dtype)])
self.assertLess(error, tol)
class BinaryOpTest(tf.test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = out.eval()
# Test that the op takes precedence over numpy operators.
np_left = tf_func(x, iny).eval()
np_right = tf_func(inx, y).eval()
if also_compare_variables:
var_x = tf.Variable(x)
var_y = tf.Variable(y)
tf.initialize_all_variables().run()
print(type(x), type(y), type(var_x), type(var_y))
print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
np_var_left = tf_func(x, var_y).eval()
np_var_right = tf_func(var_x, y).eval()
if np_ans.dtype != np.object:
self.assertAllClose(np_ans, tf_cpu)
self.assertAllClose(np_ans, np_left)
self.assertAllClose(np_ans, np_right)
if also_compare_variables:
self.assertAllClose(np_ans, np_var_left)
self.assertAllClose(np_ans, np_var_right)
self.assertShapeEqual(np_ans, out)
_GRAD_TOL = {tf.float16: 1e-3,
tf.float32: 1e-3,
tf.complex64: 1e-2,
tf.float64: 1e-5,
tf.complex128: 1e-4}
def _compareGradientX(self, x, y, np_func, tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = tf.test.compute_gradient(inx,
xs,
out,
zs,
x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inxf,
xs,
outf,
zs,
x_init_value=xf,
delta=1e-3)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self, x, y, np_func, tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
ys,
out,
zs,
x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inyf,
ys,
outf,
zs,
x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
if x.dtype in (np.float16, np.float32, np.float64):
if tf_func not in (_FLOORDIV, tf.floordiv, tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
if tf_func in (tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
# These methods only support gradients in the second parameter
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(x, y, np.add, tf.add, also_compare_variables=True)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
# Need x > 1
self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta, tf.zeta)
n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(n_small, x_pos_small, special.polygamma, tf.polygamma)
except ImportError as e:
tf.logging.warn("Cannot test special functions: %s" % str(e))
def testFloatDifferentShapes(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
with self.test_session() as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
s = tf.reduce_sum(inx * iny)
gx, gy = sess.run(tf.gradients(s, [inx, iny]))
# gx is simply the broadcasted y
self.assertAllEqual(gx, np.array([1, 1, 2, 2])
.reshape(2, 2).astype(np.float32))
# gy is x's column summed up
self.assertAllEqual(gy, np.array([3, 7]).
reshape(2, 1).astype(np.float32))
def testFloatVariableOverload(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
var_x = tf.Variable(x)
var_y = tf.Variable(y)
with self.test_session() as sess:
sess.run([var_x.initializer, var_y.initializer])
left_result = (var_x * y).eval()
right_result = (x * var_y).eval()
np_result = x * y
self.assertAllEqual(np_result, left_result)
self.assertAllEqual(np_result, right_result)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareBoth(x, y, np.add, tf.add)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
except ImportError as e:
tf.logging.warn("Cannot test special functions: %s" % str(e))
def testInt8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
self._compareBoth(x, y, np.add, tf.add)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.true_divide, tf.truediv)
self._compareBoth(x, y, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.mod, tf.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
# _compareBoth tests on GPU only for floating point types, so test
# _MOD for int32 on GPU by calling _compareGpu
self._compareGpu(x, y, np.mod, _MOD)
def testInt64Basic(self):
x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.true_divide, tf.truediv)
self._compareBoth(x, y, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.mod, tf.mod)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex64)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex64)
self._compareCpu(x, y, np.add, tf.add)
self._compareCpu(x, y, np.subtract, tf.sub)
self._compareCpu(x, y, np.multiply, tf.mul)
self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)
self._compareCpu(x, y, np.add, _ADD)
self._compareCpu(x, y, np.subtract, _SUB)
self._compareCpu(x, y, np.multiply, _MUL)
self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)
def testComplex128Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex128)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex128)
self._compareCpu(x, y, np.add, tf.add)
self._compareCpu(x, y, np.subtract, tf.sub)
self._compareCpu(x, y, np.multiply, tf.mul)
self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)
self._compareCpu(x, y, np.add, _ADD)
self._compareCpu(x, y, np.subtract, _SUB)
self._compareCpu(x, y, np.multiply, _MUL)
self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)
def testStringComparison(self):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with self.test_session(use_gpu=False) as sess:
cmp_eq = tf.equal(x, y)
cmp_not_eq = tf.not_equal(x, y)
values = sess.run([cmp_eq, cmp_not_eq])
self.assertAllEqual([[True, True], [False, False]], values[0])
self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
x = np.array([["x_0_0", "x_0_1", "x_0_2"],
["x_1_0", "x_1_1", "x_1_2"],
["x_2_0", "x_2_1", "x_2_2"]], dtype=np.object)
y = np.array([["y_0_0", "y_0_1", "y_0_2"],
["y_1_0", "y_1_1", "y_1_2"],
["y_2_0", "y_2_1", "y_2_2"]], dtype=np.object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
w = np.array("w", dtype=np.object)
self._compareCpu(x, y, _ADD, _ADD)
self._compareCpu(x, z, _ADD, _ADD)
self._compareCpu(x, w, _ADD, _ADD)
self._compareCpu(z, w, _ADD, _ADD)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
if dtype in (np.complex64, np.complex128):
x = (1 + np.linspace(0, 2 + 3j, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 2 - 2j, np.prod(ys))).astype(dtype).reshape(ys)
else:
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
if tf_func not in (_FLOORDIV, tf.floordiv):
if x.dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(x, y, np_func, tf_func, np.float)
self._compareGradientY(x, y, np_func, tf_func, np.float)
else:
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
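    # Added illustration of the fp16 comment above (not in the original test):
    # a float16 central difference such as
    #   (np.square(np.float16(1.0) + np.float16(1e-3)) -
    #    np.square(np.float16(1.0) - np.float16(1e-3))) / np.float16(2e-3)
    # already deviates from the true derivative 2.0 by a few percent, which is
    # why the numerical Jacobian is computed in float32 and only the float16
    # theoretical Jacobian is taken from the op under test.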
# TODO(josh11b,vrv): Refactor this to use parameterized tests.
def _testBCastByFunc(self, funcs, xs, ys):
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
np.complex64,
np.complex128,
]
for dtype in dtypes:
for (np_func, tf_func) in funcs:
if (dtype in (np.complex64, np.complex128) and
tf_func in (_FLOORDIV, tf.floordiv)):
continue # floordiv makes no sense for complex numbers
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
(np.add, tf.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
(np.subtract, tf.sub),
(np.subtract, _SUB),
(np.power, tf.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
(np.multiply, tf.mul),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
(np.true_divide, tf.truediv),
(np.floor_divide, tf.floordiv),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
self._testBCastByFunc(funcs, xs, ys)
def testBCast_0A(self):
self._testBCastA([1, 3, 2], [1])
def testBCast_0B(self):
self._testBCastB([1, 3, 2], [1])
def testBCast_0C(self):
self._testBCastC([1, 3, 2], [1])
def testBCast_0D(self):
self._testBCastD([1, 3, 2], [1])
def testBCast_1A(self):
self._testBCastA([1, 3, 2], [2])
def testBCast_1B(self):
self._testBCastB([1, 3, 2], [2])
def testBCast_1C(self):
self._testBCastC([1, 3, 2], [2])
def testBCast_1D(self):
self._testBCastD([1, 3, 2], [2])
def testBCast_2A(self):
self._testBCastA([1, 3, 2], [3, 2])
def testBCast_2B(self):
self._testBCastB([1, 3, 2], [3, 2])
def testBCast_2C(self):
self._testBCastC([1, 3, 2], [3, 2])
def testBCast_2D(self):
self._testBCastD([1, 3, 2], [3, 2])
def testBCast_3A(self):
self._testBCastA([1, 3, 2], [3, 1])
def testBCast_3B(self):
self._testBCastB([1, 3, 2], [3, 1])
def testBCast_3C(self):
self._testBCastC([1, 3, 2], [3, 1])
def testBCast_3D(self):
self._testBCastD([1, 3, 2], [3, 1])
def testBCast_4A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_4B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_4C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_4D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_5A(self):
self._testBCastA([1, 3, 2], [2, 3, 1])
def testBCast_5B(self):
self._testBCastB([1, 3, 2], [2, 3, 1])
def testBCast_5C(self):
self._testBCastC([1, 3, 2], [2, 3, 1])
def testBCast_5D(self):
self._testBCastD([1, 3, 2], [2, 3, 1])
def testBCast_6A(self):
self._testBCastA([1, 3, 2], [2, 1, 1])
def testBCast_6B(self):
self._testBCastB([1, 3, 2], [2, 1, 1])
def testBCast_6C(self):
self._testBCastC([1, 3, 2], [2, 1, 1])
def testBCast_6D(self):
self._testBCastD([1, 3, 2], [2, 1, 1])
def testBCast_7A(self):
self._testBCastA([1, 3, 2], [1, 3, 1])
def testBCast_7B(self):
self._testBCastB([1, 3, 2], [1, 3, 1])
def testBCast_7C(self):
self._testBCastC([1, 3, 2], [1, 3, 1])
def testBCast_7D(self):
self._testBCastD([1, 3, 2], [1, 3, 1])
def testBCast_8A(self):
self._testBCastA([2, 1, 5], [2, 3, 1])
def testBCast_8B(self):
self._testBCastB([2, 1, 5], [2, 3, 1])
def testBCast_8C(self):
self._testBCastC([2, 1, 5], [2, 3, 1])
def testBCast_8D(self):
self._testBCastD([2, 1, 5], [2, 3, 1])
def testBCast_9A(self):
self._testBCastA([2, 0, 5], [2, 0, 1])
def testBCast_9B(self):
self._testBCastB([2, 0, 5], [2, 0, 1])
def testBCast_9C(self):
self._testBCastC([2, 0, 5], [2, 0, 1])
def testBCast_9D(self):
self._testBCastD([2, 0, 5], [2, 0, 1])
def testBCast_10A(self):
self._testBCastA([2, 3, 0], [2, 3, 1])
def testBCast_10B(self):
self._testBCastB([2, 3, 0], [2, 3, 1])
def testBCast_10C(self):
self._testBCastC([2, 3, 0], [2, 3, 1])
def testBCast_10D(self):
self._testBCastD([2, 3, 0], [2, 3, 1])
def testBCast_11A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_11B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_11C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_11D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_12A(self):
self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12B(self):
self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12C(self):
self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12D(self):
self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_13A(self):
self._testBCastA([1, 3, 2, 1, 1], [1])
def testBCast_13B(self):
self._testBCastB([1, 3, 2, 1, 1], [1])
def testBCast_13C(self):
self._testBCastC([1, 3, 2, 1, 1], [1])
def testBCast_13D(self):
self._testBCastD([1, 3, 2, 1, 1], [1])
def testBCast_14A(self):
self._testBCastA([2, 3, 1, 1, 5], [1])
def testBCast_14B(self):
self._testBCastB([2, 3, 1, 1, 5], [1])
def testBCast_14C(self):
self._testBCastC([2, 3, 1, 1, 5], [1])
def testBCast_14D(self):
self._testBCastD([2, 3, 1, 1, 5], [1])
def testBCast_15A(self):
self._testBCastA([10, 3, 1, 2], [3, 1, 2])
def testBCast_15B(self):
self._testBCastB([10, 3, 1, 2], [3, 1, 2])
def testBCast_15C(self):
self._testBCastC([10, 3, 1, 2], [3, 1, 2])
def testBCast_15D(self):
self._testBCastD([10, 3, 1, 2], [3, 1, 2])
def testMismatchedDimensions(self):
for func in [tf.add, tf.sub, tf.mul, tf.div, _ADD, _SUB, _MUL, _TRUEDIV,
_FLOORDIV]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
func(tf.convert_to_tensor([10.0, 20.0, 30.0]),
tf.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testZeroPowGrad(self):
with self.test_session():
for dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
x = tf.constant(0.0, dtype=dtype)
y = tf.constant(2.0, dtype=dtype)
z = tf.pow(x, y)
error = tf.test.compute_gradient_error(y, [], z, [])
self.assertEqual(error, 0)
def testComplexPowGrad(self):
with self.test_session():
for dtype in np.complex64, np.complex128:
for base in 2.0, -2.0:
x = tf.constant(base, dtype=dtype)
y = tf.constant(2.0, dtype=dtype)
z = tf.pow(x, y)
error = tf.test.compute_gradient_error(y, [], z, [])
self.assertLess(error, 2e-4)
class ComparisonOpTest(tf.test.TestCase):
def _compare(self, func, x, y, dtype):
with self.test_session(use_gpu=False):
out = func(tf.convert_to_tensor(np.array([x]).astype(dtype)),
tf.convert_to_tensor(np.array([y]).astype(dtype)))
ret = out.eval()
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compare(tf.less, x, y, t), x < y)
self.assertEqual(self._compare(tf.less_equal, x, y, t), x <= y)
self.assertEqual(self._compare(tf.greater, x, y, t), x > y)
self.assertEqual(self._compare(tf.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)
def _compareCpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
tf_cpu = out.eval()
self.assertAllEqual(np_ans, tf_cpu)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
tf_gpu = out.eval()
self.assertAllEqual(np_ans, tf_gpu)
def _compareBoth(self, x, y, np_func, tf_func):
self._compareCpu(x, y, np_func, tf_func)
if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:
self._compareGpu(x, y, np_func, tf_func)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compareBoth(xt, yt, np.less, tf.less)
self._compareBoth(xt, yt, np.less_equal, tf.less_equal)
self._compareBoth(xt, yt, np.greater, tf.greater)
self._compareBoth(xt, yt, np.greater_equal, tf.greater_equal)
self._compareBoth(xt, yt, np.equal, tf.equal)
self._compareBoth(xt, yt, np.not_equal, tf.not_equal)
# TODO(zhifengc): complex64 doesn't work on GPU yet.
for t in [np.complex64, np.complex128]:
self._compareCpu(x.astype(t), y.astype(t), np.equal, tf.equal)
self._compareCpu(x.astype(t), y.astype(t), np.not_equal, tf.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
self._compareCpu(y, x, np_func, tf_func)
if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:
self._compareGpu(x, y, np_func, tf_func)
self._compareGpu(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, tf.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, tf.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, tf.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, tf.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, tf.equal)
def testBCastNotEqual(self):
self._testBCastByFunc(np.not_equal, tf.not_equal)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [tf.less, tf.less_equal, tf.greater,
tf.greater_equal, tf.equal, tf.not_equal]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x.astype(t), y.astype(t))
class LogicalOpTest(tf.test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=use_gpu):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = out.eval()
self.assertEqual(out.dtype, tf.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with self.test_session(use_gpu=use_gpu):
out = tf.logical_not(tf.convert_to_tensor(x))
tf_val = out.eval()
self.assertEqual(out.dtype, tf.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def testScalar(self):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
self._not(x, use_gpu)
for x in data:
for y in data:
self._compareBinary(
x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(
x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(
x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testBCast(self):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
for (xs, ys) in shapes:
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
for f in [tf.logical_and, tf.logical_or, tf.logical_xor]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x, y)
def testUsingAsPythonValueFails(self):
# Ensure that we raise an error when the user attempts to treat a
# `Tensor` as a Python `bool`.
b = tf.constant(False)
with self.assertRaises(TypeError):
if b:
pass
x = tf.constant(3)
y = tf.constant(4)
with self.assertRaises(TypeError):
if x > y:
pass
z = tf.constant(7)
# The chained comparison should fail because Python computes `x <
# y` and short-circuits the comparison with `z` if it is `False`.
with self.assertRaises(TypeError):
_ = x < y < z
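    # Added note (not in the original test): Python expands `x < y < z` to
    # roughly `(x < y) and (y < z)`, so the first comparison must be coerced
    # to a Python bool, which is exactly what raises the TypeError above.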
class SelectOpTest(tf.test.TestCase):
def _compare(self, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with self.test_session(use_gpu=use_gpu):
out = tf.select(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inxf,
s,
outf,
s,
x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y,
delta=1.0)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inyf,
s,
outf,
s,
x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
def testGradients(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(c, xt, yt, np.float)
self._compareGradientY(c, xt, yt, np.float)
else:
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
def testShapeMismatch(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(2, 5, 3) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
tf.select(c, xt, yt)
def testEmptyTensor(self):
c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)
x = np.random.rand(1, 3, 0) * 100
y = np.random.rand(1, 3, 0) * 100
z_expected = np.zeros((1, 3, 0), dtype=np.float32)
with self.test_session():
xt = x.astype(np.float32)
yt = y.astype(np.float32)
z = tf.select(c, xt, yt).eval()
self.assertAllEqual(z_expected, z)
def testNan(self):
"""Verify that nans don't propagate where they shouldn't."""
with self.test_session():
for c in False, True:
for a in 7.0, np.nan:
for b in 5.0, np.nan:
x = tf.select(c, a, b).eval()
y = a if c else b
self.assertEqual(np.isnan(x), np.isnan(y))
class BatchSelectOpTest(tf.test.TestCase):
"""Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""
def _compare(self, c, x, y, use_gpu):
np_ans = np.dstack(
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with self.test_session(use_gpu=use_gpu):
out = tf.select(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inxf,
s,
outf,
s,
x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inyf,
s,
outf,
s,
x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
def testGradients(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(c, xt, yt, np.float)
self._compareGradientY(c, xt, yt, np.float)
else:
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
def testShapeMismatch(self):
c = np.random.randint(0, 2, 8).astype(np.bool)
x = np.random.rand(16, 3, 2) * 100
y = np.random.rand(16, 3, 2) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
tf.select(c, xt, yt)
class MinMaxOpTest(tf.test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
omin, omax = tf.minimum(inx, iny), tf.maximum(inx, iny)
tf_min, tf_max = sess.run([omin, omax])
self.assertAllEqual(np_min, tf_min)
self.assertAllEqual(np_max, tf_max)
def testBasic(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testDifferentShapes(self):
x = np.random.rand(1, 3, 2) * 100.
y =
|
np.random.rand(2)
|
numpy.random.rand
|
import collections
import itertools
from itertools import tee
from math import sqrt
import os
import matplotlib.pyplot as plt
import numpy as np
import sklearn.preprocessing
import scipy
from scipy.stats import spearmanr
import torch
import pygsp
import networkx as nx
import community as louvain
import networkx.algorithms.community as commu
from sklearn import manifold
from tqdm import tqdm
from .diffusion_graph import edges_from_loss_fn, cosine_loss
from .loaders import get_all_pairs_datasets
from .loaders import split_train_test
from .loaders import get_train_test_datasets_labels
from .loaders import get_dataset_from_datapath
from .loaders import get_labels_stats
from .classifiers import features_classification
def connect_parts_labels(recorder, graph, part_a, part_b, labels, label_0, label_1):
recorder.record_balance(min(len(part_a), len(part_b)) / len(graph))
a_to_0 = np.array([label_0]*len(graph))
for node_b in part_b:
a_to_0[node_b] = label_1
a_to_0 = torch.LongTensor(a_to_0)
ratio_errors = (a_to_0 == labels).sum().item()
ratio_errors = ratio_errors / len(graph)
ratio_errors = max(ratio_errors, 1. - ratio_errors)
#print('\n', part_a, ' ============ ', part_b, ' ####### ', ratio_errors*100, '\n')
return ratio_errors * 100.
def stoer_wagner_volume(recorder, graph, labels, params):
assert params.n_way == 2
try:
_, (part_a, part_b) = nx.stoer_wagner(graph)
except nx.exception.NetworkXError:
assert params.n_shot == 1
part_a = nx.node_connected_component(graph, 0)
part_b = nx.node_connected_component(graph, 1)
label_0, label_1 = torch.min(labels).item(), torch.max(labels).item()
assert (labels == label_0).sum().item() * 2 == len(graph)
return connect_parts_labels(recorder, graph, part_a, part_b, labels, label_0, label_1)
def min_cut_volume(recorder, graph, labels, params):
assert params.n_shot == 1 and params.n_way == 2
cut = nx.minimum_edge_cut(graph, 0, 1)
graph.remove_edges_from(cut)
part_a = nx.node_connected_component(graph, 0)
part_b = nx.node_connected_component(graph, 1)
label_0, label_1 = labels[0].item(), labels[1].item()
return connect_parts_labels(recorder, graph, part_a, part_b, labels, label_0, label_1)
def kernighan(recorder, graph, labels, params):
assert params.n_shot == 1 and params.n_way == 2
part_a, part_b = commu.kernighan_lin.kernighan_lin_bisection(graph, max_iter=20)
label_0, label_1 = labels[0].item(), labels[1].item()
return connect_parts_labels(recorder, graph, part_a, part_b, labels, label_0, label_1)
def compute_pure(nodes, colors, labels):
labels = labels.numpy()
colors_to_labels = collections.defaultdict(set)
labels_to_colors = collections.defaultdict(set)
for node in nodes:
colors_to_labels[colors[node]].add(int(labels[node]))
labels_to_colors[int(labels[node])].add(colors[node])
num_confusion = sum([1 for labels in colors_to_labels.values() if len(labels) > 1])
num_splitted = sum([1 for colors in labels_to_colors.values() if len(colors) > 1])
num_good = len(colors_to_labels) - num_confusion
ratio = num_good / (num_good + num_confusion) * 100.
return ratio / 100.
def get_total(bag):
return sum([len(subbag) for subbag in bag.values()])
def compute_entropy(bag):
total = get_total(bag)
freq = [len(subbag)/total for subbag in bag.values()]
freq = np.array(freq)
h = -(freq * np.log2(freq)).sum()
return h
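# Added worked example (not part of the original module): for a community bag
# {0: ['a', 'b', 'c'], 1: ['d']} the frequencies are [0.75, 0.25], so
# compute_entropy returns -(0.75*log2(0.75) + 0.25*log2(0.25)) ~= 0.811 bits.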
def print_histo(histo, edges):
for histo_bin in zip(edges, edges[1:], histo.tolist()):
print('[%.3f,%.3f]=%.3f'%histo_bin, end=' ')
print('\n')
def compute_communities_entropy(nodes, colors, labels, verbose):
communities = collections.defaultdict(lambda: collections.defaultdict(list))
classes = collections.defaultdict(lambda: collections.defaultdict(list))
for node in nodes:
com, label = colors[node], int(labels[node])
communities[com][label].append(node)
classes[label][com].append(node)
com_entropy = np.array([compute_entropy(com) for com in communities.values()])
cla_entropy = np.array([compute_entropy(cla) for cla in classes.values()])
totals = [get_total(com) for com in communities.values()]
avg_cla_entropy = np.mean(cla_entropy)
avg_com_entropy = np.mean(com_entropy)
w_com_entropy = np.average(com_entropy, weights=totals)
hist_cla_entropy = np.histogram(cla_entropy, bins=5, density=True)
hist_com_entropy = np.histogram(com_entropy, bins=5, density=True)
weights = [total / sum(totals) for total in totals]
histo_w_com_entropy = np.histogram(com_entropy, bins=5, weights=weights, density=True)
if verbose:
print('')
print('Communities %d\n'%len(communities))
print('Entropy per class: ', avg_cla_entropy)
print_histo(*hist_cla_entropy)
print('Entropy per community: ', avg_com_entropy)
print_histo(*hist_com_entropy)
print('Entropy per community, weighted: ', w_com_entropy)
print_histo(*histo_w_com_entropy)
avgs = avg_cla_entropy, avg_com_entropy, w_com_entropy
histos = hist_cla_entropy, hist_com_entropy, histo_w_com_entropy
return [len(communities)] + list(zip(avgs, histos))
def color(dendrogram, node, level):
key = node
for i in range(level+1):
key = dendrogram[level][key]
return key
def louvain_dendrogram(recorder, graph, labels, params, print_details=False):
dendrogram = louvain.generate_dendrogram(graph)
colors = {node: node for node in graph}
infos = []
if print_details:
print('\n', 'Level', ' ', 'Mixed Communities', ' ', 'Splitted Classes', ' ', 'Good Communities', ' ', 'Ratio')
for level in range(len(dendrogram)):
colors = {node: dendrogram[level][color] for node, color in colors.items()}
if params.communities == 'pure':
info = compute_pure(graph.nodes, colors, labels)
elif params.communities == 'entropy':
info = compute_communities_entropy(graph.nodes, colors, labels, verbose=False)
infos.append(info)
return infos
def build_community_graph(params, examples, labels, print_graph=False):
num_neighbors, regular = params.num_neighbors, params.regular
edges = edges_from_loss_fn(examples, cosine_loss, num_neighbors=num_neighbors,
regular=regular,
normalize_weights=True,
substract_mean=True,
exponent=True)
if params.higher_order:
num_nodes = len(set([int(edge[1][0]) for edge in edges]))
adj = np.zeros(shape=(num_nodes, num_nodes))
for w, (a, b) in edges:
            adj[a, b] = w  # accumulate each edge weight into the adjacency matrix
        adj = np.linalg.matrix_power(params.alpha * np.eye(N=num_nodes) + adj, params.kappa)
edges = [(a, b, w) for w, (a, b) in edges]
graph = nx.Graph()
graph.add_weighted_edges_from(edges)
if print_graph:
nx.draw_spring(graph)
plt.show()
return graph
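# Hedged sketch (added; not part of the original module) of the higher-order
# weighting used in build_community_graph, assuming a dense symmetric
# adjacency matrix and an integer exponent `kappa`.
def _higher_order_sketch(adj, alpha=1.0, kappa=2):
    # (alpha * I + A)^kappa accumulates weighted walks of length up to kappa.
    return np.linalg.matrix_power(alpha * np.eye(adj.shape[0]) + adj, kappa)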
def monitore_volume(recorder, train_set, test_set, train_labels, test_labels, params):
examples = torch.cat([train_set, test_set], dim=0)
labels = torch.cat([train_labels, test_labels], dim=0)
graph = build_community_graph(params, examples, labels)
if params.intersection_measure == 'stoer_wagner':
volume = stoer_wagner_volume(recorder, graph, labels, params)
elif params.intersection_measure == 'minimum_cut':
volume = min_cut_volume(recorder, graph, labels, params)
elif params.intersection_measure == 'kernighan':
volume = kernighan(recorder, graph, labels, params)
elif params.intersection_measure == 'louvain_dendrogram':
infos = louvain_dendrogram(recorder, graph, labels, params)
return infos
else:
raise ValueError
recorder.record_volume_error(volume)
def infos_per_level(graph, labels, params, verbose=True):
dendrogram = louvain.generate_dendrogram(graph)
colors = {node: node for node in graph}
infos = []
for level in range(len(dendrogram)):
colors = {node: dendrogram[level][color] for node, color in colors.items()}
info = compute_communities_entropy(graph.nodes, colors, labels, verbose=verbose)
infos.append(info)
return infos
def get_draw_options(big_graph, bipartite):
w_max = float(np.max([w[2] for w in big_graph.edges.data('weight')]))
colors = [w[2]/w_max for w in big_graph.edges.data('weight')]
edge_labels = {w[:2]:('%.3f'%w[2]) for w in big_graph.edges.data('weight')}
if bipartite is None:
node_color = '#A0CBE2'
else:
node_color = ['#1A4081' if node < bipartite else '#E37373' for node in big_graph]
return {
"node_color": node_color,
"edge_color": colors,
"width": 4,
"edge_cmap": plt.cm.Reds,
"with_labels": True,
}, edge_labels
def print_big_graph(params, mean_pos, edges, avg_degree=20, tsne_pos=False):
bipartite = 64 if '&' in params.dataset else None
edges.sort(key=lambda t: t[2], reverse=True)
max_edges = int(len(mean_pos) * avg_degree)
edges = edges[:max_edges]
big_graph = nx.Graph()
big_graph.add_weighted_edges_from(edges)
options, edge_labels = get_draw_options(big_graph, bipartite)
if tsne_pos:
mean_pos_array = np.concatenate(list(mean_pos.values()), axis=0)
tsne = manifold.TSNE()
print(mean_pos_array)
mean_pos_array = tsne.fit_transform(mean_pos_array)
pos = dict()
for i, key in enumerate(mean_pos):
pos[key] = mean_pos_array[i]
elif bipartite is not None:
part_1 = [node for node in big_graph if node < bipartite]
pos = nx.drawing.layout.bipartite_layout(big_graph, part_1, align='horizontal')
else:
# pos = nx.spring_layout(big_graph)
pos = nx.spectral_layout(big_graph)
nx.draw_networkx_nodes(big_graph, pos=pos, **options)
nx.draw_networkx_labels(big_graph, pos, labels={node:str(node) for node in big_graph})
nx.draw_networkx_edge_labels(big_graph, edge_labels=edge_labels, pos=pos, font_size=8)
name = params.intersection_measure + '_communities_' + str(-params.ladder) + '_' + str(params.num_neighbors)
nx.drawing.nx_agraph.write_dot(big_graph, os.path.join('graphs', name+'.dot'))
plt.savefig(os.path.join('graphs', name+'.pdf'))
plt.clf()
# plt.show()
print('')
def add_class(mean_pos, label, false_label, examples, labels):
if label in mean_pos:
return
class_examples = examples[labels == false_label]
mean_pos[label] = torch.mean(class_examples, dim=0, keepdim=True).numpy()
def monitore_communities(data_path, params, num_repetitions=5):
parts = params.parts if '&' in params.dataset else None
all_pairs = get_all_pairs_datasets(data_path, params.n_way, params.crop, parts)
edges = []
mean_pos = dict()
total_pairs = next(all_pairs)
progress = tqdm(total=total_pairs, leave=True)
for (examples, labels, label_a, label_b, false_a, false_b) in all_pairs:
add_class(mean_pos, label_a, false_a, examples, labels)
add_class(mean_pos, label_b, false_b, examples, labels)
graph = build_community_graph(params, examples, labels)
h_seq = []
for _ in range(num_repetitions):
infos = infos_per_level(graph, labels, params, verbose=False)
ladder = max(len(infos) + params.ladder, 0)
h_seq.append(infos[ladder][-1][0]) # last level
h_avg = float(np.mean(h_seq))
h_max = float(np.log2(params.n_way))
r = h_avg / h_max
r = min(1-1e-3, r) # crop
weight = r # / (1 - r) # similarity score
edges.append((label_a, label_b, weight))
desc = ' '.join([str(label_a), str(label_b), str(weight)])
progress.set_description(desc=desc)
progress.update()
progress.close()
print_big_graph(params, mean_pos, edges)
def monitore_regression(data_path, params, num_repetitions=20):
all_pairs = get_all_pairs_datasets(data_path, params.n_way, params.crop, params.parts)
edges = []
mean_pos = dict()
for (examples, labels, label_a, label_b, false_a, false_b) in all_pairs:
add_class(mean_pos, label_a, false_a, examples, labels)
add_class(mean_pos, label_b, false_b, examples, labels)
acc_seq = []
for _ in range(num_repetitions):
train_test = split_train_test(examples, labels, params.n_shot, params.n_val)
train_set, train_labels, test_set, test_labels = train_test
train_acc, test_acc = features_classification(train_set, train_labels,
test_set, test_labels,
params.n_way, params.classifier,
params.origin_normalization, params)
acc = 100. - test_acc # error monitoring instead of accuracy
acc_seq.append(acc) # last level
acc_avg = float(np.mean(acc_seq))
acc_max = float(100.)
r = acc_avg / acc_max
r = min(1-1e-5, r) # crop
weight = r # / (1 - r) # similarity score
edges.append((label_a, label_b, weight))
print(label_a, label_b, weight, acc_avg)
print_big_graph(params, mean_pos, edges)
def gather_edges(graph, ways):
edges = []
for str_edge in graph.edges.data('weight'):
edge = int(str_edge[0]), int(str_edge[1]), float(str_edge[2])
if edge[0] in ways and edge[1] in ways:
edges.append(edge)
return edges
def get_subgraph_weight(edges):
return sum([edge[2] for edge in edges])
def get_worse_clusters(params, big_graph, data_path):
_, original_labels = get_dataset_from_datapath(data_path)
num_labels, n_sample_per_label = get_labels_stats(original_labels)
combinations_iter = itertools.combinations(list(range(big_graph.number_of_nodes())), params.n_way)
combinations = []
for i, ways in enumerate(combinations_iter):
real_ways = [int(original_labels[i*n_sample_per_label]) for i in ways]
combinations.append((get_subgraph_weight(gather_edges(big_graph, real_ways)), ways))
combinations.sort(key=(lambda t: t[0]), reverse=True)
return combinations
def write_labels(signal, nodes):
for node in nodes:
if nodes[node]['label'] is None:
continue
signal[node] = nodes[node]['label']
def create_signal(nodes, n_way):
num_nodes = len(nodes)
signal = np.full(shape=(num_nodes, n_way), fill_value=(1./n_way))
write_labels(signal, nodes)
return signal
def bhattacharyya_dist(signal, temperature):
    eps = 0.001
    sqrt_signal = np.sqrt(signal)
    # Pairwise Bhattacharyya coefficients between the rows of `signal`.
    bhattacharyya = np.einsum('ip,jp->ij', sqrt_signal, sqrt_signal)
    bhattacharyya = -np.log(np.maximum(bhattacharyya, eps))
    bhattacharyya = scipy.special.softmax(temperature * bhattacharyya, axis=1)
    return bhattacharyya
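# Added note (not part of the original module): for two identical probability
# rows p == q the coefficient sum_k sqrt(p_k * q_k) equals 1, so the distance
# above is 0; the softmax then spreads weight according to the -log overlap.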
def kl_metric(signal, temperature):
eps = 0.001
num_labels = int(signal.shape[1])
signal = np.log(np.maximum(signal, eps))
signal = scipy.special.softmax(signal * temperature, axis=1)
h = np.sum(signal * np.log(num_labels * np.maximum(signal, eps)), axis=1)
num_nodes = int(signal.shape[0])
h_cross = np.zeros(shape=(num_nodes, num_nodes), dtype=np.float32)
for i in range(num_nodes):
for j in range(num_nodes):
h_cross[i,j] = float(h[j])
return h_cross
def graph_mean_shift(graph, n_way, num_iterations_max=1):
signal = create_signal(graph.nodes, n_way)
adj = nx.adjacency_matrix(graph).toarray()
adj = adj + np.eye(N=int(adj.shape[0]))
adj = adj + 1.*(adj != 0.) # from [-1, 1] to [0, 2]
adj_temperature = 1.
adj = scipy.special.softmax(adj * adj_temperature, axis=1)
epsilon = 0.001
metric_temperature = 1.
for _ in range(num_iterations_max):
metric = kl_metric(signal, metric_temperature)
wcoeff =
|
np.maximum(metric, epsilon)
|
numpy.maximum
|
import numpy as np
from .ndarray import ndarray
from ..array import extent
from .tile_operation import tile_operation
from .shuffle import shuffle
from .map import map2
from .base import force
from .. import util
from .. import rpc
from .. import blob_ctx
def _sample_sort_mapper(array, ex, sample_rate, local_sorted_array):
'''
sample each tile of the original array with sample_rate, sort each tile
and put the local sorted result into local_sorted_array.
Args:
array(DistArray): array to be sorted.
ex(Extent): Region being processed.
sample_rate(float): the sample rate of each tile.
local_sorted_array(DistArray): array to store the local sorted result.
'''
data = array.fetch(ex)
samples = np.random.choice(data.flatten(), size=int(data.size * sample_rate), replace=False)
local_sorted_array.update(ex, np.sort(data, axis=None).reshape(ex.shape))
yield None, samples
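# Hedged sketch (added; not part of the original module): how the gathered
# samples can be turned into partition keys, assuming `samples` is a list of
# 1-D numpy arrays and `n_partitions` evenly sized ranges are wanted.
def _partition_keys_sketch(samples, n_partitions):
  # Sort every sampled value and pick n_partitions - 1 evenly spaced pivots.
  sorted_samples = np.sort(np.concatenate(samples), axis=None)
  step = max(1, sorted_samples.size // n_partitions)
  return sorted_samples[step::step][:n_partitions - 1]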
def _partition_count_mapper(array, ex, partition_keys):
'''
given the partition keys, calculate the index of each partition key in the local tile.
Args:
array(DistArray): local sorted array.
ex(Extent): Region being processed.
partition_keys(numpy.array): the partition keys which separate each partitions.
'''
data = array.fetch(ex).flatten()
idx = np.searchsorted(data, partition_keys, side='left')
idx =
|
np.insert(idx, 0, 0)
|
numpy.insert
|
import numpy as np
import pytest
import pylas
from pylas.point.dims import SubFieldView, ScaledArrayView
def test_sub_field_view_behaves_like_array():
""" This function is used to test if the SubFieldView class
works & has an API that is similar to np.ndarray
"""
array = np.zeros(10, np.uint8)
field = SubFieldView(array, 0b0000_0010)
assert len(field) == 10
assert np.all(field == 0)
assert np.all(field[:] == 0)
assert field.max() == 0
assert np.max(field) == 0
assert field.min() == 0
assert
|
np.min(field)
|
numpy.min
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Controller coordinates sampling and training model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import pickle
import random
flags = tf.flags
gfile = tf.gfile
FLAGS = flags.FLAGS
def find_best_eps_lambda(rewards, lengths):
"""Find the best lambda given a desired epsilon = FLAGS.max_divergence."""
# perhaps not the best way to do this
desired_div = FLAGS.max_divergence * np.mean(lengths)
def calc_divergence(eps_lambda):
max_reward = np.max(rewards)
logz = (max_reward / eps_lambda +
np.log(np.mean(np.exp((rewards - max_reward) / eps_lambda))))
exprr = np.mean(np.exp(rewards / eps_lambda - logz) *
rewards / eps_lambda)
return exprr - logz
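    # Added note (not in the original file): this quantity is KL(p' || p) for
    # the reweighted distribution p' proportional to p * exp(r / eps_lambda),
    # since E_p[exp(r / eps_lambda - logz)] = 1.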
left = 0.0
right = 1000.0
if len(rewards) <= 8:
return (left + right) / 2
num_iter = max(4, 1 + int(np.log((right - left) / 0.1) /
|
np.log(2.0)
|
numpy.log
|
"""Provides the ImprovementEmitter."""
import numpy as np
from ribs.archives import AddStatus
from ribs.emitters._emitter_base import EmitterBase
from ribs.emitters.opt import CMAEvolutionStrategy
class ImprovementEmitter(EmitterBase):
"""Adapts a covariance matrix towards changes in the archive.
This emitter originates in `Fontaine 2020
<https://arxiv.org/abs/1912.02400>`_. Initially, it starts at ``x0`` and
uses CMA-ES to search for solutions that improve the archive, i.e. solutions
that add new entries to the archive or improve existing entries. Once CMA-ES
restarts (see ``restart_rule``), the emitter starts from a randomly chosen
elite in the archive and continues searching for solutions that improve the
archive.
Args:
archive (ribs.archives.ArchiveBase): An archive to use when creating and
inserting solutions. For instance, this can be
:class:`ribs.archives.GridArchive`.
x0 (np.ndarray): Initial solution.
sigma0 (float): Initial step size.
selection_rule ("mu" or "filter"): Method for selecting solutions in
CMA-ES. With "mu" selection, the first half of the solutions will be
selected, while in "filter", any solutions that were added to the
archive will be selected.
restart_rule ("no_improvement" or "basic"): Method to use when checking
for restart. With "basic", only the default CMA-ES convergence rules
will be used, while with "no_improvement", the emitter will restart
when none of the proposed solutions were added to the archive.
weight_rule ("truncation" or "active"): Method for generating weights in
CMA-ES. Either "truncation" (positive weights only) or "active"
(include negative weights).
bounds (None or array-like): Bounds of the solution space. Solutions are
clipped to these bounds. Pass None to indicate there are no bounds.
Alternatively, pass an array-like to specify the bounds for each
dim. Each element in this array-like can be None to indicate no
bound, or a tuple of ``(lower_bound, upper_bound)``, where
``lower_bound`` or ``upper_bound`` may be None to indicate no bound.
batch_size (int): Number of solutions to return in :meth:`ask`. If not
passed in, a batch size will automatically be calculated.
seed (int): Value to seed the random number generator. Set to None to
avoid a fixed seed.
Raises:
ValueError: If any of ``selection_rule``, ``restart_rule``, or
``weight_rule`` is invalid.
"""
def __init__(self,
archive,
x0,
sigma0,
selection_rule="filter",
restart_rule="no_improvement",
weight_rule="truncation",
bounds=None,
batch_size=None,
seed=None):
self._rng = np.random.default_rng(seed)
self._batch_size = batch_size
self._x0 =
|
np.array(x0, dtype=archive.dtype)
|
numpy.array
|
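# Hedged usage sketch (added, not part of the original module): constructing the
# emitter defined above. `archive` is assumed to be an already-built ribs archive
# (the docstring suggests ribs.archives.GridArchive); only the constructor
# signature shown in __init__ above is relied upon, so the call is left commented.
#
# emitter = ImprovementEmitter(
#     archive,
#     x0=np.zeros(10),              # start the CMA-ES search at the origin
#     sigma0=0.1,                   # initial step size
#     selection_rule="filter",
#     restart_rule="no_improvement",
#     batch_size=30,
# )
# solutions = emitter.ask()         # batch of candidate solutions to evaluate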
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: 3 Clause BSD
# http://scikit-criteria.org/
# =============================================================================
# DOCS
# =============================================================================
"""AHP"""
# =============================================================================
# IMPORTS
# =============================================================================
from collections import namedtuple
import numpy as np
from skcriteria import norm, rank
# =============================================================================
# CONSTANTS
# =============================================================================
#: Theoretical limit of number of criteria or alternatives in AHP [SATTY2003]_
#: [SATTY2008]_
#:
#: References
#: ----------
#: <NAME>. and <NAME>. Why the Magic Number Seven Plus or Minus Two,
#: Mathematical and Computer Modelling, 2003, vol. 38, pp. 233-244
AHP_LIMIT = 16
MTX_TYPE_CRITERIA = "criteria"
MTX_TYPE_ALTERNATIVES = "alternatives"
SAATY_MIN, SAATY_MAX = 0, 10
#: Random indexes [CHANGSHENG2013]_
SAATY_RI = {
k: np.float16(v) for k, v in {
1: 0.0,
2: 0.0,
3: 0.52,
4: 0.89,
5: 1.12,
6: 1.26,
7: 1.36,
8: 1.41,
9: 1.46,
10: 1.49,
11: 1.52,
12: 1.54,
13: 1.56,
14: 1.58,
15: 1.59
}.items()
}
def _resolve_saaty_intensity():
Intensity = namedtuple(
"Intensity", ["value", "label", "definition", "explanation"])
saaty_direct = (
(1, "1", "Equal Importance",
"Two activities contribute equally to the objective"),
(2, "2", "Weak or slight",
"Two activities contribute equally to the objective"),
(3, "3", "Moderate importance",
"Experience and judgement slightly favour one activity over another"),
(4, "4", "Moderate plus",
"Experience and judgement slightly favour one activity over another"),
(5, "5", "Strong importance",
"Experience and judgement strongly favour one activity over another"),
(6, "6", "Strong plus",
"Experience and judgement strongly favour one activity over another"),
(7, "7", "Very strong or demonstrated importance", (
"An activity is favoured very strongly over another; its "
"dominance demonstrated in practice")),
(8, "8", "Very, very strong", (
"An activity is favoured very strongly over another; its "
"dominance demonstrated in practice")),
(9, "9", "Extreme importance",
"The evidence favouring one activity over another"),
)
rec_defn = ("If activity i has one of the above non-zero numbers assigned "
"to it when compared with activity j, then j has the "
"reciprocal value when compared with i")
rec_expl = "A reasonable assumption"
saaty_intensity = {}
for value, label, defn, expl in saaty_direct:
saaty_intensity[value] = Intensity(value, label, defn, expl)
rec_value = 1/float(value)
rec_label = "1/{}".format(label)
saaty_intensity[rec_value] = Intensity(
rec_value, rec_label, rec_defn, rec_expl)
return saaty_intensity
SAATY_INTENSITY = _resolve_saaty_intensity()
SAATY_INTENSITY_VALUES = np.array(list(SAATY_INTENSITY.keys()))
del _resolve_saaty_intensity
# =============================================================================
# HELPER FUNCTIONS
# =============================================================================
def validate_values(values):
values = np.asarray(values)
if not np.all((values > SAATY_MIN) & (values < SAATY_MAX)):
msg = "All values must >= {} and <= {}"
raise ValueError(msg.format(SAATY_MIN+1, SAATY_MAX-1))
def validate_ahp_matrix(rows_and_columns, mtx, mtxtype=None):
type_validation = mtxtype is None or (
isinstance(mtxtype, str) and
mtxtype in [MTX_TYPE_CRITERIA, MTX_TYPE_ALTERNATIVES])
if not type_validation:
msg = "'mtxtype must be 'None', '{}' or '{}'. Found '{}'".format(
MTX_TYPE_ALTERNATIVES, MTX_TYPE_CRITERIA, mtxtype)
raise ValueError(msg)
if rows_and_columns > AHP_LIMIT:
if mtxtype:
msg = "The max number of {} must be <= {}.".format(
mtxtype, AHP_LIMIT)
else:
msg = "The max number of rows and columns must be <= {}.".format(
AHP_LIMIT)
raise ValueError(msg)
mtx = np.asarray(mtx)
shape = (rows_and_columns, rows_and_columns)
if mtx.shape != shape:
msg = "The shape of {} matrix must be '{}'. Found '{}'".format(
mtxtype or "the", shape, mtx.shape)
raise ValueError(msg)
if not np.all(np.diagonal(mtx) == 1):
msg = "All the diagonal values must be only ones (1)"
raise ValueError(msg)
validate_values(mtx)
triu, tril = np.triu(mtx), np.tril(mtx)
# tril to triu
old_err_state =
|
np.seterr(divide='ignore')
|
numpy.seterr
|
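# Hedged worked example (added for illustration, not part of the original file):
# a 3x3 pairwise-comparison matrix with a unit diagonal and reciprocal entries,
# together with the standard AHP consistency check CR = CI / RI, where RI comes
# from SAATY_RI above. The validate_ahp_matrix call is left commented because its
# body above is truncated before the reciprocity check it is assumed to perform.
example_mtx = np.array([[1.0, 3.0, 5.0],
                        [1 / 3.0, 1.0, 3.0],
                        [1 / 5.0, 1 / 3.0, 1.0]])
# validate_ahp_matrix(3, example_mtx, mtxtype=MTX_TYPE_CRITERIA)
lambda_max = np.max(np.linalg.eigvals(example_mtx).real)
ci = (lambda_max - 3) / (3 - 1)   # consistency index
cr = ci / SAATY_RI[3]             # consistency ratio; values below 0.1 are acceptable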
import numpy as np
from statsmodels.genmod.bayes_mixed_glm import (BinomialBayesMixedGLM,
PoissonBayesMixedGLM)
import pandas as pd
from scipy import sparse
from numpy.testing import assert_allclose, assert_equal
from scipy.optimize import approx_fprime
def gen_simple_logit(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[1, -1]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(nc, dtype=int)
return y, exog_fe, exog_vc, ident
def gen_simple_poisson(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[0.1, -0.1]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
ident =
|
np.zeros(nc, dtype=int)
|
numpy.zeros
|
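# Small shape check (added, illustrative only) for the simulator defined above:
# 10 clusters of size 5 give 50 observations, a 50x2 fixed-effects design and a
# 50x10 random-effects design built from the Kronecker product.
y, exog_fe, exog_vc, ident = gen_simple_logit(nc=10, cs=5, s=1.0)
assert y.shape == (50,) and exog_fe.shape == (50, 2) and exog_vc.shape == (50, 10)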
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Student t distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from scipy import stats
import tensorflow as tf
class StudentTTest(tf.test.TestCase):
def testStudentPDFAndLogPDF(self):
with tf.Session():
batch_size = 6
df = tf.constant([3.0] * batch_size)
mu = tf.constant([7.0] * batch_size)
sigma = tf.constant([8.0] * batch_size)
df_v = 3.0
mu_v = 7.0
sigma_v = 8.0
t = np.array([-2.5, 2.5, 8.0, 0.0, -1.0, 2.0], dtype=np.float32)
student = tf.contrib.distributions.StudentT(df, mu=mu, sigma=sigma)
log_pdf = student.log_pdf(t)
self.assertEquals(log_pdf.get_shape(), (6,))
log_pdf_values = log_pdf.eval()
pdf = student.pdf(t)
self.assertEquals(pdf.get_shape(), (6,))
pdf_values = pdf.eval()
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentLogPDFMultidimensional(self):
with tf.Session():
batch_size = 6
df = tf.constant([[1.5, 7.2]] * batch_size)
mu = tf.constant([[3.0, -3.0]] * batch_size)
sigma = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
df_v = np.array([1.5, 7.2])
mu_v = np.array([3.0, -3.0])
sigma_v = np.array([np.sqrt(10.0), np.sqrt(15.0)])
t = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
student = tf.contrib.distributions.StudentT(df, mu=mu, sigma=sigma)
log_pdf = student.log_pdf(t)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
pdf = student.pdf(t)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentEntropy(self):
df_v =
|
np.array([[2., 3., 7.]])
|
numpy.array
|
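# Illustrative reference values (added, not part of the original test): scipy can
# supply the expected entropies that testStudentEntropy would compare against,
# mirroring how expected_log_pdf/expected_pdf are built from scipy.stats above.
from scipy import stats
import numpy as np
df_v = np.array([2., 3., 7.])
expected_entropy = np.array([stats.t.entropy(df) for df in df_v])  # loc=0, scale=1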
# Simulate Data
import numpy as np
import networkx as nx
from kuramoto import Kuramoto
# Network generation
n_nodes = 50
p = 1 # p=1 -> all-to-all connectivity
graph_nx = nx.erdos_renyi_graph(n=n_nodes, p=p)
graph = nx.to_numpy_array(graph_nx)
# Kuramoto model simulation
natural_freq_mean = 20
natural_freq_var = .1
dt = .01
T = 10
K_start = 0
K_stop = 3
coupling_vals = np.linspace(K_start, K_stop, 200)
runs = []
for coupling in coupling_vals:
model = Kuramoto(coupling=coupling, dt=dt, T=T, n_nodes=n_nodes)
model.natfreqs = np.random.normal(natural_freq_mean, natural_freq_var, size=n_nodes) # reset natural frequencies (20rad/sec 3 Hz)
act_mat = model.run(adj_mat=graph)
runs.append(act_mat)
runs_array = np.array(runs)
coherence = []
for i, coupling in enumerate(coupling_vals):
coherence.append(
[model.phase_coherence(vec)
for vec in runs_array[i, ::].T]
)
coherence_array = np.array(coherence)
# Window the data and window both the zero-step and one-step-ahead label time-series
from gtda.time_series import SlidingWindow
window_size = 25
stride = 1
coherence_threshold = .8
onset_threshold = .5
SW = SlidingWindow(window_size, stride)
# The for-loop steps through each coupling strength value
for i in np.arange(len(coherence_array)):
data = runs_array[i,::]
data = np.sin(data.T)
coherence = coherence_array[i, :]
labels = coherence
labels2 = np.where(coherence > coherence_threshold, 1, 0) # sync regime
labels1 = np.where(coherence > onset_threshold, 1, 0) # onset regime
labels = labels1 + labels2
data_sw, labels_sw = SW.fit_transform_resample(data, labels)
# We want to predict the label of the next window given the current window
labels_sw_one_step = np.roll(labels_sw, -1)
labels_sw_one_step[-1] = labels_sw_one_step[-2] # fill last val to its previous value
if i == 0:
yr_one_step = labels_sw_one_step
X_sw = data_sw
else:
yr_one_step = np.concatenate((yr_one_step, labels_sw_one_step), axis=0)
X_sw= np.concatenate((X_sw, data_sw), axis=0)
# Create topological feature vector
from gtda.time_series import PearsonDissimilarity
from gtda.homology import VietorisRipsPersistence
from gtda.diagrams import Amplitude, PersistenceEntropy, NumberOfPoints
PD = PearsonDissimilarity()
X_pd = PD.fit_transform(X_sw)
VR = VietorisRipsPersistence(metric="precomputed", homology_dimensions=[0, 1, 2])
Ampl = Amplitude()
X_a = []
PerEnt = PersistenceEntropy()
X_pe = []
NumPts = NumberOfPoints()
X_np = []
for i in np.arange(len(X_pd)):
X_vr = VR.fit_transform([X_pd[i]])
X_a.append(Ampl.fit_transform(X_vr))
X_pe.append(PerEnt.fit_transform(X_vr))
X_np.append(NumPts.fit_transform(X_vr))
X_a = np.array(X_a)
X_a = np.squeeze(X_a)
X_pe = np.array(X_pe)
X_pe = np.squeeze(X_pe)
X_np =
|
np.array(X_np)
|
numpy.array
|
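# One plausible next step (added for illustration, not in the original script):
# stack the amplitude, persistence-entropy and number-of-points features into a
# single design matrix. X_np is assumed to be squeezed like the other two arrays.
X_np = np.squeeze(X_np)
X_features = np.concatenate([X_a, X_pe, X_np], axis=1)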
import pandas as pd
import numpy as np
from functools import wraps
import copy
# Pass through pd.DataFrame methods for a (1,1,o,d) shaped triangle:
df_passthru = ['to_clipboard', 'to_csv', 'to_pickle', 'to_excel', 'to_json',
'to_html', 'to_dict', 'unstack', 'pivot', 'drop_duplicates',
'describe', 'melt']
# Aggregate method overridden to the 4D Triangle Shape
agg_funcs = ['sum', 'mean', 'median', 'max', 'min', 'prod', 'var', 'std']
agg_funcs = {item: 'nan'+item for item in agg_funcs}
# def check_triangle_postcondition(f):
# ''' Post-condition check to ensure the integrity of the triangle object
# remains intact. (used for debugging)
# '''
# @wraps(f)
# def wrapper(*args, **kwargs):
# X = f(*args, **kwargs)
# if not hasattr(X, 'triangle'):
# raise ValueError('X is missing triangle attribute')
# if X.triangle.ndim != 4:
# raise ValueError('X.triangle must be a 4-dimensional array')
# if len(X.kdims) != X.triangle.shape[0]:
# raise ValueError('X.index and X.triangle are misaligned')
# if len(X.vdims) != X.triangle.shape[1]:
# raise ValueError('X.columns and X.triangle are misaligned')
# return X
# return wrapper
class TriangleBase:
def __init__(self, data=None, origin=None, development=None,
columns=None, index=None):
# Sanitize Inputs
columns = [columns] if type(columns) is str else columns
origin = [origin] if type(origin) is str else origin
if development is not None and type(development) is str:
development = [development]
key_gr = origin if not development else origin+development
if not index:
index = ['Total']
data_agg = data.groupby(key_gr).sum().reset_index()
data_agg[index[0]] = 'Total'
else:
data_agg = data.groupby(key_gr+index) \
.sum().reset_index()
# Convert origin/development to dates
origin_date = TriangleBase.to_datetime(data_agg, origin)
self.origin_grain = TriangleBase.get_grain(origin_date)
# These only work with valuation periods and not lags
if development:
development_date = TriangleBase.to_datetime(data_agg, development,
period_end=True)
self.development_grain = TriangleBase.get_grain(development_date)
col = 'development'
else:
development_date = origin_date
self.development_grain = self.origin_grain
col = None
# Prep the data for 4D Triangle
data_agg = self.get_axes(data_agg, index, columns,
origin_date, development_date)
data_agg = pd.pivot_table(data_agg, index=index+['origin'],
columns=col, values=columns,
aggfunc='sum')
# Assign object properties
self.kdims = np.array(data_agg.index.droplevel(-1).unique())
self.odims = np.array(data_agg.index.levels[-1].unique())
if development:
self.ddims = np.array(data_agg.columns.levels[-1].unique())
self.ddims = self.ddims*({'Y': 12, 'Q': 3, 'M': 1}
[self.development_grain])
self.vdims = np.array(data_agg.columns.levels[0].unique())
else:
self.ddims = np.array([None])
self.vdims = np.array(data_agg.columns.unique())
self.ddims = self.ddims
self.valuation_date = development_date.max()
self.key_labels = index
self.iloc = _Ilocation(self)
self.loc = _Location(self)
# Create 4D Triangle
triangle = \
np.reshape(np.array(data_agg), (len(self.kdims), len(self.odims),
len(self.vdims), len(self.ddims)))
triangle = np.swapaxes(triangle, 1, 2)
# Set all 0s to NAN for nansafe ufunc arithmetic
triangle[triangle == 0] = np.nan
self.triangle = triangle
# Used to show NANs in lower part of triangle
self.nan_override = False
self.valuation = self._valuation_triangle()
# ---------------------------------------------------------------- #
# ----------------------- Class Properties ----------------------- #
# ---------------------------------------------------------------- #
def _len_check(self, x, y):
if len(x) != len(y):
raise ValueError(f'Length mismatch: Expected axis has '
                 f'{len(x)} elements, new values have '
                 f'{len(y)} elements')
@property
def shape(self):
return self.triangle.shape
@property
def index(self):
return pd.DataFrame(list(self.kdims), columns=self.key_labels)
@property
def columns(self):
return self.idx_table().columns
@columns.setter
def columns(self, value):
self._len_check(self.columns, value)
self.vdims = [value] if type(value) is str else value
@property
def origin(self):
return pd.DatetimeIndex(self.odims, name='origin')
@origin.setter
def origin(self, value):
self._len_check(self.origin, value)
self.odims = [value] if type(value) is str else value
@property
def development(self):
return pd.Series(list(self.ddims), name='development').to_frame()
@development.setter
def development(self, value):
self._len_check(self.development, value)
self.ddims = [value] if type(value) is str else value
@property
def latest_diagonal(self):
return self.get_latest_diagonal()
@property
# @check_triangle_postcondition
def link_ratio(self):
obj = copy.deepcopy(self)
temp = obj.triangle.copy()
temp[temp == 0] = np.nan
val_array = obj.valuation.values.reshape(obj.shape[-2:],order='f')[:, 1:]
obj.triangle = temp[..., 1:]/temp[..., :-1]
obj.ddims = np.array([f'{obj.ddims[i]}-{obj.ddims[i+1]}'
for i in range(len(obj.ddims)-1)])
# Check whether we want to eliminate the last origin period
if np.max(np.sum(~np.isnan(self.triangle[..., -1, :]), 2)-1) == 0:
obj.triangle = obj.triangle[..., :-1, :]
obj.odims = obj.odims[:-1]
val_array = val_array[:-1, :]
obj.valuation = pd.DatetimeIndex(pd.DataFrame(val_array).unstack().values)
return obj
@property
def age_to_age(self):
return self.link_ratio
# ---------------------------------------------------------------- #
# ---------------------- End User Methods ------------------------ #
# ---------------------------------------------------------------- #
# @check_triangle_postcondition
def get_latest_diagonal(self, compress=True):
''' Method to return the latest diagonal of the triangle. Requires
self.nan_override == False.
'''
obj = copy.deepcopy(self)
diagonal = obj[obj.valuation == obj.valuation_date].triangle
if compress:
diagonal = np.expand_dims(np.nansum(diagonal, 3), 3)
obj.ddims = ['Latest']
obj.valuation = pd.DatetimeIndex(
[pd.to_datetime(obj.valuation_date)]*len(obj.odims))
obj.triangle = diagonal
return obj
# @check_triangle_postcondition
def incr_to_cum(self, inplace=False):
"""Method to convert an incremental triangle into a cumulative triangle.
Parameters
----------
inplace: bool
Set to True will update the instance data attribute inplace
Returns
-------
Updated instance of triangle accumulated along the development axis
"""
if inplace:
np.cumsum(np.nan_to_num(self.triangle), axis=3, out=self.triangle)
self.triangle = self.expand_dims(self.nan_triangle())*self.triangle
self.triangle[self.triangle == 0] = np.nan
return self
else:
new_obj = copy.deepcopy(self)
return new_obj.incr_to_cum(inplace=True)
# @check_triangle_postcondition
def cum_to_incr(self, inplace=False):
"""Method to convert an cumlative triangle into a incremental triangle.
Parameters
----------
inplace: bool
Set to True will update the instance data attribute inplace
Returns
-------
Updated instance of triangle converted to incremental form along the development axis
"""
if inplace:
temp = np.nan_to_num(self.triangle)[..., 1:] - \
np.nan_to_num(self.triangle)[..., :-1]
temp = np.concatenate((self.triangle[..., 0:1], temp), axis=3)
temp = temp*self.expand_dims(self.nan_triangle())
temp[temp == 0] = np.nan
self.triangle = temp
return self
else:
new_obj = copy.deepcopy(self)
return new_obj.cum_to_incr(inplace=True)
# @check_triangle_postcondition
def grain(self, grain='', incremental=False, inplace=False):
"""Changes the grain of a cumulative triangle.
Parameters
----------
grain : str
The grain to which you want your triangle converted, specified as
'O<x>D<y>' where <x> and <y> can take on values of ``['Y', 'Q', 'M']``
For example, 'OYDY' for Origin Year/Development Year, 'OQDM' for
Origin quarter, etc.
incremental : bool
Not implemented yet
inplace : bool
Whether to mutate the existing Triangle instance or return a new
one.
Returns
-------
Triangle
"""
if inplace:
origin_grain = grain[1:2]
development_grain = grain[-1]
new_tri, o = self._set_ograin(grain=grain, incremental=incremental)
# Set development Grain
dev_grain_dict = {'M': {'Y': 12, 'Q': 3, 'M': 1},
'Q': {'Y': 4, 'Q': 1},
'Y': {'Y': 1}}
if self.shape[3] != 1:
keeps = dev_grain_dict[self.development_grain][development_grain]
keeps = np.where(np.arange(new_tri.shape[3]) % keeps == 0)[0]
keeps = -(keeps + 1)[::-1]
new_tri = new_tri[..., keeps]
self.ddims = self.ddims[keeps]
self.odims =
|
np.unique(o)
|
numpy.unique
|
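# Minimal numpy sketch (added, illustration only) of what incr_to_cum and
# cum_to_incr do along the development axis of the triangle: a cumulative sum one
# way, and a first difference that keeps the first column the other way.
import numpy as np
incr = np.array([[100., 50., 25.],
                 [110., 60., np.nan]])
cum = np.cumsum(np.nan_to_num(incr), axis=-1)                           # incremental -> cumulative
back = np.concatenate((cum[..., :1], np.diff(cum, axis=-1)), axis=-1)   # cumulative -> incremental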
# Copyright 2016-2018 Iowa State University Research Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from delamo.api import DelamoModeler
from delamo.api import Layer
from delamo.api import bond_layers
from delamo.api import SimpleCoordSys
from delamo.api import solid_solid_coupling
from delamo import process
from delamo.layer import LayerMold
import os
# Front matter
# ------------
# Initialize the DeLaMo model
DM = DelamoModeler.Initialize(globals(),
pointtolerancefactor=100.0,
normaltolerance=100e-4,
GapWidth=0.0)
# This script then generates both a CAD file and a Python script.
# The Python script can be run from Abaqus. It includes the
# initialization script referenced above, and also opens
# the CAD file and builds the model.
# The name of the script file to generate and
# the name of the CAD file to write are returned
# by process.output_filenames()
# The first parameter to output_filenames
# should always match the name of the original script
# with the ".py" stripped
# In manually generated scripts, always specify phase
# to be "ORIGINAL"
(script_to_generate,
cad_file_path_from_script,
layer_boundary_template) = process.output_filenames("07_SolidSolidCoupling", process="DEFECT_INSERTION",
phase="ORIGINAL",
apply_damage_script="07_SolidSolidCoupling_add_damage.py", )
# When writing a DeLaMo script, you start by creating a
# finite element initialization script. This is a
# Python script for ABAQUS that defines your various parameters
# -- material properties, etc. as Python variables.
# In this case they are stored in the "abqparams_CFRP.py" file
DM.abaqus_init_script("abqparams_CFRP.py", globals())
# The above call automatically inserts wrapped copies of variables
# defined in those scripts into the global variable space. Then you
# can reference those variables in this script
# (you can have as many init scripts as you like)
# The DeLaMo model generates several sets of instructions
# for different phases of the finite element modeling process:
# DM.initinstrs (initialization)
# DM.assemblyinstrs (model assembly)
# DM.bcinstrs (boundary conditions)
# DM.meshinstrs (meshing)
# All methods called from those variables will generally be executed
# in the assemblyinstrs pool unless otherwise overridden. You can
# use e.g. DM.meshinstrs.rewrapobj() to get a reference to
# one of these variables that will execute in an alternate context.
#
# For example,
LaminateAssemblyMeshing = DM.meshinstrs.rewrapobj(LaminateAssembly)
# Creates a reference to the LaminateAssembly, for which method calls
# execute in the meshing context
# Basic parameters
# Set layer thickness for lamina
# *** MUST BE KEPT IN SYNC WITH 07_SolidSolidCoupling_add_damage.py ***
numLayers = 8 # If updated must also update loop limit, below
thickness1 = 2.194565 / numLayers
thickness2 = (4.57197 - 2.194565)/ numLayers
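# Quick sanity check (added, not from the original script): the two per-ply
# thicknesses are chosen so the 8-ply stacks add up to the nominal laminate
# thicknesses of 2.194565 mm and 4.57197 mm used in the CAD geometry.
assert abs(numLayers * thickness1 - 2.194565) < 1e-9
assert abs(numLayers * thickness1 + numLayers * thickness2 - 4.57197) < 1e-9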
(OrigMold, SolidSolidCoupling) = solid_solid_coupling.from_solid_and_tool(DM,
os.path.join("..", "data", "NASAShellOverwrap.STEP"),
os.path.join("..", "data",
"CuttingTool2.STEP"),
OrigDirPoint=np.array((-200.0, 50.0, 0.0)),
OrigDirNormal=np.array((0.0, 0.0, 1.0)))
MoldEdgePoints = OrigMold.GetPointsOnOuterEdges()
# Define a coordinate system
# This example defines +x direction along 0 deg. fibers,
# +y direction across 0 deg fibers, equivalent to
# the default (when coordsys is not specified)
coordsys = SimpleCoordSys((1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
layup = [0, 45, -45, 90, 90, -45, 45, 0, 0, 45, -45, 90, 90, -45, 45, 0, 0, 45, -45, 90, 90, -45, 45, 0, 0, 45, -45, 90, 90, -45, 45, 0 ] # assumed layup
# define properties for solid
solidmeshsize=15.0 # mm
# Define ABAQUS section for surrounding solid model.. Should really make principal axes function of position for stiffener portion...
## Unfortunately ABAQUS can't use a solid section for a complicated geometry
## that needs to be Tet meshed. But if it weren't so complicated and it
## could be meshed, here's how you would do it:
#sectionlayers = [ section.SectionLayer(material='CFRPLaminaMat', thickness=0.125, orientAngle = layupangle, numIntPts=1,plyName='') for layupangle in layup ]
#SolidSection=FEModel.CompositeSolidSection(name='SolidSection',layupName='SolidSectionLayup',symmetric=False, layup = sectionlayers )
# Instead we model the solid as just a general solid with a single orientation
# For the moment, model the solid as uniaxial... Should probably build a hybrid stiffness model based on laminate theory
SolidSection=FEModel.HomogeneousSolidSection(name='LaminaSection',material=CFRPLaminaMat.name,thickness=None)
SolidSolidCoupling.solidpart.MeshSimple(MeshElemTypes, solidmeshsize, ElemShape=abqC.TET, ElemTechnique=abqC.FREE,refined_edges = MoldEdgePoints,pointtolerance=DM.abqpointtolerance,refinedmeshsize=meshsize)
SolidSolidCoupling.solidpart.AssignSection(SolidSection)
SolidSolidCoupling.solidpart.ApplyLayup(coordsys,0.0) # orientation of 0 means that 0 degrees as defined in the SolidSection layers lines up with the first axis (fiber direction) of the coordsys.
# Create and add point marker for fixed faced boundary condition
# This point identifies it
FixedPoint = [0, +107.95, 1.]
ForcePoint = [0, -107.95, 1.]
Mold = OrigMold
previouslayer = None
layers = []
# Create the flat region
for layernum in range(16): # Iteration limit should match numLayers*2 but must be a constant so that loop unwrapping can work
# Set the thickness for the 2 zones
if (layernum < numLayers):
thickness = thickness1
pass
if layernum >= numLayers: # Avoid using else clause because that triggers a redbaron bug
thickness = thickness2
pass
layer = Layer.CreateFromMold(DM, Mold, "OFFSET", thickness, "Layer_%d" % (layernum + 1), LaminaSection,
layup[layernum], coordsys=coordsys)
# If it is the 9th layer, then cut the layer
if (layernum == numLayers):
layer.Split(os.path.join("..", "data", "SplitLineNASA.csv"), DM.abqpointtolerance)
layer.gk_layer.RemoveLayerBodyByPointInFace(
|
np.array((-200.0, 60.0, 2.19456))
|
numpy.array
|
from construct import PaddedString, Struct, Int32un
import numpy as np
from oct_converter.image_types import OCTVolumeWithMetaData, FundusImageWithMetaData
from pylibjpeg import decode
from pathlib import Path
class FDA(object):
""" Class for extracting data from Topcon's .fda file format.
Notes:
Mostly based on description of .fda file format here:
https://bitbucket.org/uocte/uocte/wiki/Topcon%20File%20Format
Attributes:
filepath (str): Path to .fda file for reading.
header (obj:Struct): Defines structure of volume's header.
oct_header (obj:Struct): Defines structure of OCT header.
fundus_header (obj:Struct): Defines structure of fundus header.
chunk_dict (dict): Name of data chunks present in the file, and their start locations.
"""
def __init__(self, filepath):
self.filepath = Path(filepath)
if not self.filepath.exists():
raise FileNotFoundError(self.filepath)
self.header = Struct(
'FOCT' / PaddedString(4, 'ascii'),
'FDA' / PaddedString(3, 'ascii'),
'version_info_1' / Int32un,
'version_info_2' / Int32un
)
self.oct_header = Struct(
'type' / PaddedString(1, 'ascii'),
'unknown1' / Int32un,
'unknown2' / Int32un,
'width' / Int32un,
'height' / Int32un,
'number_slices' / Int32un,
'unknown3' / Int32un,
)
self.oct_header_2 = Struct(
'unknown' / PaddedString(1, 'ascii'),
'width' / Int32un,
'height' / Int32un,
'bits_per_pixel' / Int32un,
'number_slices' / Int32un,
'unknown' / PaddedString(1, 'ascii'),
'size' / Int32un,
)
self.fundus_header = Struct(
'width' / Int32un,
'height' / Int32un,
'bits_per_pixel' / Int32un,
'number_slices' / Int32un,
'unknown' / PaddedString(4, 'ascii'),
'size' / Int32un,
# 'img' / Int8un,
)
self.chunk_dict = self.get_list_of_file_chunks()
def get_list_of_file_chunks(self):
"""Find all data chunks present in the file.
Returns:
dict
"""
chunk_dict = {}
with open(self.filepath, 'rb') as f:
# skip header
raw = f.read(15)
header = self.header.parse(raw)
eof = False
while not eof:
chunk_name_size = np.fromstring(f.read(1), dtype=np.uint8)[0]
if chunk_name_size == 0:
eof = True
else:
chunk_name = f.read(chunk_name_size)
chunk_size = np.fromstring(f.read(4), dtype=np.uint32)[0]
chunk_location = f.tell()
f.seek(chunk_size, 1)
chunk_dict[chunk_name] = [chunk_location, chunk_size]
print('File {} contains the following chunks:'.format(self.filepath))
for key in chunk_dict.keys():
print(key)
return chunk_dict
def read_oct_volume(self):
""" Reads OCT data.
Returns:
obj:OCTVolumeWithMetaData
"""
if b'@IMG_JPEG' not in self.chunk_dict:
raise ValueError('Could not find OCT header @IMG_JPEG in chunk list')
with open(self.filepath, 'rb') as f:
chunk_location, chunk_size = self.chunk_dict[b'@IMG_JPEG']
f.seek(chunk_location) # Set the chunk’s current position.
raw = f.read(25)
oct_header = self.oct_header.parse(raw)
volume = np.zeros((oct_header.height, oct_header.width, oct_header.number_slices))
for i in range(oct_header.number_slices):
size = np.fromstring(f.read(4), dtype=np.int32)[0]
raw_slice= f.read(size)
slice = decode(raw_slice)
volume[:,:,i] = slice
oct_volume = OCTVolumeWithMetaData([volume[:, :, i] for i in range(volume.shape[2])])
return oct_volume
def read_oct_volume_2(self):
""" Reads OCT data.
Returns:
obj:OCTVolumeWithMetaData
"""
if b'@IMG_MOT_COMP_03' not in self.chunk_dict:
raise ValueError('Could not find OCT header @IMG_MOT_COMP_03 in chunk list')
with open(self.filepath, 'rb') as f:
chunk_location, chunk_size = self.chunk_dict[b'@IMG_MOT_COMP_03']
f.seek(chunk_location) # Set the chunk’s current position.
raw = f.read(22)
oct_header = self.oct_header_2.parse(raw)
number_pixels = oct_header.width * oct_header.height * oct_header.number_slices
raw_volume = np.fromstring(f.read(number_pixels * 2), dtype=np.uint16)
volume =
|
np.array(raw_volume)
|
numpy.array
|
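# Hedged usage sketch (added, not part of the original module). The file path is
# a hypothetical example; only the constructor and the read_oct_volume method
# defined above are assumed.
#
# fda = FDA("example_scan.fda")
# oct_volume = fda.read_oct_volume()   # OCTVolumeWithMetaData, one 2D slice per B-scan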
# Online KDE - Recursive max likelihood for parameters estimation (h_t)
# Functions to be imported into the main script
# 2/2/2021
# Author : <NAME>
# Libraries
import numpy as np
from scipy.stats import uniform, norm
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import mean_squared_error
import seaborn as sns
import datetime
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# defining plot style
def plot_style_LaTex():
plt.style.use('seaborn-ticks')
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams.update({'font.size': 20})
# plt.rcParams.update({'font.size': 12})
plt.rcParams["axes.grid"] = False
plt.rcParams["legend.loc"] = 'best'
return
# Initial uniform distribution to start the forecast algorithm based on the dataset values
def initial_distribution(ini, fmax, samples):
# create uniform distribution associated to that vector
y = np.linspace(ini, fmax, samples)
pdf_uni = uniform.pdf(y, loc=ini, scale=fmax)
return y, pdf_uni
# function to obtain a small dataset - for testing purposes
def smaller_df(df, until_month):
return df[df.index.month <= until_month]
# function to calculate the histogram based on the Freedman-Diaconis approach and keeping the function in memory
def hist_calculation(df):
hist, bins =
|
np.histogram(df['flex_load_kWh'].values, bins='fd', density=True)
|
numpy.histogram
|
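# Stand-alone illustration (added) of the Freedman-Diaconis binning used above:
# bins='fd' lets numpy pick the bin width from the interquartile range, and
# density=True normalises the histogram so it integrates to one.
import numpy as np
rng = np.random.default_rng(0)
sample = rng.normal(size=500)
hist, bins = np.histogram(sample, bins='fd', density=True)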
#! /usr/bin/env python
"""
Generate eyediagram and save it to eye.eps
"""
import argparse
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pylab as plt
from matplotlib.colors import Normalize
def resample(data, time):
xs = data[:,0]
ys = data[:,1]
f = interp1d(xs,ys)
y = f(time)
return y
def rollover(sample, time, period, time_offset):
x = []
y = []
for idx,t in enumerate(time):
x.append((t + time_offset) % period)
y.append(sample[idx])
return y,x
def run(filename, imgname, period, time_offset, nbin):
data = np.loadtxt(filename)
x = data[:,0]
t_start = x[0]
t_stop = x[-1]
t_step = period/nbin
time =
|
np.arange(t_start, t_stop, t_step)
|
numpy.arange
|
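# Minimal illustration (added) of the resampling step used in run(): interp1d
# builds an interpolant from the raw samples, which is then evaluated on the
# uniform time grid produced by np.arange.
import numpy as np
from scipy.interpolate import interp1d
demo_t = np.linspace(0.0, 1.0, 11)
demo_y = np.sin(2 * np.pi * demo_t)
demo_grid = np.arange(0.0, 1.0, 0.01)            # stays inside [0, 1]
demo_resampled = interp1d(demo_t, demo_y)(demo_grid)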
import numpy as np
from ... import utils
from ..base import BaseAlg
class ModelS(BaseAlg):
"""
The receiver learns a model of the sender and uses it to predict the state given the
message
"""
def __init__(
self,
n_state,
n_mess,
n_act,
n_runs,
alpha0=0.1,
eps0=0.1,
eps0_decay=0,
mode=0,
act_variant="exact",
train_variant="exact",
sample_variant="sample",
**kwargs
):
"""
Args:
n_state (int): Number of states
n_mess (int): Number of messages
n_act (int): Number of actions
n_runs (int): Number of runs
alpha0 (float): Step size
eps0 (float): Initial exploration rate
eps0_decay (float): Decay of exploration rate per step
mode (int): 0 for communication, 1 for fixed messages, 2 for fixed actions
act_variant (str):
'exact' for exact model of q0, common seed;
'q0_imperfect' for imperfect model of q0, common seed;
'seed_imperfect' for exact model of q0, different seed;
'imperfect' for imperfect model of q0, different seed (default: 'exact')
train_variant (str):
'exact' for using true state to train q
'imperfect' for using sampled state to train q (default: 'exact')
sample_variant (str):
'sample' for sampling the state
'expectation' for taking expected Q values of all possible states
(default: 'sample')
"""
self.n_state = n_state
self.n_mess = n_mess
self.n_act = n_act
self.n_runs = n_runs
self.alpha = alpha0
self.eps = eps0
self.eps_decay = eps0_decay
self.mode = mode
self.act_variant = act_variant
self.train_variant = train_variant
self.sample_variant = sample_variant
self.q0 = np.zeros((n_runs, n_state, n_mess))
self.q1 = np.zeros((n_runs, n_mess, n_act))
self.q = np.zeros((n_runs, n_state, n_act))
self.q0_expected = np.zeros((n_runs, n_state, n_mess))
self.s0 = None
self.s1 = None
self.m0 = None
self.a1 = None
self.s0_sampled = None
self.p_s0_possible = None
def act(self, state, test=False):
"""
Args:
state (np.ndarray): Current state (size=n_runs)
test (bool): True if testing (no exploration)
Returns:
Current message, action (size=n_runs)
"""
if test: # Temporarily set eps to 0 to stop exploration
true_eps = self.eps
self.eps = 0
self.s0 = state
if self.mode == 1:
a0_expected = np.zeros((self.n_runs, self.n_state), dtype=int)
a0_expected[:] = np.arange(self.n_state)
self.m0 = self.s0
else:
a0_expected = np.zeros((self.n_runs, self.n_state), dtype=int)
exp_mask = np.random.random(self.n_runs) < self.eps
a0_expected[exp_mask] = np.random.randint(
self.n_mess, size=(self.n_runs, self.n_state)
)[exp_mask]
not_exp_mask = np.logical_not(exp_mask)
if self.act_variant == "exact":
a0_expected[not_exp_mask] = utils.rand_argmax(self.q0, axis=-1)[
not_exp_mask
]
self.m0 = a0_expected[np.arange(self.n_runs), self.s0]
elif self.act_variant == "q0_imperfect":
a0_expected[not_exp_mask] = utils.rand_argmax(self.q0_expected, axis=-1)[
not_exp_mask
]
self.m0 = np.zeros(self.n_runs, dtype=int)
self.m0[exp_mask] = a0_expected[np.arange(self.n_runs), self.s0][exp_mask]
q = self.q0[np.arange(self.n_runs), state]
self.m0[not_exp_mask] = utils.rand_argmax(q)[not_exp_mask]
elif self.act_variant == "seed_imperfect":
a0_expected[not_exp_mask] = utils.rand_argmax(self.q0, axis=-1)[
not_exp_mask
]
self.m0 = np.zeros(self.n_runs, dtype=int)
exp_mask = np.random.random(self.n_runs) < self.eps
self.m0[exp_mask] =
|
np.random.randint(self.n_mess, size=self.n_runs)
|
numpy.random.randint
|
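# Stand-alone sketch (added) of the epsilon-greedy pattern that ModelS.act applies
# per run: explore with probability eps, otherwise take the greedy action from the
# Q table. utils.rand_argmax in the class above additionally breaks ties randomly.
import numpy as np
rng = np.random.default_rng(0)
n_runs, n_act, eps = 5, 3, 0.1
q = rng.normal(size=(n_runs, n_act))
explore = rng.random(n_runs) < eps
actions = np.argmax(q, axis=-1)
actions[explore] = rng.integers(n_act, size=explore.sum())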
"""Genetic evaluation of individuals."""
import os
import sys
# import time
from collections import Counter
from itertools import compress
from numba import njit
import pkg_resources
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.stats
def example_data():
"""Provide data to the package."""
cwd = os.getcwd()
stream = pkg_resources.resource_stream(__name__, 'data/chr.txt')
chrmosomedata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/group.txt')
groupdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/effects.txt')
markereffdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/phase.txt')
genodata = pd.read_table(stream, header=None, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/ped.txt')
ped = pd.read_table(stream, header=None, sep=" ")
os.chdir(cwd)
return chrmosomedata, markereffdata, genodata, groupdata, ped
if __name__ == "__main__":
example_data()
@njit
def fnrep2(gen, aaxx, aaxx1):
"""Code phased genotypes into 1, 2, 3 and 4."""
qqq = np.empty((int(gen.shape[0]/2), gen.shape[1]), np.int_)
for i in range(qqq.shape[0]):
for j in range(qqq.shape[1]):
if gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx:
qqq[i, j] = 1
elif gen[2*i, j] == aaxx1 and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 2
elif gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 3
else:
qqq[i, j] = 4
return qqq
def haptogen(gen, progress=False):
"""Convert haplotypes to coded genotypes."""
if progress:
print("Converting phased haplotypes to genotypes")
if gen.shape[1] == 2:
gen = np.array(gen.iloc[:, 1]) # del col containing ID
# convert string to 2D array of integers
gen = [list(gen[i].rstrip()) for i in range(gen.shape[0])]
gen = np.array(gen, int)
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
elif gen.shape[1] > 2:
gen = gen.iloc[:, 1:gen.shape[1]] # del col containing ID
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
return gen
class Datacheck:
"""Check the input data for errors and store relevant info as an object."""
def __init__(self, gmap, meff, gmat, group, indwt, progress=False):
"""
Check input data for errors and store relevant info as class object.
Parameters
----------
gmap : pandas.DataFrame
Index: RangeIndex
Columns:
Name: CHR, dtype: int64; chromosome number
Name: SNPName, dtype: object; marker name
Name: Position: dtype: int64; marker position in bp
Name: group: dtype: float64; marker distance (cM) or reco rates
meff : pandas.DataFrame
Index: RangeIndex
Columns:
Name: trait names: float64; no. of columns = no of traits
gmat : pandas.DataFrame
Index: RangeIndex
Columns:
Name: ID, dtype: int64 or str; identification of individuals
Name: haplotypes, dtype: object; must be biallelic
group : pandas.DataFrame
Index: RangeIndex
Columns:
Name: group, dtype: object; group code of individuals, e.g., M, F
Name: ID, dtype: int64 or str; identification of individuals
indwt : list of index weights for each trait
progress : bool, optional; print progress of the function if True
Returns
-------
Stored input data as attributes of the class object.
"""
# check: ensures number of traits match size of index weights
indwt = np.array(indwt)
if (meff.shape[1]-1) != indwt.size:
sys.exit('no. of index weights do not match marker effects cols')
# check: ensure individuals' genotypes match group and ID info
id_indgrp = pd.Series(group.iloc[:, 1]).astype(str) # no of inds
if not pd.Series(
pd.unique(gmat.iloc[:, 0])).astype(str).equals(id_indgrp):
sys.exit("ID of individuals in group & genotypic data don't match")
# check: ensure marker names in marker map and effects match
if not (gmap.iloc[:, 1].astype(str)).equals(meff.iloc[:, 0].astype(str)):
print("Discrepancies between marker names")
sys.exit("Check genetic map and marker effects")
# check: ensure marker or allele sub effect are all numeric
meff = meff.iloc[:, 1:meff.shape[1]]
test = meff.apply(
lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
if not test.all():
sys.exit("Marker or allele sub effects contain non-numeric values")
# check: ensure unique maps match no of groups if map more than 1
grpg = pd.unique(group.iloc[:, 0]) # groups of individuals
grp_chrom = gmap.shape[1]-3 # no of unique maps
gmat = haptogen(gmat, progress)
if grp_chrom > 1 and grp_chrom != grpg.size:
sys.exit("no. of unique maps does not match no. of groups")
# check no of markers in genotype and map and marker effects match
no_markers = gmap.shape[0] # no of markers
if no_markers != gmat.shape[1] or no_markers != meff.shape[0]:
sys.exit("markers nos in gen, chrm or marker effects don't match")
# check: ordered marker distance or recombination rates
for grn in range(grp_chrom):
for chrm in pd.unique(gmap.iloc[:, 0]):
mpx = np.array(gmap.iloc[:, 3+grn][gmap.iloc[:, 0] == chrm])
if not (mpx == np.sort(sorted(mpx))).any():
sys.exit(
f"Faulty marker map on chr {chrm} for grp {grpg[grn]}")
if progress:
print('Data passed the test!')
print("Number of individuals: ", len(id_indgrp))
print("Number of groups: ", len(grpg), ": ", grpg)
print("Number of specific maps:", grp_chrom)
print("Number of chromosomes: ", len(pd.unique(gmap.iloc[:, 0])))
print("Total no. markers: ", no_markers)
print("Number of trait(s): ", meff.columns.size)
print("Trait name(s) and Index weight(s)")
if meff.columns.size == 1:
print(meff.columns[0], ": ", indwt[0])
elif meff.columns.size > 1:
for i in range(meff.columns.size):
print(meff.columns[i], ": ", indwt[i])
self.gmap = gmap
self.meff = meff
self.gmat = gmat
self.group = group
self.indwt = indwt
def elem_cor(mylist, mprc, ngp, mposunit, method, chrm):
"""Derive pop cov matrix."""
if method == 1: # Bonk et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = np.exp(-2*(np.abs(mprc - mprc[:, None])/100))/4
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (1-(2*mprc))/4
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
elif method == 2: # Santos et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = (-1*(np.abs(mprc - mprc[:, None])/200))+0.25
cutoff = (-1*(50/200))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (-1*(mprc/2))+0.25
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
cutoff = (-1*(0.5/2))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
# append chromosome-specific covariance matrix to list
mylist[int(ngp)].append(tmp)
return mylist
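# Worked example (added) of the Bonk et al. covariance element used in elem_cor
# when marker positions are given in cM: covariance decays as exp(-2d/100)/4 with
# pairwise distance d.
example_pos = np.array([0.0, 10.0, 25.0])
example_cov = np.exp(-2 * (np.abs(example_pos - example_pos[:, None]) / 100)) / 4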
def popcovmat(info, mposunit, method):
"""
Derive population-specific covariance matrices.
Parameters
----------
info : class object
A class object created using the function "datacheck"
mposunit : string
A string containing "cM" or "reco".
method : int
An integer with a value of 1 for Bonk et al.'s approach or
2 for Santos et al.'s approach.
Returns
-------
mylist : list
A list containing group-specific pop covariance matrices for each chr.
"""
if mposunit not in ("cM", "cm", "CM", "Cm", "reco", "RECO"):
sys.exit("marker unit should be either cM or reco")
# unique group name for naming the list if map is more than 1
probn = pd.unique(info.group.iloc[:, 0].astype(str)).tolist()
chromos = pd.unique(info.gmap.iloc[:, 0]) # chromosomes
no_grp = info.gmap.shape[1]-3 # no of maps
mylist = [] # list stores chromosome-wise covariance matrix
for ngp in range(no_grp):
mylist.append([])
# marker position in cM or recombination rates
grouprecodist = info.gmap.iloc[:, 3+ngp]
for chrm in chromos:
mpo = np.array(grouprecodist[info.gmap.iloc[:, 0] == (chrm)])
elem_cor(mylist, mpo, ngp, mposunit, method, chrm)
if no_grp > 1:
# if map is more than one, name list using group names
mylist = dict(zip(probn, mylist))
return mylist
@njit
def makemems(gmat, meff):
"""Set up family-specific marker effects (Mendelian sampling)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 4:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 3:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
@njit
def makemebv(gmat, meff):
"""Set up family-specific marker effects (GEBV)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 2:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 1:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
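# Tiny worked example (added): coded genotypes 1 and 2 map to +effect and -effect,
# while 3 and 4 (heterozygotes) contribute nothing to the breeding-value part.
# makemebv is the njit-compiled helper defined just above.
example_bv = makemebv(np.array([[1, 2, 3, 4]]), np.array([0.5, 0.5, 0.5, 0.5]))
# -> [[ 0.5, -0.5, 0. , 0. ]]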
def traitspecmatrices(gmat, meff):
"""Store trait-specific matrices in a list."""
notr = meff.shape[1] # number of traits
slist = [] # list stores trait-specific matrices
slist.append([])
for i in range(notr):
# specify data type for numba
mefff = np.array(meff.iloc[:, i], float)
matrix_ms = makemems(gmat, mefff)
slist[0].append(matrix_ms)
return slist
def namesdf(notr, trait_names):
"""Create names of dataframe columns for Mendelian co(var)."""
tnn = np.zeros((notr, notr), 'U20')
tnn = np.chararray(tnn.shape, itemsize=30)
for i in range(notr):
for trt in range(notr):
if i == trt:
tnn[i, trt] = str(trait_names[i])
elif i != trt:
tnn[i, trt] = "{}_{}".format(trait_names[i], trait_names[trt])
colnam = tnn[np.tril_indices(notr)]
return colnam
def mrmmult(temp, covmat):
"""Matrix multiplication (MRM' or m'Rm)."""
return temp @ covmat @ temp.T
def dgmrm(temp, covmat):
"""Matrix multiplication (MRM') for bigger matrices."""
temp1111 = scipy.linalg.blas.dgemm(alpha=1.0, a=temp, b=covmat)
return scipy.linalg.blas.dgemm(alpha=1.0, a=temp1111, b=temp.T)
def progr(itern, total):
"""Print progress of a task."""
fill, printend, prefix, suffix = '█', "\r", 'Progress:', 'Complete'
deci, length = 0, 50
percent = ("{0:." + str(deci) + "f}").format(100 * (itern / float(total)))
filledlen = int(length * itern // total)
bars = fill * filledlen + '-' * (length - filledlen)
print(f'\r{prefix} |{bars}| {percent}% {suffix}', end=printend)
if itern == total:
print()
def subindcheck(info, sub_id):
"""Check if inds provided in pd.DataFrame (sub_id) are in group data."""
sub_id = pd.DataFrame(sub_id).reset_index(drop=True)
if sub_id.shape[1] != 1:
sys.exit("Individuals' IDs (sub_id) should be provided in one column")
numbers = info.group.iloc[:, 1].astype(str).tolist()
sub_id = sub_id.squeeze().astype(str).tolist()
aaa = [numbers.index(x) if x in numbers else None for x in sub_id]
if any(x is None for x in aaa):
    sys.exit("Some individual ID could not be found in group data")
aaa = np.array(aaa)
return aaa
def msvarcov_g_st(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for single trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
msvmsc = np.empty((matsub.shape[0], 1))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr, notr)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, 0] = mscov
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
msvmsc.columns = info.meff.columns
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g_mt(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for multiple traits."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
mad = len(np.zeros((notr+1, notr+1))[np.tril_indices(notr+1)])
msvmsc = np.empty((matsub.shape[0], mad))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr+1, notr+1)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr+1, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
temp[notr, :] = np.matmul(info.indwt.T, temp[0:notr, :])
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, :] = mscov[np.tril_indices(notr+1)]
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
tnames = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, tnames).decode('utf-8')
msvmsc.columns = colnam
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g(info, covmat, sub_id, progress=False):
"""
Derive Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
progress : bool, optional; print progress of the function if True
Returns
-------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
Note: If sub_id is None, Mendelian (co-)variance will be estimated for
all individuals. Otherwise, Mendelian (co-)variance will be estimated for
the individuals in sub_id
"""
notr = info.meff.columns.size
if notr == 1:
msvmsc = msvarcov_g_st(info, covmat, sub_id, progress)
elif notr > 1:
msvmsc = msvarcov_g_mt(info, covmat, sub_id, progress)
return msvmsc
def array2sym(array):
"""Convert array to stdized symm mat, and back to array without diags."""
dfmsize = array.size
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
iii, jjj = np.tril_indices(notr)
mat = np.empty((notr, notr), float)
mat[iii, jjj], mat[jjj, iii] = array, array
mat = np.array(mat)
mat1 = cov2corr(mat)
return np.array(mat1[np.tril_indices(notr, k=-1)])
def msvarcov_gcorr(msvmsc):
"""
Standardize Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
created using msvarcov_g function
Returns
-------
dfcor : pandas.DataFrame
containing standardized Mendelian sampling (co)variance
"""
if msvmsc.columns.size == 3:
sys.exit("Correlation cannot be derived for a single trait")
dfm = msvmsc.iloc[:, 2:msvmsc.shape[1]] # exclude ID and group
dfmsize = dfm.shape[1]
# derive number of traits
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
# standardize covariance between traits
dfcor = dfm.apply(array2sym, axis=1)
# extract column names
listnames = dfm.columns.tolist()
cnames = [x for x in listnames if "_" in x]
# convert pd.series of list to data frame
dfcor = pd.DataFrame.from_dict(dict(zip(dfcor.index, dfcor.values))).T
dfcor.columns = cnames
# insert ID and group info
dfcor = [pd.DataFrame(msvmsc.iloc[:, 0:2]), dfcor] # add ID and GRP
dfcor = pd.concat(dfcor, axis=1)
return dfcor
def calcgbv(info, sub_id):
"""Calculate breeding values for each trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
no_individuals = matsub.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float) # type spec for numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, 0] = matrix_me.sum(axis=1) # sum all effects
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float) # type spec 4 numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, i] = matrix_me.sum(axis=1) # sum all effects for each trait
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i] # Agg gen
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "ID", idn, True) # insert ID
gbv.insert(1, "Group", groupsex, True) # insert group
return gbv
def calcprob(info, msvmsc, thresh):
"""Calculate the probability of breeding top individuals."""
aaa = subindcheck(info, pd.DataFrame(msvmsc.iloc[:, 0]))
gbvall = calcgbv(info, None) # calc GEBV for all inds used by thresh
gbv = gbvall.iloc[aaa, :].reset_index(drop=True) # GEBV matching msvmsc
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
probdf = np.zeros((no_individuals, notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh) # threshold
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+2)], scale=np.sqrt(msvmsc.iloc[:, 0+2]))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh) # threshold
probdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
ttt = np.quantile(gbvall.iloc[:, (i+2)], q=1-thresh) # threshold
probdf[:, i] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (i+2)], scale=np.sqrt(
msvmsc.iloc[:, (t_ind[i])+2]))
probdf[:, i] = np.nan_to_num(probdf[:, i]) # convert NaN to zero
probdf[:, i] = 1 - probdf[:, i]
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh)
probdf[:, notr] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (notr+2)], scale=np.sqrt(
msvmsc["AG"]))
probdf[:, notr] = np.nan_to_num(probdf[:, notr]) # Agg
probdf[:, notr] = 1 - probdf[:, notr]
probdf = pd.DataFrame(probdf) # convert matrix to dataframe
colnames = np.concatenate((trait_names, "ABV"), axis=None)
probdf.columns = colnames
probdf = [pd.DataFrame(gbv.iloc[:, 0:2]), probdf] # add ID and GRP
probdf = pd.concat(probdf, axis=1)
return probdf
def calcindex(info, msvmsc, const):
"""Calculate the index if constant is known."""
sub_id = pd.DataFrame(msvmsc.iloc[:, 0])
gbv = calcgbv(info, sub_id) # calc GEBV
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size
if notr == 1:
indexdf = np.zeros((no_individuals, notr))
indexdf[:, 0] = (gbv.iloc[:, (0+2)]/2) + np.sqrt(
msvmsc.iloc[:, 0+2])*const
indexdf = pd.DataFrame(indexdf)
indexdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
indexdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
indexdf[:, i] = (gbv.iloc[:, (i+2)]/2) + np.sqrt(
msvmsc.iloc[:, (t_ind[i]+2)])*const
indexdf[:, notr] = (gbv.iloc[:, (notr+2)]/2) + np.sqrt(
msvmsc["AG"])*const
indexdf = pd.DataFrame(indexdf)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
indexdf.columns = colnames
indexdf = [pd.DataFrame(gbv.iloc[:, 0:2]), indexdf] # add ID and GRP
indexdf = pd.concat(indexdf, axis=1)
return indexdf
def selstrat_g(selstrat, info, sub_id, msvmsc, throrconst):
"""
Calc selection criteria (GEBV, PBTI, or index) using gametic approach.
Parameters
----------
selstrat : str
A str containing any of GEBV, PBTI or index
info : class object
A class object created using the function "datacheck"
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
msvmsc : pandas.DataFrame
DF created using the function "msvarcov_g"
throrconst : float
If selstrat is PBTI, a throrconst of value 0.05 sets threshold at
top 5% of GEBV. If selstrat is index, throrconst is a constant.
If selstrat is GEBV, throrconst can be any random value.
Returns
-------
data : pandas.DataFrame
Index: RangeIndex
Columns:
ID, Group, trait names and Aggregate Breeding Value (ABV)
Note: If selstrat is GEBV, None may be used for throrconst and msvmsc.
If sub_id is None and selstrat is GEBV, GEBVs will be estimated for all
individuals. However, if selstrat is not GEBV, the chosen selection
criterion will be estimated for all individuals in msvmsc data frame.
"""
if selstrat in ("PBTI", "pbti", "index", "INDEX") and msvmsc is None:
sys.exit("Provide Mendelian (co-)variance dataframe: 'msvmsc'")
if selstrat in ("PBTI", "pbti", "index", "INDEX") and throrconst is None:
sys.exit("Provide value for throrconst parameter")
if selstrat not in ('GEBV', 'gebv', 'PBTI', 'pbti', 'index', 'INDEX'):
sys.exit("selection strategy should be one of GEBV, PBTI or INDEX")
if selstrat in ('GEBV', 'gebv'):
data = calcgbv(info, sub_id)
elif selstrat in ('PBTI', 'pbti'):
if throrconst > 1 or throrconst < 0:
sys.exit("value must be in the range of 0 and 1")
data = calcprob(info, msvmsc, throrconst)
elif selstrat in ('index', 'INDEX'):
data = calcindex(info, msvmsc, throrconst)
return data
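# Illustrative usage sketch (added for clarity, not part of the original API
# surface; assumes an `info` object from "datacheck" and an `msvmsc` frame
# from "msvarcov_g", as described in the docstring above):
# gebv_all = selstrat_g("GEBV", info, sub_id=None, msvmsc=None, throrconst=None)
# pbti_top5 = selstrat_g("PBTI", info, sub_id=None, msvmsc=msvmsc, throrconst=0.05)
# index_df = selstrat_g("index", info, sub_id=None, msvmsc=msvmsc, throrconst=2.0)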
def cov2corr(cov):
"""Convert covariance to correlation matrix."""
cov = np.asanyarray(cov)
std_ = np.sqrt(np.diag(cov))
with np.errstate(invalid='ignore'):
corr = cov / np.outer(std_, std_)
return corr
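# Small worked example (hedged, for illustration only): for cov = [[4., 2.],
# [2., 1.]] the standard deviations are [2., 1.], so cov2corr returns
# [[1., 1.], [1., 1.]]; each entry is cov_ij / (std_i * std_j).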
def aggen(us_ind, no_markers, slst, indwt):
"""Set up additive effects matrix of aggregate genotype."""
mmfinal = np.empty((len(us_ind), no_markers))
xxx = 0
for iii in us_ind:
tmpmt1 = np.array([slst[0][trt][iii, :] for trt in range(indwt.size)])
mmfinal[xxx, :] = np.matmul(indwt.transpose(), tmpmt1)
xxx = xxx + 1
return mmfinal
def chr_int(xxxxx):
"""Format chromomosome of interest parameter."""
if 'all' in xxxxx:
xxxxx = 'all'
elif 'none' in xxxxx:
xxxxx = 'none'
else:
xxxxx = np.array([int(i) for i in xxxxx])
return xxxxx
def writechr(covtmpx, chrinterest, chrm, trtnam, probx, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
def writechrunspec(covtmpx, chrinterest, chrm, trtnam, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
def grtonum(numnx):
"""Map chracters to numeric (0-no of groups)."""
numnx = numnx.reset_index(drop=True)
probn = pd.unique(numnx).tolist()
alt_no = np.arange(0, len(probn), 1)
noli = numnx.tolist()
numnx = np.array(list(map(dict(zip(probn, alt_no)).get, noli, noli)))
return numnx, probn
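# Example of the mapping with a hypothetical input: a Series with values
# ['M', 'F', 'M'] is returned as (array([0, 1, 0]), ['M', 'F']), i.e. labels
# are replaced by integers in order of first appearance.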
def datret(info, rw_nms, pfnp, us_ind, slist, covmat, cov_indxx, stdsim,
progress):
"""Return sim mat based on aggregate genotypes."""
snpindexxxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
if info.meff.shape[1] == 1 and not stdsim:
mat = cov_indxx
elif info.meff.shape[1] == 1 and stdsim:
mat = cov2corr(cov_indxx)
elif info.meff.shape[1] > 1:
if info.gmap.shape[1]-3 > 1:
rw_nms = pd.DataFrame(rw_nms)
rw_nms.to_csv(f"order of inds in mat grp {pfnp}.csv", index=False)
if progress:
print('Creating similarity matrix based on aggregate genotype')
progr(0, max(pd.unique(info.gmap.iloc[:, 0])))
tmpmt1 = aggen(us_ind, info.gmap.shape[0], slist, info.indwt)
# stores ABV covariance btw inds
mat = np.zeros((len(us_ind), len(us_ind)))
# loop over chromosomes
for chrm in pd.unique(info.gmap.iloc[:, 0]):
s_ind = np.array(snpindexxxx[info.gmap.iloc[:, 0] == (chrm)])
if info.gmap.shape[1]-3 == 1:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[0][chrm-1]))
else:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[pfnp][chrm-1]))
mat = mat + covtmpx
if progress:
progr(chrm, max(pd.unique(info.gmap.iloc[:, 0])))
if stdsim:
mat = cov2corr(mat)
return mat
def mrmcals(info, us_ind, stdsim, slist, covmat, probn, chrinterest, save,
progress):
"""Compute similarity matrix for each chromosome."""
if progress:
progr(0, info.meff.columns.size)
for i in range(info.meff.columns.size):
cov_indxx = np.zeros((len(us_ind), len(us_ind)))
for chrm in pd.unique(info.gmap.iloc[:, 0]):
s_ind = np.array(np.arange(0, info.gmap.shape[0], 1
)[info.gmap.iloc[:, 0] == (chrm)])
if info.gmap.shape[1]-3 == 1: # map is 1
covtmpx = abs(dgmrm(slist[0][i][:, s_ind], covmat[0][chrm-1]))
else: # if map is more than 1
covtmpx = abs(dgmrm(slist[0][i][us_ind[:, None], s_ind],
covmat[probn][chrm-1]))
cov_indxx = cov_indxx + covtmpx # sums up chrm-specific sims
if len(pd.unique(info.group.iloc[:, 0].astype(str))) == 1:
writechrunspec(covtmpx, chrinterest, chrm,
info.meff.columns[i], stdsim)
else:
writechr(covtmpx, chrinterest, chrm, info.meff.columns[i],
probn, stdsim) # write sim to file
if stdsim:
if save is True:
if info.gmap.shape[1]-3 == 1:
covxfile = "{}/Stdsim mat for {}.npy".format(
os.getcwd(), info.meff.columns[i])
else:
covxfile = "{}/Stdsim mat for {} grp {}.npy".format(
os.getcwd(), info.meff.columns[i], probn)
np.save(covxfile, cov2corr(cov_indxx)) # write std sim mats
else:
if save is True:
if info.gmap.shape[1]-3 == 1:
covxfile = "{}/Sim mat for {}.npy".format(
os.getcwd(), info.meff.columns[i])
else:
covxfile = "{}/Sim mat for {} grp {}.npy".format(
os.getcwd(), info.meff.columns[i], probn)
np.save(covxfile, cov_indxx) # write sim matrices
if progress:
progr(i + 1, info.meff.columns.size)
return cov_indxx
def simmat_g(info, covmat, sub_id, chrinterest, save=False, stdsim=False,
progress=False):
"""
Compute similarity matrices using gametic approach.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
chrinterest : str or list of int
list of chromosome numbers of interest or str with "all" or "none"
save : bool, optional; write trait-specific sim mats to file if true
stdsim : bool, optional; standardize sim mats (correlation form) if true
progress : bool, optional; print progress of the task if true
Returns
-------
multgrpcov : list containing similarity matrices for each group
"""
if sub_id is None:
inda = np.arange(0, info.gmat.shape[0], 1)
sub_id = pd.DataFrame(info.group.iloc[inda, 1])
aaa = subindcheck(info, sub_id)
else:
aaa = subindcheck(info, sub_id)
chrinterest = chr_int(chrinterest)
slist = traitspecmatrices(info.gmat[aaa, :], info.meff) # trt-spec mat
grp = info.gmap.shape[1]-3
if (grp == 1 and len(pd.unique(info.group.iloc[:, 0].astype(str))) > 1):
print("The same map will be used for all groups")
numbers, probn = grtonum(info.group.iloc[aaa, 0].astype(str))
multgrpcov = []
for gnp in range(grp):
multgrpcov.append([])
if grp == 1:
us_ind = np.arange(start=0, stop=info.gmat[aaa, :].shape[0],
step=1)
else:
tng = numbers == gnp
us_ind = np.array(list(compress(np.arange(0, len(tng), 1),
tng))).T
print("Processing group ", probn[gnp])
rw_nms = info.group.iloc[aaa, 1].reset_index(drop=True).astype(
str)[us_ind]
cov_indxx = mrmcals(info, us_ind, stdsim, slist, covmat, probn[gnp],
chrinterest, save, progress)
multgrpcov[int(gnp)].append(
datret(info, rw_nms, probn[gnp], us_ind, slist, covmat,
cov_indxx, stdsim, progress))
if len(probn) == 1:
break
if grp > 1 and len(probn):
multgrpcov = dict(zip(probn, multgrpcov))
return multgrpcov
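# Illustrative call (a sketch, assuming `info` comes from "datacheck" and
# `covmat` from "popcovmat" as stated in the docstring above):
# sim = simmat_g(info, covmat, sub_id=None, chrinterest="none",
#                save=False, stdsim=True, progress=True)
# With group-specific maps the result is a dict keyed by group label; with a
# single shared map it is a nested list holding one matrix for all individuals.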
def submsvmsc(msvmsc, sub_idz):
"""Extract index in msvmsc data frame."""
sub_idz = pd.DataFrame(sub_idz)
numbs = msvmsc.iloc[:, 0].astype(str).tolist()
sub_idz = sub_idz.reset_index(drop=True).squeeze()
mal = sub_idz.iloc[:, 0].astype(str).tolist()
fem = sub_idz.iloc[:, 1].astype(str).tolist()
if sub_idz is not None:
for i in mal:
if i not in numbs:
sys.exit("Individuals are not in msvmsc parameter")
for i in fem:
if i not in numbs:
sys.exit("Individuals are not in msvmsc parameter")
mal1 = [numbs.index(x) if x in numbs else None for x in mal]
fem1 = [numbs.index(x) if x in numbs else None for x in fem]
return mal1, fem1
def pot_parents(info, data, selmale, selfm):
"""Subset individuals of interest."""
trait_names = info.meff.columns
if trait_names.size == 1:
datamale = data[data.iloc[:, 1] == selmale[0]]
pos = subindcheck(info, pd.DataFrame(datamale.iloc[:, 0]))
datamale.insert(0, "pos", pos, True)
no_sire = int(datamale.shape[0] * selmale[1])
datamale = datamale.sort_values(
by=[trait_names[0]], ascending=False).iloc[0:no_sire, :]
datafemale = data[data.iloc[:, 1] == selfm[0]]
pos = subindcheck(info, pd.DataFrame(datafemale.iloc[:, 0]))
datafemale.insert(0, "pos", pos, True)
no_dam = int(datafemale.shape[0] * selfm[1])
datafemale = datafemale.sort_values(
by=[trait_names[0]], ascending=False).iloc[0:no_dam, :]
elif trait_names.size > 1:
datamale = data[data.iloc[:, 1] == selmale[0]]
pos = subindcheck(info, pd.DataFrame(datamale.iloc[:, 0]))
datamale.insert(0, "pos", pos, True)
no_sire = int(datamale.shape[0] * selmale[1])
datamale = datamale.sort_values(
by=['ABV'], ascending=False).iloc[0:no_sire, :]
datafemale = data[data.iloc[:, 1] == selfm[0]]
pos = subindcheck(info, pd.DataFrame(datafemale.iloc[:, 0]))
datafemale.insert(0, "pos", pos, True)
no_dam = int(datafemale.shape[0] * selfm[1])
datafemale = datafemale.sort_values(
by=['ABV'], ascending=False).iloc[0:no_dam, :]
matlist = np.array(np.meshgrid(
datamale.iloc[:, 0], datafemale.iloc[:, 0])).T.reshape(-1, 2)
ids = np.array(np.meshgrid(
datamale.iloc[:, 1], datafemale.iloc[:, 1])).T.reshape(-1, 2)
if trait_names.size == 1:
matndat = pd.DataFrame(index=range(matlist.shape[0]), columns=range(
4+trait_names.size))
else:
matndat = pd.DataFrame(
index=range(matlist.shape[0]), columns=range(5+trait_names.size))
matndat.iloc[:, [0, 1]] = ids
matndat.iloc[:, [2, 3]] = matlist
return matndat
def selsgebv(notr, matndat, gbv, maxmale):
"""Calculate breeding values for each trait (zygote)."""
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
elif notr > 1:
matndat.iloc[:, 4:(5+notr)] = (np.array(
gbv.iloc[mal, 2:(notr+3)]) + np.array(gbv.iloc[fem, 2:(notr+3)]))/2
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = mmat
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def selspbtizyg(notr, gbv, matndat, msvmsc, throrconst, maxmale):
"""Calculate prob of breeding top inds (zygote)."""
mal1, fem1 = submsvmsc(msvmsc, pd.DataFrame(matndat.iloc[:, 0:2]))
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
ttt = np.quantile(gbv.iloc[:, 0+2], q=1-throrconst)
msvtemp = np.array(msvmsc.iloc[mal1, 0+2]) + np.array(
msvmsc.iloc[fem1, 0+2])
matndat.iloc[:, 4] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4], scale=np.sqrt(
msvtemp))
elif notr > 1:
trait_names = gbv.columns[2:2+notr]
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
matndat.iloc[:, 4+i] = (
np.array(gbv.iloc[mal, (i+2)]) + np.array(
gbv.iloc[fem, (i+2)]))/2
ttt = np.quantile(gbv.iloc[:, 2+i], q=1-throrconst)
msvtemp = np.array(msvmsc.iloc[mal1, t_ind[i]+2]) + np.array(
msvmsc.iloc[fem1, t_ind[i]+2])
matndat.iloc[:, 4+i] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4+i], scale=np.sqrt(msvtemp))
matndat.iloc[:, 4+notr] = (
np.array(gbv.iloc[mal, (notr+2)]) + np.array(
gbv.iloc[fem, (notr+2)]))/2
ttt = np.quantile(gbv.iloc[:, 2+notr], q=1-throrconst)
msvtemp = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
matndat.iloc[:, 4+notr] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4+notr], scale=np.sqrt(msvtemp.ravel()))
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = mmat
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def selsindex(notr, gbv, matndat, msvmsc, throrconst, maxmale):
"""Calculate the index if constant is known (zygote)."""
mal1, fem1 = submsvmsc(msvmsc, pd.DataFrame(matndat.iloc[:, 0:2]))
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
msvtemp = np.array(msvmsc.iloc[mal1, 0+2]) + np.array(
msvmsc.iloc[fem1, 0+2])
matndat.iloc[:, 4] = matndat.iloc[:, 4] + np.sqrt(msvtemp)*throrconst
elif notr > 1:
trait_names = gbv.columns[2:2+notr]
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
matndat.iloc[:, 4+i] = (
np.array(gbv.iloc[mal, (i+2)]) + np.array(
gbv.iloc[fem, (i+2)]))/2
msvtemp = np.array(msvmsc.iloc[mal1, t_ind[i]+2]) + np.array(
msvmsc.iloc[fem1, t_ind[i]+2])
matndat.iloc[:, 4+i] = matndat.iloc[:, 4+i] + np.sqrt(
msvtemp)*throrconst
matndat.iloc[:, 4+notr] = (
np.array(gbv.iloc[mal, (notr+2)]) + np.array(
gbv.iloc[fem, (notr+2)]))/2
msvtemp = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
matndat.iloc[:, 4+notr] = matndat.iloc[:, 4+notr] + (
np.sqrt(msvtemp)*throrconst).ravel()
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = pd.DataFrame(mmat)
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def subindcheckzyg(info, sub_idz):
"""Check sex and if matepairs provided in sub_idz are in group data."""
numbs = info.group.iloc[:, 1].astype(str).tolist()
sub_idz = pd.DataFrame(sub_idz).reset_index(drop=True).squeeze()
mal = sub_idz.iloc[:, 0].astype(str).tolist()
fem = sub_idz.iloc[:, 1].astype(str).tolist()
mal1 = [numbs.index(x) if x in numbs else None for x in mal]
fem1 = [numbs.index(x) if x in numbs else None for x in fem]
if len(pd.unique(info.group.iloc[mal1, 0])) != 1:
sys.exit("Group class in sub_idz is not unique to ID of males")
if len(pd.unique(info.group.iloc[fem1, 0])) != 1:
sys.exit("Group class in sub_idz is not unique to ID of females")
idn = sub_idz.reset_index(drop=True)
mgp = list(set(info.group.iloc[mal1, 0]))
fgp = list(set(info.group.iloc[fem1, 0]))
if len(mgp) > 1 or len(fgp) > 1:
sys.exit("multiple sexes detected in data")
probn = [mgp[0], fgp[0]]
return mal1, fem1, idn, probn
def calcgbvzygsub(info, sub_idz):
"""Calc breeding values for matepairs."""
mal1, fem1, idn, _ = subindcheckzyg(info, sub_idz)
no_individuals, trait_names = idn.shape[0], info.meff.columns
notr = trait_names.size
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float)
matrix_me1 = makemebv(info.gmat[mal1, :], mefff)
matrix_me2 = makemebv(info.gmat[fem1, :], mefff)
gbv[:, 0] = (matrix_me1.sum(axis=1) + matrix_me2.sum(axis=1))/2
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float)
matrix_me1 = makemebv(info.gmat[mal1, :], mefff)
matrix_me2 = makemebv(info.gmat[fem1, :], mefff)
gbv[:, i] = (matrix_me1.sum(axis=1) + matrix_me2.sum(axis=1))/2
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i]
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "FemaleIndex", fem1, True) # insert ID
gbv.insert(0, "MaleIndex", mal1, True) # insert ID
gbv.insert(0, "FemaleID", idn.iloc[:, 1], True) # insert ID
gbv.insert(0, "MaleID", idn.iloc[:, 0], True) # insert ID
return gbv
def calcprobzygsub(info, msvmsc, thresh, sub_idz):
"""Calculate the probability of breeding top individuals."""
subindcheckzyg(info, sub_idz)
mal1, fem1 = submsvmsc(msvmsc, sub_idz)
gbv = calcgbvzygsub(info, sub_idz)
trait_names = info.meff.columns # traits names
notr = trait_names.size
gbvall = calcgbv(info, None)
if notr == 1:
probdf = np.zeros((gbv.shape[0], notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.iloc[mal1, (0+2)]) + np.array(
msvmsc.iloc[fem1, (0+2)])
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+4)], scale=np.sqrt(msvmsc111))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
probdf = np.zeros((gbv.shape[0], notr+1))
t_ind =
|
np.arange(colnam.shape[0])
|
numpy.arange
|
"""
Finite difference methods module.
"""
from __future__ import division, print_function
import warnings
import numpy as np
from numpy import linalg
from scipy import special
from numdifftools.extrapolation import convolve
from numdifftools.multicomplex import Bicomplex
_SQRT_J = (1j + 1.0) / np.sqrt(2.0) # = 1j**0.5
# step_ratio, parity, nterms
FD_RULES = {}
# (2.0, 1, 1): array([[1.]]),
# (2.0, 1, 2): array([[-0.333333333333333333333, 2.666666666666666666666666666],
# [8., -16.]]),
# (2.0, 1, 3): array([[2.22222222222222222e-02, -8.8888888888889e-01, 5.6888888888888889e+00],
# [-2.666666666666667e+00, 9.0666666666666667e+01, -1.7066666666666667e+02],
# [1.7066666666666667e+02, -1.7066666666666667e+03, 2.7306666666666667e+03]]),
# (2.0, 0, 2): array([[-1., 4.],
# [4., -8.]]),
# (2.0, 0, 4): array([[-4.76190476e-02, 1.33333333e+00, -1.06666667e+01, 2.43809524e+01],
# [1.33333333e+00, -3.46666667e+01, 2.34666667e+02, -3.41333333e+02],
# [-1.60000000e+01, 3.52000000e+02, -1.66400000e+03, 2.04800000e+03],
# [7.31428571e+01, -1.02400000e+03, 4.09600000e+03, -4.68114286e+03]])}
# @PydevIgnore
def _assert(cond, msg):
if not cond:
raise ValueError(msg)
def make_exact(h):
"""Make sure h is an exact representable number
This is important when calculating numerical derivatives and is
accomplished by adding 1.0 and then subtracting 1.0.
"""
return (h + 1.0) - 1.0
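# For example, with h = 1e-20 the sum 1.0 + h rounds to exactly 1.0 in double
# precision, so make_exact(1e-20) returns 0.0; whatever step survives is then
# exactly representable, which keeps the difference quotients consistent.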
class DifferenceFunctions(object):
"""
Class defining difference functions
Notes
-----
The d
"""
# pylint: disable=unused-argument
@staticmethod
def _central_even(f, f_x0i, x0i, h):
return (f(x0i + h) + f(x0i - h)) / 2.0 - f_x0i
@staticmethod
def _central(f, f_x0i, x0i, h): # @UnusedVariable
return (f(x0i + h) - f(x0i - h)) / 2.0
@staticmethod
def _forward(f, f_x0i, x0i, h):
return f(x0i + h) - f_x0i
@staticmethod
def _backward(f, f_x0i, x0i, h):
return f_x0i - f(x0i - h)
@staticmethod
def _complex(f, f_x, x, h): # @UnusedVariable
return f(x + 1j * h).imag
@staticmethod
def _complex_odd(f, f_x, x, h):
i_h = h * _SQRT_J
return ((_SQRT_J / 2.) * (f(x + i_h) - f(x - i_h))).imag
@staticmethod
def _complex_odd_higher(f, f_x, x, h):
i_h = h * _SQRT_J
return ((3 * _SQRT_J) * (f(x + i_h) - f(x - i_h))).real
@staticmethod
def _complex_even(f, f_x, x, h):
i_h = h * _SQRT_J
return (f(x + i_h) + f(x - i_h)).imag
@staticmethod
def _complex_even_higher(f, f_x, x, h):
i_h = h * _SQRT_J
return 12.0 * (f(x + i_h) + f(x - i_h) - 2 * f_x).real
@staticmethod
def _multicomplex(f, f_x, x, h):
z = Bicomplex(x + 1j * h, 0)
return Bicomplex.__array_wrap__(f(z)).imag
@staticmethod
def _multicomplex2(f, f_x, x, h):
z = Bicomplex(x + 1j * h, h)
return Bicomplex.__array_wrap__(f(z)).imag12
class JacobianDifferenceFunctions(object):
"""Class defining Jacobian difference functions"""
# pylint: disable=unused-argument
@staticmethod
def increments(n, h):
"""Returns Jacobian steps"""
e_i = np.zeros(np.shape(h), float)
for k in range(n):
e_i[k] = h[k]
yield e_i
e_i[k] = 0
@staticmethod
def _central(f, f_x, x, h):
n = len(x)
steps = JacobianDifferenceFunctions.increments(n, h)
return np.array([(f(x + hi) - f(x - hi)) / 2.0 for hi in steps])
@staticmethod
def _central_even(f, f_x, x, h):
n = len(x)
steps = JacobianDifferenceFunctions.increments(n, h)
return np.array([(f(x + hi) + f(x - hi)) / 2.0 - f_x for hi in steps])
@staticmethod
def _backward(f, f_x, x, h):
n = len(x)
steps = JacobianDifferenceFunctions.increments(n, h)
return np.array([f_x - f(x - hi) for hi in steps])
@staticmethod
def _forward(f, f_x, x, h):
n = len(x)
steps = JacobianDifferenceFunctions.increments(n, h)
return np.array([f(x + hi) - f_x for hi in steps])
@staticmethod
def _complex(f, f_x, x, h):
n = len(x)
steps = JacobianDifferenceFunctions.increments(n, h)
return np.array([f(x + 1j * ih).imag for ih in steps])
@staticmethod
def _complex_even(f, f_x, x, h):
n = len(x)
j_1 = _SQRT_J
steps = JacobianDifferenceFunctions.increments(n, h)
return np.array([(f(x + j_1*ih) + f(x - j_1*ih)).imag for ih in steps])
@staticmethod
def _complex_odd(f, f_x, x, h):
n = len(x)
j_1 = _SQRT_J
steps = JacobianDifferenceFunctions.increments(n, h)
return np.array([((j_1 / 2.) * (f(x + j_1 * ih) - f(x - j_1 * ih))).imag for ih in steps])
@staticmethod
def _multicomplex(f, f_x, x, h):
n = len(x)
steps = JacobianDifferenceFunctions.increments(n, h)
cmplx_wrap = Bicomplex.__array_wrap__
partials = [cmplx_wrap(f(Bicomplex(x + 1j * hi, 0))).imag for hi in steps]
return np.array(partials)
class HessdiagDifferenceFunctions(object):
"""Class defining Hessdiag difference functions
References
----------
Ridout, M. S. (2009) Statistical applications of the complex-step method
of numerical differentiation. The American Statistician, 63, 66-74
"""
# pylint: disable=unused-argument
@staticmethod
def _central2(f, f_x, x, h):
"""Eq. 8 in Ridout (2009)."""
n = len(x)
increments = np.identity(n) * h
partials = [(f(x + 2 * hi) + f(x - 2 * hi)
+ 2 * f_x - 2 * f(x + hi) - 2 * f(x - hi)) / 4.0
for hi in increments]
return np.array(partials)
@staticmethod
def _central_even(f, f_x, x, h):
"""Eq. 9 in Ridout (2009)."""
n = len(x)
increments = np.identity(n) * h
partials = [(f(x + hi) + f(x - hi)) / 2.0 - f_x for hi in increments]
return np.array(partials)
@staticmethod
def _backward(f, f_x, x, h):
n = len(x)
increments = np.identity(n) * h
partials = [f_x - f(x - hi) for hi in increments]
return np.array(partials)
@staticmethod
def _forward(f, f_x, x, h):
n = len(x)
increments = np.identity(n) * h
partials = [f(x + hi) - f_x for hi in increments]
return np.array(partials)
@staticmethod
def _multicomplex2(f, f_x, x, h):
n = len(x)
increments = np.identity(n) * h
cmplx_wrap = Bicomplex.__array_wrap__
partials = [cmplx_wrap(f(Bicomplex(x + 1j * hi, hi))).imag12
for hi in increments]
return np.array(partials)
@staticmethod
def _complex_even(f, f_x, x, h):
n = len(x)
increments = np.identity(n) * h * (1j + 1) / np.sqrt(2)
partials = [(f(x + hi) + f(x - hi)).imag for hi in increments]
return np.array(partials)
class HessianDifferenceFunctions(object):
"""Class defining Hessian difference functions
References
----------
Ridout, M. S. (2009)
"Statistical applications of the complex-step method of numerical differentiation",
The American Statistician, 63, 66-74
"""
# pylint: disable=unused-argument
@staticmethod
def _complex_even(f, f_x, x, h):
"""
Calculate Hessian with complex-step derivative approximation
The stepsize is the same for the complex and the finite difference part
Eq 10 in Ridout (2009).
"""
n = len(x)
eee = np.diag(h)
hess = 2. * np.outer(h, h)
for i in range(n):
for j in range(i, n):
hess[i, j] = (f(x + 1j * eee[i] + eee[j])
- f(x + 1j * eee[i] - eee[j])).imag / hess[j, i]
hess[j, i] = hess[i, j]
return hess
@staticmethod
def _multicomplex2(f, f_x, x, h):
"""Calculate Hessian with Bicomplex-step derivative approximation"""
n = len(x)
eee = np.diag(h)
hess = np.outer(h, h)
cmplx_wrap = Bicomplex.__array_wrap__
for i in range(n):
for j in range(i, n):
zph = Bicomplex(x + 1j * eee[i, :], eee[j, :])
hess[i, j] = cmplx_wrap(f(zph)).imag12 / hess[j, i]
hess[j, i] = hess[i, j]
return hess
@staticmethod
def _central_even(f, f_x, x, h):
"""Eq 9 in Ridout (2009)."""
n = len(x)
eee = np.diag(h)
dtype = np.result_type(f_x, float) # make sure it is at least float64
hess = np.empty((n, n), dtype=dtype)
np.outer(h, h, out=hess)
for i in range(n):
e_i = eee[i, :]
hess[i, i] = (f(x + 2 * e_i) - 2 * f_x + f(x - 2 * e_i)) / (4. * hess[i, i])
for j in range(i + 1, n):
e_j = eee[j, :]
hess[i, j] = (f(x + e_i + e_j) - f(x + e_i - e_j)
- f(x - e_i + e_j) + f(x - e_i - e_j)) / (4. * hess[j, i])
hess[j, i] = hess[i, j]
return hess
@staticmethod
def _central2(f, f_x, x, h):
"""Eq. 8 in Ridout (2009)"""
n = len(x)
eee = np.diag(h)
dtype = np.result_type(f_x, float)
f_xpe = np.empty(n, dtype=dtype)
f_xme = np.empty(n, dtype=dtype)
for i in range(n):
f_xpe[i] = f(x + eee[i])
f_xme[i] = f(x - eee[i])
hess = np.empty((n, n), dtype=dtype)
np.outer(h, h, out=hess)
for i in range(n):
for j in range(i, n):
hess[i, j] = (f(x + eee[i, :] + eee[j, :])
+ f(x - eee[i, :] - eee[j, :])
- f_xpe[i] - f_xpe[j] + f_x
- f_xme[i] - f_xme[j] + f_x) / (2 * hess[j, i])
hess[j, i] = hess[i, j]
return hess
@staticmethod
def _forward(f, f_x, x, h):
"""Eq. 7 in Ridout (2009)"""
n = len(x)
eee = np.diag(h)
dtype = np.result_type(f_x, float)
g = np.empty(n, dtype=dtype)
for i in range(n):
g[i] = f(x + eee[i, :])
hess = np.empty((n, n), dtype=dtype)
np.outer(h, h, out=hess)
for i in range(n):
for j in range(i, n):
hess[i, j] = (f(x + eee[i, :] + eee[j, :]) - g[i] - g[j] + f_x) / hess[j, i]
hess[j, i] = hess[i, j]
return hess
@staticmethod
def _backward(f, f_x, x, h):
return HessianDifferenceFunctions._forward(f, f_x, x, -h)
class LogRule(object):
"""Log spaced finite difference rule class
Parameters
----------
n : int, optional
Order of the derivative.
method : {'central', 'complex', 'multicomplex', 'forward', 'backward'}
defines the method used in the approximation
order : int, optional
defines the order of the error term in the Taylor approximation used.
For 'central' and 'complex' methods, it must be an even number.
Examples
--------
>>> from numdifftools.finite_difference import LogRule
>>> np.allclose(LogRule(n=1, method='central', order=2).rule(step_ratio=2.0), 1)
True
>>> np.allclose(LogRule(n=1, method='central', order=4).rule(step_ratio=2.),
... [-0.33333333, 2.66666667])
True
>>> np.allclose(LogRule(n=1, method='central', order=6).rule(step_ratio=2.),
... [ 0.02222222, -0.88888889, 5.68888889])
True
>>> np.allclose(LogRule(n=1, method='forward', order=2).rule(step_ratio=2.), [-1., 4.])
True
>>> np.allclose(LogRule(n=1, method='forward', order=4).rule(step_ratio=2.),
... [ -0.04761905, 1.33333333, -10.66666667, 24.38095238])
True
>>> np.allclose(LogRule(n=1, method='forward', order=6).rule(step_ratio=2.),
... [ -1.02406554e-04, 1.26984127e-02, -5.07936508e-01,
... 8.12698413e+00, -5.20126984e+01, 1.07381055e+02])
True
>>> step_ratio=2.0
>>> fd_rule = LogRule(n=2, method='forward', order=4)
>>> h = 0.002*(1./step_ratio)**np.arange(6)
>>> x0 = 1.
>>> f = np.exp
>>> f_x0 = f(x0)
>>> f_del = f(x0+h) - f_x0 # forward difference
>>> f_del = fd_rule.diff(f, f_x0, x0, h) # or alternatively
>>> fder, h, shape = fd_rule.apply(f_del, h, step_ratio)
>>> np.allclose(fder, f(x0))
True
"""
_difference_functions = DifferenceFunctions()
def __init__(self, n=1, method='central', order=2):
self.n = n
self.method = method
self.order = order
# --- properties ---
@property
def _odd_derivative(self):
return self.n % 2 == 1
@property
def _even_derivative(self):
return self.n % 2 == 0
@property
def _derivative_mod_four_is_three(self):
return self.n % 4 == 3
@property
def _derivative_mod_four_is_zero(self):
return self.n % 4 == 0
@property
def eval_first_condition(self):
"""True if f(x0) needs to be evaluated given the differentiation method."""
even_derivative = self._even_derivative
return ((even_derivative and self.method in ('central', 'central2')) or
self.method in ['forward', 'backward'] or
self.method == 'complex' and self._derivative_mod_four_is_zero)
@property
def _complex_high_order(self):
return self.method == 'complex' and (self.n > 1 or self.order >= 4)
@property
def richardson_step(self):
"""The step between exponents in the error polynomial of the Richardson extrapolation."""
complex_step = 4 if self._complex_high_order else 2
return dict(central=2,
central2=2,
complex=complex_step,
multicomplex=2).get(self.method, 1)
@property
def method_order(self):
"""The leading order of the truncation error of the Richardson extrapolation."""
step = self.richardson_step
# Make sure it is even and at least 2 or 4
order = max((self.order // step) * step, step)
return order
def _parity_complex(self, order, method_order):
if self.n == 1 and method_order < 4:
return (order % 2) + 1
return (3
+ 2 * int(self._odd_derivative)
+ int(self._derivative_mod_four_is_three)
+ int(self._derivative_mod_four_is_zero))
def _parity(self, method, order, method_order):
if method.startswith('central'):
return (order % 2) + 1
if method == 'complex':
return self._parity_complex(order, method_order)
return 0
@staticmethod
def _fd_matrix(step_ratio, parity, nterms):
"""
Return matrix for finite difference and complex step derivation.
Parameters
----------
step_ratio : real scalar
ratio between steps in unequally spaced difference rule.
parity : scalar, integer
0 (one sided, all terms included but zeroth order)
1 (only odd terms included)
2 (only even terms included)
3 (only every 4'th order terms included starting from order 2)
4 (only every 4'th order terms included starting from order 4)
5 (only every 4'th order terms included starting from order 1)
6 (only every 4'th order terms included starting from order 3)
nterms : scalar, integer
number of terms
"""
_assert(0 <= parity <= 6,
'Parity must be 0, 1, 2, 3, 4, 5 or 6! ({0:d})'.format(parity))
step = [1, 2, 2, 4, 4, 4, 4][parity]
inv_sr = 1.0 / step_ratio
offset = [1, 1, 2, 2, 4, 1, 3][parity]
c_0 = [1.0, 1.0, 1.0, 2.0, 24.0, 1.0, 6.0][parity]
c = c_0 / special.factorial(np.arange(offset, step * nterms + offset, step))
[i, j] = np.ogrid[0:nterms, 0:nterms]
return np.atleast_2d(c[j] * inv_sr ** (i * (step * j + offset)))
@property
def _flip_fd_rule(self):
return ((self._even_derivative and (self.method == 'backward'))
or (self.method == 'complex' and (self.n % 8 in [3, 4, 5, 6])))
@property
def _multicomplex_middle_name(self):
if self.method == 'multicomplex' and self.n > 1:
_assert(self.n <= 2, 'Multicomplex method only support first '
'and second order derivatives.')
return '2'
return ''
def _get_middle_name(self):
if self._even_derivative and self.method in ('central', 'complex'):
return '_even'
if self._complex_high_order and self._odd_derivative:
return '_odd'
return self._multicomplex_middle_name
def _get_last_name(self):
last = ''
if (self.method == 'complex' and self._derivative_mod_four_is_zero or
self._complex_high_order and
self._derivative_mod_four_is_three):
last = '_higher'
return last
@property
def diff(self):
"The difference function"
first = '_{0!s}'.format(self.method)
middle = self._get_middle_name()
last = self._get_last_name()
name = first + middle + last
return getattr(self._difference_functions, name)
def rule(self, step_ratio=2.0):
"""
Return finite differencing rule.
Parameters
----------
step_ratio : real scalar, optional, default 2.0
Ratio between sequential steps generated.
Notes
-----
The rule is for a nominal unit step size, and must be scaled later
to reflect the local step size.
Member method used: _fd_matrix
Member variables used:
n
order
method
"""
method = self.method
if method in ('multicomplex',) or self.n == 0:
return
|
np.ones((1,))
|
numpy.ones
|
import numpy as np
from .utils import binary_cross_entropy, check_shapes
from abc import ABC
def supported_losses():
return [x.__name__ for x in LossFunc.__subclasses__()]
class Loss:
def __init__(self, value, delta):
self.value = value
self.delta = delta
class LossFunc:
def __init__(self, pred=None, target=None):
self.pred = pred
self.target = target
self.eps = 1e-6
@check_shapes
def apply(self, p, t):
raise NotImplementedError
@property
def delta(self):
raise NotImplementedError
def __call__(self, p, t):
return self.apply(p, t)
class MSE(LossFunc, ABC):
def __init__(self):
super(MSE, self).__init__()
@check_shapes
def apply(self, p, t):
super(MSE, self).__init__(p, t)
return Loss(np.mean((p - t) ** 2, axis=0) / 2, self.delta)
@property
def delta(self):
return self.pred - self.target
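# Minimal usage sketch (illustrative; shapes are assumed to satisfy the
# `check_shapes` decorator from .utils):
# loss = MSE()(np.array([[0.5]]), np.array([[1.0]]))
# loss.value -> array([0.125])   # ((0.5 - 1.0)**2) / 2
# loss.delta -> array([[-0.5]])  # prediction minus target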
class CrossEntropy(LossFunc, ABC):
# https://cs231n.github.io/neural-networks-case-study/#grad
def __init__(self):
super(CrossEntropy, self).__init__()
@check_shapes
def apply(self, p, t):
super(CrossEntropy, self).__init__(p, t)
probs = self.soft_max(p)
loss = -np.log(probs[range(p.shape[0]), np.array(t).squeeze(-1)])
return Loss(
|
np.mean(loss, axis=0)
|
numpy.mean
|
import numpy as np
import scipy
from scipy import linalg
import cvxpy as cp
havedoneqcqpimports = False
'''READ THIS IF YOU WANT TO USE QCQP'''
#Need a very specific set of libraries
#pip install cvxpy==0.4.9
#pip install CVXcanon==0.1.0
#pip install qcqp
#AFTER THIS: Go to the python files in your computer
#For me its C:\Users\jonat\AppData\Local\Programs\Python\Python38\Lib\site-packages\cvxpy\atoms\log_sum_exp
#Change from scipy.misc import logsumexp to from scipy.special import logsumexp
#Also might require you to get a mosek license and install mosek
#pip install Mosek
#Request personal Academic License at https://www.mosek.com/products/academic-licenses/ and follow instructions
'''2nd attempt'''
#pip install gurobipy
'''
import cvxpy as cvx
#import mosek
from qcqp import *
#import qcqp as qcp
import importlib
importlib.reload(qcqp)
importlib.reload(cvx)'''
def cvxpy_density_matrix_feasibility_sdp_routine(D_matrix,E_matrix,R_matrices,F_matrices,gammas,sdp_tolerance_bound, verbose = True,additionalbetaconstraints=None,betainitialpoint=None):#additional beta constraints comes in list of list , [[matrix,value],[matrix,value]]
numstate = len(D_matrix)
D_matrix_np = np.array(D_matrix)
#D_matrix_np = 0.5*(D_matrix_np + np.conjugate(np.transpose(D_matrix_np)))
E_matrix_np = np.array(E_matrix)
#E_matrix_np = 0.5*(E_matrix_np + np.conjugate(np.transpose(E_matrix_np)))
R_matrices_np = []
F_matrices_np = []
for r in R_matrices:
R_matrices_np.append(np.array(r))
for f in F_matrices:
F_matrices_np.append(np.array(f))
beta = cp.Variable((numstate,numstate),complex=True)
constraints = [beta >> 0]
#constraints += [cp.trace(E_matrix_np @ beta) == 1]
constraints += [beta.H == beta]
if additionalbetaconstraints != None:
for con,value in additionalbetaconstraints:
constraints += [cp.trace(con@beta)==value]
a = -1j*(D_matrix_np@beta@E_matrix_np-E_matrix_np@beta@D_matrix_np)
#finalconstraints = []
for i in range(len(gammas)):
a = a + gammas[i]*(R_matrices_np[i]@beta@np.transpose(np.conjugate(R_matrices_np[i]))-0.5*F_matrices_np[i]@beta@E_matrix_np-0.5*E_matrix_np@beta@F_matrices_np[i])
#a = -1j*(D_matrix_np@beta@E_matrix_np-E_matrix_np@beta@D_matrix_np)+gammas[0]*(R_matrices_np[0]@beta@np.transpose(np.conjugate(R_matrices_np[0]))-0.5*F_matrices_np[0]@beta@E_matrix_np-0.5*E_matrix_np@beta@F_matrices_np[0])
#constraints += [cp.trace(E_matrix_np @ beta) == 1]
#constraints += [cp.trace(a@(a.H))<=sdp_tolerance_bound]
#constraints += [cp.trace(a@(a.H))>=-sdp_tolerance_bound]
if sdp_tolerance_bound == 0:
if verbose:
print('Feasibility SDP is set up with hard equality constraints')
#constraints += [a == 0]
constraints += [a == 0]
# constraints += [cp.trace(E_matrix_np @ beta) == 1] #try not enforcing the trace condition
else:
print('Feasibility SDP is set up with interval constraint <'+str(sdp_tolerance_bound)+ ' & >-'+str(sdp_tolerance_bound))
raise(RuntimeError("Not implemented yet"))
#constraints += [cp.abs(cp.trace(a@(a.H)))<=sdp_tolerance_bound]
#constraints += [cp.abs(cp.trace(a@(a.H)))>=-sdp_tolerance_bound]
# constraints += [cp.real(cp.trace(E_matrix_np @ beta)) <= 1+sdp_tolerance_bound]
# constraints += [cp.real(cp.trace(E_matrix_np @ beta)) >= 1-sdp_tolerance_bound]
prob = cp.Problem(cp.Minimize(0),constraints)
if type(betainitialpoint)!=type(None):
beta.value = betainitialpoint
prob.solve(solver=cp.MOSEK,verbose=False)
denmat = beta.value
denmat = denmat / np.trace(E_matrix_np @ denmat)
#print(denmat)
if type(denmat) == type(None):
return None,None
else:
minval = np.trace(denmat@D_matrix_np)
return denmat,minval
def cvxpy_density_matrix_feasibility_sdp_routine_old(D_matrix,E_matrix,R_matrices,F_matrices,gammas,sdp_tolerance_bound):
numstate = len(D_matrix)
D_matrix_np = np.array(D_matrix)
#D_matrix_np = 0.5*(D_matrix_np + np.conjugate(np.transpose(D_matrix_np)))
E_matrix_np = np.array(E_matrix)
#E_matrix_np = 0.5*(E_matrix_np + np.conjugate(np.transpose(E_matrix_np)))
R_matrices_np = []
F_matrices_np = []
for r in R_matrices:
R_matrices_np.append(np.array(r))
for f in F_matrices:
F_matrices_np.append(np.array(f))
beta = cp.Variable((numstate,numstate),complex=True)
constraints = [beta >> 0]
#constraints += [cp.trace(E_matrix_np @ beta) == 1]
constraints += [beta.H == beta]
a = -1j*(D_matrix_np@beta@E_matrix_np-E_matrix_np@beta@D_matrix_np)
#finalconstraints = []
for i in range(len(gammas)):
a = a + gammas[i]*(R_matrices_np[i]@beta@np.transpose(np.conjugate(R_matrices_np[i]))-0.5*F_matrices_np[i]@beta@E_matrix_np-0.5*E_matrix_np@beta@F_matrices_np[i])
#a = -1j*(D_matrix_np@beta@E_matrix_np-E_matrix_np@beta@D_matrix_np)+gammas[0]*(R_matrices_np[0]@beta@np.transpose(np.conjugate(R_matrices_np[0]))-0.5*F_matrices_np[0]@beta@E_matrix_np-0.5*E_matrix_np@beta@F_matrices_np[0])
#constraints += [cp.trace(E_matrix_np @ beta) == 1]
constraints += [a == 0]
#constraints += [cp.trace(a@(a.H))<=sdp_tolerance_bound]
#constraints += [cp.trace(a@(a.H))>=-sdp_tolerance_bound]
if sdp_tolerance_bound == 0:
print('Feasibility SDP is set up with hard constraint == 1')
#constraints += [a == 0]
constraints += [cp.trace(E_matrix_np @ beta) == 1]
else:
print('Feasibility SDP is set up with interval constraint <'+str(sdp_tolerance_bound)+ ' & >-'+str(sdp_tolerance_bound))
#constraints += [cp.abs(cp.trace(a@(a.H)))<=sdp_tolerance_bound]
#constraints += [cp.abs(cp.trace(a@(a.H)))>=-sdp_tolerance_bound]
constraints += [cp.real(cp.trace(E_matrix_np @ beta)) <= 1+sdp_tolerance_bound]
constraints += [cp.real(cp.trace(E_matrix_np @ beta)) >= 1-sdp_tolerance_bound]
prob = cp.Problem(cp.Minimize(0),constraints)
prob.solve(solver=cp.MOSEK,verbose=False)
denmat = beta.value
#print(denmat)
if type(denmat) == type(None):
return None,None
else:
minval = np.trace(denmat@D_matrix_np)
return denmat,minval
def cvxpy_density_matrix_routine(D_matrix,E_matrix):
numstate = len(D_matrix)
#print(numstate)
D_matrix_np = np.array(D_matrix)
D_matrix_np = 0.5*(D_matrix_np + np.conjugate(np.transpose(D_matrix_np)))
E_matrix_np = np.array(E_matrix)
E_matrix_np = 0.5*(E_matrix_np + np.conjugate(np.transpose(E_matrix_np)))
#Realification of E
#E_real = np.real(E_matrix_np)
#E_imag = np.imag(E_matrix_np)
#E_realified = np.bmat([[E_real,-E_imag],[E_imag,E_real]])
#Realification of D
#D_real = np.real(D_matrix_np)
#D_imag = np.imag(D_matrix_np)
#D_realified = np.bmat([[D_real,-D_imag],[D_imag,D_real]])
beta = cp.Variable((numstate,numstate),hermitian=True)
#print(np.shape(beta))
# Define and solve the CVXPY problem.
constraints = [beta >> 0]
#constraints += [beta == beta.H]
constraints += [cp.trace(E_matrix_np @ beta) == 1]
prob = cp.Problem(cp.Minimize(cp.real(cp.trace(D_matrix_np @ beta))),constraints)
prob.solve(solver=cp.MOSEK,verbose=False)
#Return result.
#Returns an np array of the density matrix and the min eigenvalue
#Needs to unrealify
denmat = beta.value
#denmat_real = denmat[0:numstate,0:numstate]
#denmat_imag = denmat[numstate:numstate*2,0:numstate]
#denmat = denmat_real+1j*denmat_imag
minval = np.trace(denmat@D_matrix_np)
return denmat,minval
def gram_schmidt(set_of_vectors, psd_matrix):
"""
computes the Gram-Schmidt orthonormalization of a set of vectors w.r.t. a PSD,
Hermitian matrix E. Here, the vector space is over the complex field.
"""
new_set = []
E = psd_matrix
for i in range(len(set_of_vectors)):
new_vec = set_of_vectors[i]
for j in range(len(new_set)):
already_done = new_set[j]
new_vec = new_vec - already_done.conj().T @ E @ new_vec * already_done
new_vec_norm = np.sqrt(new_vec.conj().T @ E @ new_vec)
new_vec = new_vec / new_vec_norm
new_set.append(new_vec)
return new_set
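# Sanity-check sketch (assumes E is the identity, so the E-weighted inner
# product reduces to the ordinary dot product and this becomes classical
# Gram-Schmidt):
# vs = [np.array([1.0, 0.0]), np.array([1.0, 1.0])]
# ortho = gram_schmidt(vs, np.eye(2))  # ortho[1] is approximately [0., 1.]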
def eig_diag_routine(D_matrix, E_matrix, inv_cond = 10**(-2), degeneracy_tol = 5):
#degeneracy tol is the number of decimal places until I consider an eigenvalue non degenerate
#Here, its like Toby's diag routine but I write it myself for learning purposes
#Also, with some modifications (return all the eigvecs rather than just the first one)
#inversion cutoff, e.g. eigenvalues smaller than this are set to zero
#this value can be adjusted depending on shot noise, try to find value that fits your noise level
# inv_cond=10**-12 #i use this value for statevector simulation (no shotnoise)
# inv_cond=10**-2 #i use this value for shotnoise with ~10000 shots
e_vals,e_vecs=scipy.linalg.eigh(E_matrix)
#get e_matrix eigenvalues inverted, cutoff with inv_cond
e_vals_inverted=np.array(e_vals)
for k in range(len(e_vals_inverted)):
#print(e_vals_inverted[k])
#print(inv_cond)
if(e_vals_inverted[k]<inv_cond):
e_vals_inverted[k]=0
else:
e_vals_inverted[k]=1/e_vals_inverted[k]
#get e_matrix eigenvalues conditioned, such that small/negative eigenvalues are set to zero
e_vals_cond=np.array(e_vals)
for k in range(len(e_vals_cond)):
if(e_vals_cond[k]<inv_cond):
e_vals_cond[k]=0
#convert the generalized eigenvalue problem into a regular eigenvalue problem using the paper "EIGENVALUE PROBLEMS IN STRUCTURAL MECHANICS"
#we want to solve D\alpha=\lambda E\alpha
#turns out this does not work well if E_matrix has near zero eigenvalues
#instead, we turn this into regular eigenvalue problem which is more behaved
#we diagonalize E_matrix=U*F*F*U^\dag with diagonal F
#Then, define S=U*F, and S^-1=F^-1*U^\dag. Use conditioned eigenvalues F for this such that no negative eigenvalues appear, and for inverse large eigenvalues set to zero
#solve S^-1*D*S^-1^\dag*a=\lambda a
#convert \alpha=S^-1^\dag*a. This is the solution to original problem.
#this procedure ensures that converted eigenvalue problem remains hermitian, and no other funny business happens
# s_matrix = [email protected](np.sqrt(e_vals_cond))
s_matrix_inv = np.diag(np.sqrt(e_vals_inverted))@e_vecs.conj().T
toeigmat = s_matrix_inv @ D_matrix @ s_matrix_inv.conj().T
#S^-1*D*S^-1^\dag matrix might not be Hermitian if D is not Hermitian. SO use
#eig instead of eigh
#We still use the term "qae_energy" because traditionally this is where the generalised eigenvalue problem came from
qae_energy,qae_vectors=scipy.linalg.eig(toeigmat)
ini_alpha_vecs = qae_vectors
ini_alpha_vecs = s_matrix_inv.conj().T @ ini_alpha_vecs
#Note that after the above procedure, since some of the eigenvectors are in the null-space of the E_matrix (recall that we mapped those "wrong" eigenvalues to 0),
#those eigenvectors are not what we want. So, we need to throw them away.
#To find those eigenvectors corresponding to these wrong eigenvalues, because they are in the nullspace of E, they correspond to alphavec^\dag E alphavec = 0
#Here, we might as well normalise the alpha_vecs too, because they may be
#slightly off after throwing those small eigenvalues away
correct_eigvals = []
first_index = 0
for j in range(len(qae_energy)):
jth_vector = ini_alpha_vecs[:,j]
norm = np.sqrt(jth_vector.conj().T @ E_matrix @ jth_vector)
if np.abs(1-norm) < inv_cond:
first_index = j
break
first_vector = ini_alpha_vecs[:,first_index]
correct_eigvals.append(qae_energy[first_index])
after_normalisation = first_vector / np.sqrt(first_vector.conj().T @ E_matrix @ first_vector)
for j in range(first_index + 1, len(qae_energy)):
jth_vector = ini_alpha_vecs[:,j]
norm = np.sqrt(jth_vector.conj().T @ E_matrix @ jth_vector)
if np.abs(1-norm) < inv_cond:
jth_vector = jth_vector/norm
correct_eigvals.append(qae_energy[j])
after_normalisation = np.column_stack((after_normalisation,
jth_vector))
#Here, we do gram schimidt on the eigenspaces of dimension 2 and above
unique_eval_indices = dict()
for index in range(len(correct_eigvals)):
val = correct_eigvals[index]
val = round(val.real,degeneracy_tol) + 1j * round(val.imag, degeneracy_tol)
if val not in unique_eval_indices.keys():
unique_eval_indices[val] = [index]
else:
unique_eval_indices[val].append(index)
# print(unique_eval_indices)
if len(unique_eval_indices.keys()) == len(correct_eigvals):
#all eigenvalues are unique, no need to do any Gram-Schmidt
return (np.array(correct_eigvals), after_normalisation)
else:
#sadly, have to do gram schmidt..
print("Doing gram schmidt cause there are some degeneracies")
for eigval in unique_eval_indices.keys():
eigenvectors = []
for index in unique_eval_indices[eigval]:
eigenvectors.append(after_normalisation[:,index])
new_eigenvectors = gram_schmidt(eigenvectors, E_matrix)
counter = 0
for index in unique_eval_indices[eigval]:
after_normalisation[:,index] = new_eigenvectors[counter]
counter += 1
return (np.array(correct_eigvals) ,after_normalisation)
def diag_routine(D_matrix, E_matrix, inv_cond = 10**(-6)):
#Like toby's diag routine, with some modifications (return all the eigvecs rather than just the first one)
#inversion cutoff, e.g. eigenvalues smaller than this are set to zero
#this value can be adjusted depending on shot noise, try to find value that fits your noise level
# inv_cond=10**-12 #i use this value for statevector simulation (no shotnoise)
# inv_cond=10**-2 #i use this value for shotnoise with ~10000 shots
#Here, I'll use eigh directly because now I'm always solving the Hermitian version. I.e, D_matrix must be hermitian
e_vals,e_vecs=scipy.linalg.eigh(E_matrix)
#get e_matrix eigenvalues inverted, cutoff with inv_cond
e_vals_inverted=np.array(e_vals)
for k in range(len(e_vals_inverted)):
if(e_vals_inverted[k]<inv_cond):
e_vals_inverted[k]=0
else:
e_vals_inverted[k]=1/e_vals_inverted[k]
#get e_matrix eigenvalues conditioned, such that small/negative eigenvalues are set to zero
e_vals_cond=np.array(e_vals)
for k in range(len(e_vals_cond)):
if(e_vals_cond[k]<inv_cond):
e_vals_cond[k]=0
#convert the generalized eigenvalue problem into a regular eigenvalue problem using the paper "EIGENVALUE PROBLEMS IN STRUCTURAL MECHANICS"
#we want to solve D\alpha=\lambda E\alpha
#turns out this does not work well if E_matrix has near zero eigenvalues
#instead, we turn this into regular eigenvalue problem which is more behaved
#we diagonalize E_matrix=U*F*F*U^\dag with diagonal F
#Then, define S=U*F, and S^-1=F^-1*U^\dag. Use conditioned eigenvalues F for this such that no negative eigenvalues appear, and for inverse large eigenvalues set to zero
#solve S^-1*D*S^-1^\dag*a=\lambda a
#convert \alpha=S^-1^\dag*a. This is the solution to original problem.
#this procedure ensures that converted eigenvalue problem remains hermitian, and no other funny business happens
# s_matrix = [email protected](np.sqrt(e_vals_cond))
s_matrix_inv = np.diag(np.sqrt(e_vals_inverted))@e_vecs.conj().T
toeigmat = s_matrix_inv @ D_matrix @ s_matrix_inv.conj().T
#S^-1*D*S^-1^\dag matrix might not be Hermitian if D is not Hermitian. SO use
#eig instead of eigh
#We still use the term "qae_energy" because traditionally this is where the generalised eigenvalue problem came from
qae_energy,qae_vectors=scipy.linalg.eigh(toeigmat)
ini_alpha_vecs = qae_vectors
ini_alpha_vecs = s_matrix_inv.conj().T @ ini_alpha_vecs
#Note that after the above procedure, since some of the eigenvectors are in the null-space of the E_matrix (recall that we mapped those "wrong" eigenvalues to 0),
#those eigenvectors are not what we want. So, we need to throw them away.
#To find those eigenvectors corresponding to these wrong eigenvalues, because they are in the nullspace of E, they correspond to alphavec^\dag E alphavec = 0
#Here, we might as well normalise the alpha_vecs too, because they may be
#slightly off after throwing those small eigenvalues away
correct_eigvals = []
first_index = 0
for j in range(len(qae_energy)):
jth_vector = ini_alpha_vecs[:,j]
norm = np.sqrt(jth_vector.conj().T @ E_matrix @ jth_vector)
if np.abs(1-norm) < inv_cond:
first_index = j
break
#else:
# print('Thrown away 1')
#print(first_index)
first_vector = ini_alpha_vecs[:,first_index]
correct_eigvals.append(qae_energy[first_index])
after_normalisation = first_vector / np.sqrt(first_vector.conj().T @ E_matrix @ first_vector)
for j in range(first_index + 1, len(qae_energy)):
jth_vector = ini_alpha_vecs[:,j]
norm = np.sqrt(jth_vector.conj().T @ E_matrix @ jth_vector)
if np.abs(1-norm) < inv_cond:
#print('Add 1 in')
jth_vector = jth_vector/norm
correct_eigvals.append(qae_energy[j])
after_normalisation = np.column_stack((after_normalisation,
jth_vector))
#else:
# print('Thrown away 1')
return (np.array(correct_eigvals), after_normalisation)
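# Cross-check idea (a hedged sketch, assuming a Hermitian D_matrix and a
# well-conditioned E_matrix so no eigenvalues are cut off by inv_cond): the
# eigenvalues returned here should then agree with the direct generalized
# solver scipy.linalg.eigh(D_matrix, E_matrix, eigvals_only=True).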
def qcqp_IQAE_routine(D_matrix, E_matrix):
#global havedoneqcqpimports
'''if havedoneqcqpimports == False:
import cvxpy as cvx
import mosek
#from qcqp import *
import qcqp as qcp
import importlib
importlib.reload(qcqp)
importlib.reload(cvx)
havedoneqcqpimports = True'''
lengthofalpha = D_matrix.shape[0]
#Realification of E
E_real = np.real(E_matrix)
E_imag = np.imag(E_matrix)
E_realified = np.bmat([[E_real,-E_imag],[E_imag,E_real]])
#realification of D
D_real = np.real(D_matrix)
D_imag = np.imag(D_matrix)
D_realified = np.bmat([[D_real,-D_imag],[D_imag,D_real]])
#Create Variable
x = cvx.Variable(2*lengthofalpha)
#Define objective and constraints
objective = cvx.quad_form(x,D_realified)
constraints = [cvx.quad_form(x,E_realified)==1]
#Solve
prob = cvx.Problem(cvx.Minimize(objective), constraints)
qcqp = QCQP(prob)
#Here could use SDP relaxation to find a starting point for local methods
#Although here, we just use a random point
qcqp.suggest(SDR,solver=cvx.MOSEK)
#Attempt to improve the starting point given by the suggest method
f_cd, v_cd = qcqp.improve(COORD_DESCENT)
result = np.array(x.value)
#unrealify
newalphareal = np.array(result[:lengthofalpha])
newalphaimag = np.array(result[lengthofalpha:])
newalpha = newalphareal + 1j*newalphaimag
#normalpha = np.sqrt(np.abs(np.dot(np.transpose(np.conjugate(newalpha)),np.dot(E_matrix,newalpha))))
#newalpha = newalpha/normalpha
return newalpha
#OPTIMIZERS MUST RETURN THE UPDATED ALPHAS
def eigh_method_for_TTQS(E_matrix,W_matrix,alphas,inv_cond):
e_vals,e_vecs=scipy.linalg.eigh(E_matrix)
e_vals_adjusted=np.array(e_vals)
e_vals_inverted=np.array(e_vals)
for k in range(len(e_vals_inverted)):
if(e_vals_inverted[k]<inv_cond):
e_vals_inverted[k]=0
else:
e_vals_inverted[k]=1/e_vals_inverted[k]
e_vals_cond=np.array(e_vals)
for k in range(len(e_vals_cond)):
if(e_vals_cond[k]<inv_cond):
e_vals_cond[k]=0
W_matrix = -W_matrix
#convert the generalized eigenvalue problem into a regular eigenvalue problem using the paper "EIGENVALUE PROBLEMS IN STRUCTURAL MECHANICS"
#we want to solve W\alpha=\lambda E\alpha
#turns out this does not work well if E_matrix has near zero eigenvalues
#instead, we turn this into regular eigenvalue problem which is more behaved
#we diagonalize E_matrix=U*F*F*U^\dag with diagonal F
#Then, define S=U*F, and S^-1=F^-1*U^\dag. Use conditioned eigenvalues F for this such that no negative eigenvalues appear, and for inverse large eigenvalues set to zero
#solve S^-1*W*S^-1^\dag*a=\lambda a
#convert \alpha=S^-1^\dag*a. This is the solution to original problem.
#this procedure ensures that converted eigenvalue problem remains hermitian, and no other funny business happens
s_matrix=np.dot(e_vecs,np.diag(np.sqrt(e_vals_cond)))
s_matrix_inv=np.dot(np.diag(np.sqrt(e_vals_inverted)),np.transpose(np.conjugate(e_vecs)))
toeigmat=np.dot(s_matrix_inv,np.dot(W_matrix,np.transpose(np.conjugate(s_matrix_inv))))
energy,vectors=scipy.linalg.eigh(toeigmat)
#energy,vectors=scipy.linalg.eig(toeigmat)
#print(energy)
#smallestindex = 0
#minimumev = np.real(energy[0])
#for i in range(len(energy)):
# if np.real(energy[i])<minimumev:
# smallestindex = i
# minimumev = np.real(energy[i])
#print(smallestindex)
ini_alpha_vec=vectors[:,0]
ini_alpha_vec=np.dot(np.transpose(np.conjugate(s_matrix_inv)),ini_alpha_vec)
norm_ini_alpha=np.sqrt(np.abs(np.dot(np.transpose(np.conjugate(ini_alpha_vec)),np.dot(E_matrix,ini_alpha_vec))))
newalpha=ini_alpha_vec/norm_ini_alpha
return newalpha
#havedoneqcqpimports = False
def qcqp_for_TTQS(E_matrix,W_matrix,alphas,bounddiff=10**(-6)):
#global havedoneqcqpimports
'''
if havedoneqcqpimports == False:
import cvxpy as cvx
import mosek
#from qcqp import *
import qcqp as qcp
import importlib
importlib.reload(qcp)
importlib.reload(cvx)
havedoneqcqpimports = True'''
lengthofalpha = len(alphas)
#Realification of E
E_real = np.real(E_matrix)
E_imag = np.imag(E_matrix)
E_realified = np.bmat([[E_real,-E_imag],[E_imag,E_real]])
#realification of W
W_real = np.real(W_matrix)
W_imag = np.imag(W_matrix)
W_realified = np.bmat([[W_real,-W_imag],[W_imag,W_real]])
#Create Variable
x = cvx.Variable(2*lengthofalpha)
#x = cp.Variable((2*lengthofalpha,1))
#X = <EMAIL>
#Define objective and constraints
objective = cvx.quad_form(x,-W_realified)
#constraints = [cp.trace(X@E_realified)<=1+bounddiff,1-bounddiff<=cp.trace(X@E_realified)]
constraints = [cvx.quad_form(x,E_realified)==1]
prob = cvx.Problem(cvx.Minimize(objective), constraints)
#Solve
#prob.solve(solver = cp.MOSEK, mosek_params = {mosek.dparam.optimizer_max_time: 100.0,mosek.iparam.intpnt_solve_form: mosek.solveform.dual},verbose = False)
#prob.solve(verbose = True)
qcqp = QCQP(prob)
#Use the SDP relaxation (SDR) to find a starting point for the local method
#(a random starting point, see the commented RANDOM suggestion below, would also work)
#qcqp.suggest(SDR,solver=cvx.MOSEK)
qcqp.suggest(SDR)
#qcqp.suggest(RANDOM)
#Attempt to improve the starting point given by the suggest method
f_cd, v_cd = qcqp.improve(COORD_DESCENT)
#print("Coordinate descent: objective %.3f, violation %.3f" % (f_cd, v_cd))
#print(x.value)
#print(result)
result = np.array(x.value)
#unrealify
newalphareal = np.array(result[:lengthofalpha])
newalphaimag = np.array(result[lengthofalpha:])
newalpha = newalphareal + 1j*newalphaimag
return newalpha
import os
import nibabel as nib
import numpy as np
from numpy.testing import assert_array_equal, assert_raises, assert_equal, assert_almost_equal
from calie.fields import queries as qr
from .decorators_tools import create_and_erase_temporary_folder_with_a_dummy_nifti_image, pfo_tmp_test
''' test check_omega '''
def test_check_omega_type():
with assert_raises(IOError):
qr.check_omega((10, 10, 10.2))
def test_check_omega_wrong_dimension4():
with assert_raises(IOError):
qr.check_omega((10, 10, 10, 1))
def test_check_omega_wrong_dimension1():
with assert_raises(IOError):
qr.check_omega((10, ))
def test_check_omega_ok():
assert qr.check_omega((10, 11, 12)) == 3
assert qr.check_omega((10, 11)) == 2
''' test check_is_vf '''
def test_check_is_vf_wrong_input():
with assert_raises(IOError):
    # hypothetical wrong input for illustration: a plain 2-d array is not a vector field
    qr.check_is_vf(np.zeros([5, 5]))
# coding=utf-8
"""
Plot a CMA diagram.
In a typical CMA diagram, the horizontal axis corresponds to X=(omega_pe/omega_0)^2,
where omega_pe is the electron plasma frequency and omega_0 the angular wave frequency,
thus it corresponds to the electron plasma density n_e.
The vertical axis corresponds to Y=(omega_ce/omega_0), where omega_ce is the
electron cyclotron frequency, thus it corresponds to the absolute value of the
background magnetic field B_0.
Simply run this script to produce a png plot:
$ python CMA_diagram.py
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__copyright__ = 'University of Stuttgart'
__license__ = 'MIT'
# import standard modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker
# credit string to include at top of plot, to ensure people know they can use the plot
# (someone once told me, every plot appearing somewhere in the internet
# should contain information on how to use it, otherwise it is useless)
# note that the license refers only to that specific plot
# the license for the code is mentioned in the LICENSE file (and above)
credit_str = f'{__author__}, CC BY-SA 4.0'
plt.rcParams.update({'font.size':12})
# force ticks to point inwards
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.top'] = True
plt.rcParams['ytick.right'] = True
def make_plot( fname_plot='' ):
#;{{{
'''
Output a plot, either to X-window (default) or into file.
Parameters
----------
fname_plot: str
if non-empty, the plot is written into this file; if empty, the plot is shown in a window
Returns
-------
'''
if len(fname_plot) > 0:
plt.savefig( fname_plot, dpi=600, bbox_inches='tight' )
print( 'written plot into file {0}'.format(fname_plot) )
else:
plt.show()
#;}}}
def oplot_Ocut( ax, y_range=[], linestyle='solid', linewidth=3, color='black' ):
#;{{{
"""
Overplot the O-mode cut-off in a CMA diagram.
In a typical CMA diagram, the horizontal axis corresponds to X=(omega_pe/omega_0)^2,
where omega_pe is the electron plasma frequency and omega_0 the angular wave frequency;
the vertical axis corresponds to Y=(omega_ce/omega_0), where omega_ce is the
electron cyclotron frequency.
The O-mode cut-off is thus located at X=1.
Parameters
----------
ax: Axes object
y_range: list or np.array
2-element list or array specifying the start and end point for the line
indicating the O-mode cut-off. If not provided, the range of the y-axis
is used.
linestyle: str
linewidth: int
color: str
Returns
-------
"""
if len(y_range) == 0:
y0, y1 = ax.get_ylim()
else:
y0 = y_range[0]
y1 = y_range[1]
# plot the cut-off position
ax.plot( [1,1], [y0,y1],
marker='None',
linestyle=linestyle, linewidth=linewidth,
color=color,
)
# write text to the cut-off (annotate it)
txt_y = y1 - .3*(y1-y0) #1.4
ax.annotate( 'O cut-off', xy=(1.,txt_y), xytext=(1.,txt_y), rotation=90,
horizontalalignment='right', verticalalignment='bottom',
)
#;}}}
def oplot_XRcut( ax, linestyle='solid', linewidth=3, color='black' ):
#;{{{
"""
Overplot the X-mode R cut-off in a CMA diagram.
In a typical CMA diagram, the horizontal axis corresponds to X=(omega_pe/omega_0)^2,
where omega_pe is the electron plasma frequency and omega_0 the angular wave frequency;
the vertical axis corresponds to Y=(omega_ce/omega_0), where omega_ce is the
electron cyclotron frequency.
The right-hand cut-off reads
w_Rcut = 0.5*( sqrt(w_ce^2+4*w_pe^2) + w_ce )
<=>
w_Rcut - 0.5*w_ce = 0.5 * sqrt(w_ce^2+4*w_pe^2)
Normalizing to w_0 yields
2 - Y = sqrt(Y^2 + 4*X)
<=>
4 - 4Y + Y^2 = Y^2 + 4*X
<=>
4*(1-Y) = 4*X
<=>
1-Y = X
<=>
Y = 1 - X
Parameters
----------
ax: Axes object
linestyle: str
linewidth: int
color: str
Returns
------
"""
x0, x1 = ax.get_ylim()
x_range = np.array( [x0, x1] )
arr_x = np.linspace( np.min(x_range), np.max(x_range), 200 )
# calculate right-hand cut-off
arr_y = 1. - arr_x
ax.plot( arr_x, arr_y,
linestyle=linestyle, marker='None', linewidth=linewidth,
color=color,
)
ax.annotate( 'XR cut-off', xy=(.2,.2), xytext=(.2,.2), rotation=-47,
horizontalalignment='left', verticalalignment='bottom',
)
#;}}}
def oplot_XLcut( ax, x_range=[], linestyle='solid', linewidth=3, color='black' ):
#;{{{
"""
Overplot the X-mode L cut-off in a CMA diagram.
In a typical CMA diagram, the horizontal axis corresponds to X=(omega_pe/omega_0)^2,
where omega_pe is the electron plasma frequency and omega_0 the angular wave frequency;
the vertical axis corresponds to Y=(omega_ce/omega_0), where omega_ce is the
electron cyclotron frequency.
The left-hand cut-off reads
w_Lcut = 0.5*( sqrt(w_ce^2+4*w_pe^2) - w_ce )
<=>
w_Lcut + 0.5*w_ce = 0.5 * sqrt(w_ce^2+4*w_pe^2)
Normalizing to w_0 yields
2 + Y = sqrt(Y^2 + 4*X)
<=>
4 + 4Y + Y^2 = Y^2 + 4*X
<=>
4*(1+Y) = 4*X
<=>
1+Y = X
<=>
Y = X - 1
Parameters
----------
ax: Axes object
x_range: list or np.array
2-element list or array specifying the start and end point for the line
indicating the X-mode L cut-off. If not provided, the range of the x-axis
is used.
linestyle: str
linewidth: int
color: str
Returns
------
"""
if len(x_range) == 0:
x0, x1 = ax.get_xlim()
x_range = np.array( [x0, x1] )
arr_x = np.linspace( 1., np.max(x_range), 200 )
arr_y = -1. + arr_x
ax.plot( arr_x, arr_y,
marker='None',
linestyle=linestyle, linewidth=linewidth,
color=color,
)
ax.annotate( 'XL cut-off', xy=(1.2,.33), xytext=(1.2,.33), rotation=47,
horizontalalignment='left', verticalalignment='bottom',
)
#;}}}
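# Analogous check (added for illustration) for the left-hand cut-off derived above:
# along Y = X - 1 one has w_0 = 0.5*( sqrt(w_ce^2 + 4*w_pe^2) - w_ce ).
def _check_XL_cutoff_relation():
    X = np.linspace(1., 3., 11)
    Y = X - 1.
    assert np.allclose(0.5*(np.sqrt(Y**2 + 4.*X) - Y), 1.)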
def oplot_Xres( ax, theta=np.array([90.]), annotation_x=np.array([.5]),
linestyle='dashed', linewidth=3, color='black'
):
#;{{{
"""
Overplot the X-mode upper-hybrid resonance in a CMA diagram.
In a typical CMA diagram, the horizontal axis corresponds to X=(omega_pe/omega_0)^2,
where omega_pe is the electron plasma frequency and omega_0 the angular wave frequency;
the vertical axis corresponds to Y=(omega_ce/omega_0), where omega_ce is the
electron cyclotron frequency.
Parameters
----------
ax: Axes object
theta: np.array
Angle of microwave propagation with respect to the background magnetic field B_0,
units are degrees.
annotation_x: np.array
X-coordinates for the labels (which are the theta-values) to be written into the plot.
linestyle: str
linewidth: int
color: str
Returns
------
"""
for ii in range( len(theta) ):
arr_x = np.linspace( 0., 1., 200 )
arr_y = +1.*np.sqrt( (1.-arr_x)/(1.-arr_x*np.cos(theta[ii]/180.*np.pi)) )
# remove NaN-values in arr_y
arr_x = arr_x[ np.isfinite(arr_y) ]
arr_y = arr_y[ np.isfinite(arr_y) ]
# string for legend (only for one theta value)
if theta[ii] == theta[0]:
label_str = 'X-resonance'
else:
label_str = None
ax.plot( arr_x, arr_y,
marker='None',
linestyle=linestyle, linewidth=linewidth,
color=color,
label=label_str
)
# annotate with theta-value
annot_x_id = np.argmin( np.abs(arr_x-annotation_x[ii]) )
annot_x = arr_x[annot_x_id]
annot_y = arr_y[annot_x_id]
annot_txt = r'${0:2.0f}\degree$'.format( theta[ii] )
ax.annotate( annot_txt, xy=(annot_x,annot_y), xytext=(annot_x,annot_y-.05),
horizontalalignment='left', verticalalignment='top',
)
#;}}}
def oplot_Ores( ax, theta=np.array([30.]), annotation_x=np.array([1.5]),
x_range=[],
linestyle='dotted', linewidth=3, color='black'
):
#;{{{
"""
Overplot the O-mode O resonance in a CMA diagram.
In a typical CMA diagram, the horizontal axis corresponds to X=(omega_pe/omega_0)^2,
where omega_pe is the electron plasma frequency and omega_0 the angular wave frequency;
the vertical axis corresponds to Y=(omega_ce/omega_0), where omega_ce is the
electron cyclotron frequency.
Parameters
----------
ax: Axes object
theta: np.array
Angle of microwave propagation with respect to the background magnetic field B_0,
units are degrees.
annotation_x: np.array
X-coordinates for the labels (which are the theta-values) to be written into the plot.
x_range: list or np.array
2-element list or array, where currently only the second element is used to specify
the end point for the line indicating the O-resonance. If not provided, the range of
the x-axis is used.
linestyle: str
linewidth: int
color: str
Returns
"""
if len(x_range) == 0:
x0, x1 = ax.get_xlim()
x_range = np.array( [x0, x1] )
arr_x = np.linspace( (1.+1e-6), np.max(x_range), 200)
for ii in range( len(theta) ):
arr_y = +1.*np.sqrt( (1.-arr_x)/(1.-arr_x*np.cos(theta[ii]/180.*np.pi)) )
# string for legend (only for one theta value)
if theta[ii] == theta[0]:
label_str = 'O-resonance'
else:
label_str = None
ax.plot( arr_x, arr_y,
marker='None',
linestyle=linestyle, linewidth=linewidth,
color=color,
label=label_str
)
# annotate with theta-value
annot_x_id = np.argmin( np.abs(arr_x-annotation_x[ii]) )
annot_x = arr_x[annot_x_id]
annot_y = arr_y[annot_x_id]
annot_txt = r'${0:2.0f}\degree$'.format( theta[ii] )
ax.annotate( annot_txt, xy=(annot_x,annot_y), xytext=(annot_x,annot_y+.01),
horizontalalignment='left', verticalalignment='bottom',
)
#;}}}
def oplot_ECR( ax, x_range=[], linestyle='solid', linewidth=3, color='black' ):
#;{{{
"""
Overplot the electron cyclotron resonance in a CMA diagram.
In a typical CMA diagram, the horizontal axis corresponds to X=(omega_pe/omega_0)^2,
where omega_pe is the electron plasma frequency and omega_0 the angular wave frequency;
the vertical axis corresponds to Y=(omega_ce/omega_0), where omega_ce is the
electron cyclotron frequency.
Parameters
----------
ax: Axes object
x_range: list or np.array
2-element list or array specifying the start and end point for the line
indicating the electron cyclotron resonance. If not provided, the range of the x-axis
is used.
linestyle: str
linewidth: int
color: str
Returns
------
"""
if len(x_range) == 0:
x0, x1 = ax.get_xlim()
x_range = np.array( [x0, x1] )
arr_x = x_range
arr_y = np.array( [1.,1.] )
ax.plot( arr_x, arr_y,
marker='None',
linestyle=linestyle, linewidth=linewidth,
color=color,
)
label_str_Rres = 'ECR'
ax.annotate( label_str_Rres, xy=(.1,1), xytext=(.1,1.02),
horizontalalignment='left', verticalalignment='bottom',
)
#;}}}
def main():
#;{{{
# plot configuration
fname_plot = 'CMA_diagram.png'
# linewidth for O- and X-mode
lw_O = 3
lw_X = 3
# color for O- and X-mode
color_O = 'black'
color_X = 'black' #'red'
# linestyle for cut-offs and resonances
ls_Ocut = 'solid'
ls_Xcut = 'solid'
ls_Xres = 'dashed'
ls_Ores = 'dotted'
# (width, height) in inches
fig = plt.figure( figsize=(8,6) )
ax1 = fig.add_subplot( 1,1,1 )
x_range = np.array( [0,3] )
y_range = np.array( [0,2] )
# oplot O cut-off
oplot_Ocut( ax1, y_range=y_range, linestyle=ls_Ocut, linewidth=lw_O, color=color_O )
# oplot XR cut-off
oplot_XRcut( ax1, linestyle=ls_Xcut, linewidth=lw_X, color=color_X )
# oplot XL cut-off
oplot_XLcut( ax1, x_range=x_range, linestyle=ls_Xcut, linewidth=lw_X, color=color_X )
# oplot resonances for different thetas
thetas = np.array( [90., 30., 10.] )
annotations_x = np.array( [.5, .65, .8] )
# X resonance
oplot_Xres( ax1, theta=thetas, annotation_x=annotations_x,
linestyle=ls_Xres, linewidth=lw_X, color=color_X
)
# O resonance
thetas = np.array( [30., 10.] )
annotations_x = np.array( [1.5, 2.5] )  # annotation positions are an assumed reconstruction
oplot_Ores( ax1, theta=thetas, annotation_x=annotations_x,
x_range=x_range,
linestyle=ls_Ores, linewidth=lw_O, color=color_O
)
# electron cyclotron resonance
oplot_ECR( ax1, x_range=x_range )
# axis limits, labels and output; reconstructed here to make the script runnable,
# the exact label strings are an assumption
ax1.set_xlim( x_range )
ax1.set_ylim( y_range )
ax1.set_xlabel( r'$X = (\omega_\mathrm{pe}/\omega_0)^2$' )
ax1.set_ylabel( r'$Y = \omega_\mathrm{ce}/\omega_0$' )
make_plot( fname_plot )
#;}}}
if __name__ == '__main__':
    main()
#!/usr/bin/env python
from osgeo import gdal
from osgeo import osr
import numpy as np
import os, sys
from netCDF4 import Dataset
osr.UseExceptions()
def rgb_geotiff(file, outfile, red, green, blue, lat, lon):
with Dataset(file, "r", format="NETCDF4") as nc:
red = np.array(nc.variables[red][:])
green = np.array(nc.variables[green][:])
blue = np.array(nc.variables[blue][:])
lat = np.array(nc.variables[lat][:])
lon = np.array(nc.variables[lon][:])
image_size = red.shape
if np.isnan(red).all() or np.isnan(green).all() or np.isnan(blue).all():
return False
r_pixels = np.around((( red - np.nanmin(red) ) / ( np.nanmax(red) - np.nanmin(red) )) * 255)
g_pixels = np.around((( green - np.nanmin(green) ) / ( np.nanmax(green) - np.nanmin(green) )) * 255)
b_pixels = np.around((( blue - np.nanmin(blue) ) / ( np.nanmax(blue) - np.nanmin(blue) )) * 255)
# set geotransform
nx = image_size[0]
ny = image_size[1]
xmin, ymin, xmax, ymax = [min(lon), min(lat), max(lon), max(lat)]
xres = (xmax - xmin) / float(ny)
yres = (ymax - ymin) / float(nx)
geotransform = (xmin, xres, 0, ymax, 0, -yres)
# create the 3-band raster file
dst_ds = gdal.GetDriverByName('GTiff').Create(outfile, ny, nx, 3, gdal.GDT_Byte)
dst_ds.SetGeoTransform(geotransform) # specify coords
srs = osr.SpatialReference() # establish encoding
srs.ImportFromEPSG(4326) # WGS84 lat/long
dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file
dst_ds.GetRasterBand(1).WriteArray(r_pixels) # write r-band to the raster
dst_ds.GetRasterBand(2).WriteArray(g_pixels) # write g-band to the raster
dst_ds.GetRasterBand(3).WriteArray(b_pixels) # write b-band to the raster
dst_ds.FlushCache() # write to disk
dst_ds = None
def singleband_geotiff(file, outfile, band, lat, lon):
with Dataset(file, "r", format="NETCDF4") as nc:
band = np.array(nc.variables[band][:])
lat = np.array(nc.variables[lat][:])
lon = np.array(nc.variables[lon][:])
image_size = band.shape
if np.isnan(band).all():
    return False
import os
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Select GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1, 3"
# Load imdb dataset
imdb, info = tfds.load("imdb_reviews", with_info=True, as_supervised=True)
train_data, test_data = imdb['train'], imdb['test']
# Prepare the dataset
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
# s.numpy().decode('utf8') is needed in Python 3 to obtain a str instead of bytes
for s, l in train_data:
training_sentences.append(s.numpy().decode('utf8'))
training_labels.append(l.numpy())
for s, l in test_data:
testing_sentences.append(s.numpy().decode('utf8'))
testing_labels.append(l.numpy())
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
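# Sketch of the usual next step (an assumed continuation, not from the original
# script): tokenize the review texts and pad them to a fixed length. The
# hyperparameter values below are illustrative choices.
vocab_size, max_length, trunc_type, oov_tok = 10000, 120, 'post', '<OOV>'
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
training_padded = pad_sequences(tokenizer.texts_to_sequences(training_sentences),
maxlen=max_length, truncating=trunc_type)
testing_padded = pad_sequences(tokenizer.texts_to_sequences(testing_sentences),
maxlen=max_length, truncating=trunc_type)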
import io
import numpy as np
import pytest
import pylas
from pylas import PointFormat
from pylastests.test_common import write_then_read_again, simple_las, test1_4_las
@pytest.fixture()
def file1_4():
return pylas.read(test1_4_las)
@pytest.fixture()
def file():
return pylas.read(simple_las)
def test_xyz():
las = pylas.create()
shape = (150,)
las.X = np.zeros(shape, dtype=np.int32)
las.Y = np.ones(shape, dtype=np.int32)
las.Z = np.zeros(shape, dtype=np.int32)
las.Z[:] = -152
las = write_then_read_again(las)
assert np.alltrue(las.X == 0)
assert np.alltrue(las.Y == 1)
assert np.alltrue(las.Z == -152)
def test_wrong_version():
for i in range(6, 8):
with pytest.raises(pylas.errors.PylasError):
_ = pylas.create(point_format=i, file_version="1.2")
def test_good_version_is_used():
for i in range(6, 8):
las = pylas.create(point_format=i)
assert las.header.version.major == 1
assert las.header.version.minor == 4
def test_create_fmt_0():
new = pylas.create(point_format=0)
with pytest.raises(ValueError):
new.red = np.zeros(len(new.points), np.uint16)
with pytest.raises(ValueError):
new.red = np.zeros(len(new.points), np.uint16)
with pytest.raises(ValueError):
new.red = np.zeros(len(new.points), np.uint16)
with pytest.raises(ValueError):
new.gps_time = np.zeros(len(new.points), np.float64)
def test_create_fmt_1():
new = pylas.create(point_format=1)
with pytest.raises(ValueError):
new.red = np.zeros(len(new.points), np.uint16)
with pytest.raises(ValueError):
new.red = np.zeros(len(new.points), np.uint16)
with pytest.raises(ValueError):
new.red = np.zeros(len(new.points), np.uint16)
gps_time = np.random.uniform(0, 25641, len(new.points))
new.gps_time = gps_time
assert np.allclose(new.gps_time, gps_time)
new = write_then_read_again(new)
assert np.allclose(new.gps_time, gps_time)
def test_create_fmt_2(file):
new = pylas.create(point_format=2)
with pytest.raises(ValueError):
new.gps_time = file.gps_time
new.red = file.red
new.green = file.green
new.blue = file.blue
assert np.allclose(new.red, file.red)
assert np.allclose(new.green, file.green)
assert np.allclose(new.blue, file.blue)
new = write_then_read_again(new)
assert np.allclose(new.red, file.red)
assert np.allclose(new.green, file.green)
assert np.allclose(new.blue, file.blue)
# coding = utf-8
import numpy as np
from read_sphere_wav import read_sphere_wav
from matplotlib import pyplot
import matplotlib.pyplot as plt
from scipy.io import wavfile
def hz2mel(f):
return 2595. * np.log10(1. + f / 700.)
def mel2hz(z):
return 700. * (np.power(10., z / 2595.) - 1.)
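# Quick sanity check (added for illustration): hz2mel and mel2hz are mutual inverses.
def _mel_scale_roundtrip_check():
    freqs = np.array([100., 1000., 4000.])
    assert np.allclose(mel2hz(hz2mel(freqs)), freqs)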
def get_dct_coeff(in_channel, out_channel):
dct_coef = np.zeros((out_channel, in_channel), dtype=np.float32)
for i in range(out_channel):
n = np.linspace(0, in_channel - 1, in_channel)
dct_coef[i, :] = np.cos((2 * n + 1) * i * np.pi / (2 * in_channel))
return dct_coef
def get_fft_mel_mat(nfft, sr=8000, nfilts=None, width=1.0, minfrq=20, maxfrq=None, constamp=0):
if nfilts is None:
nfilts = nfft
if maxfrq is None:
maxfrq = sr // 2
wts = np.zeros((nfilts, nfft//2+1))
fftfrqs = np.arange(0, nfft//2+1) / (1. * nfft) * (sr)
minmel = hz2mel(minfrq)
maxmel = hz2mel(maxfrq)
binfrqs = mel2hz(minmel + np.arange(0, nfilts+2) / (nfilts+1.) * (maxmel - minmel))
# binbin = np.round(binfrqs / maxfrq * nfft)
for i in range(nfilts):
fs = binfrqs[[i+0, i+1, i+2]]
fs = fs[1] + width * (fs - fs[1])
loslope = (fftfrqs - fs[0]) / (fs[1] - fs[0])
hislope = (fs[2] - fftfrqs) / (fs[2] - fs[1])
wts[i, :] = np.maximum(0, np.minimum(loslope, hislope))
return wts
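# Illustrative usage sketch (parameter values are assumptions): build a 40-band mel
# filterbank for 512-point FFT frames at 16 kHz and apply it to one power spectrum.
def _mel_filterbank_usage_example():
    nfft, sr = 512, 16000
    mel_fb = get_fft_mel_mat(nfft, sr, nfilts=40)  # shape (40, nfft//2+1)
    power_spectrum = np.abs(np.fft.rfft(np.random.randn(nfft)))**2
    mel_energies = np.dot(mel_fb, power_spectrum)  # shape (40,)
    return mel_energies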
def mfcc_extractor(xx, sr, win_len, shift_len, mel_channel, dct_channel, win_type, include_delta):
my_melbank = get_fft_mel_mat(win_len, sr, mel_channel)
pre_emphasis_weight = 0.9375
# x = xx * (1-pre_emphasis_weight)
x = np.append(xx[0], xx[1:] - pre_emphasis_weight * xx[:-1])
dctcoef = np.zeros((dct_channel, mel_channel), dtype=np.float32)
for i in range(dct_channel):
n = np.linspace(0, mel_channel-1, mel_channel)
dctcoef[i, :] = np.cos((2 * n + 1) * i * np.pi / (2 * mel_channel))
w = 1 + 6 * np.sin(np.pi * np.linspace(0, dct_channel-1, dct_channel) / (dct_channel-1))
w /= w.max()
w = np.reshape(w, newshape=(dct_channel, 1))
samples = x.shape[0]
frames = (samples - win_len) // shift_len
stft = np.zeros((win_len, frames), dtype=np.complex64)
spectrum = np.zeros((win_len // 2 + 1, frames), dtype=np.float32)
mfcc = np.zeros((dct_channel, frames), dtype=np.float32)
if win_type == 'hanning':
window = np.hanning(win_len)
# nonlinear mixed effects model
import numpy as np
import ipopt
from copy import deepcopy
from limetr import utils
class LimeTr:
def __init__(self, n, k_beta, k_gamma, Y, F, JF, Z,
S=None, share_obs_std=False,
C=None, JC=None, c=None,
H=None, JH=None, h=None,
uprior=None, gprior=None, lprior=None,
inlier_percentage=1.0):
"""
Create LimeTr object, for general mixed effects model
Parameters
----------
n : ndarray
study sizes, n[i] is the number of observation for ith study.
k_beta : int
dimension of beta
k_gamma : int
dimension of gamma
Y : ndarray
study observations
F : function
return the predict observations given beta
JF : function
return the jacobian function of F
Z : ndarray
covariates matrix for the random effect
S : optional, ndarray
observation standard deviation
"""
# pass in the dimension
self.n = np.array(n)
self.m = len(n)
self.N = sum(n)
self.k_beta = k_beta
self.k_gamma = k_gamma
# if include measurement error also as variable
if S is not None:
self.std_flag = 0
self.k_delta = 0
elif share_obs_std:
self.std_flag = 1
self.k_delta = 1
else:
self.std_flag = 2
self.k_delta = self.m
self.k = self.k_beta + self.k_gamma + self.k_delta
self.k_total = self.k
self.idx_beta = slice(0, self.k_beta)
self.idx_gamma = slice(self.k_beta, self.k_beta + self.k_gamma)
self.idx_delta = slice(self.k_beta + self.k_gamma, self.k)
self.idx_split = np.cumsum(np.insert(n, 0, 0))[:-1]
# pass in the data
self.Y = Y
self.F = F
self.JF = JF
self.Z = Z
self.S = S
if self.std_flag == 0:
self.V = S**2
# pass in the priors
self.use_constraints = (C is not None)
self.use_regularizer = (H is not None)
self.use_uprior = (uprior is not None)
self.use_gprior = (gprior is not None)
self.use_lprior = (lprior is not None)
self.C = C
self.JC = JC
self.c = c
if self.use_constraints:
self.constraints = C
self.jacobian = JC
self.num_constraints = C(np.zeros(self.k)).size
self.cl = c[0]
self.cu = c[1]
else:
self.num_constraints = 0
self.cl = []
self.cu = []
self.H = H
self.JH = JH
self.h = h
if self.use_regularizer:
self.num_regularizer = H(np.zeros(self.k)).size
self.hm = self.h[0]
self.hw = 1.0/self.h[1]**2
else:
self.num_regularizer = 0
if self.use_uprior:
self.uprior = uprior
else:
self.uprior = np.array([
[-np.inf]*self.k_beta + [0.0]*self.k_gamma +\
[1e-7]*self.k_delta,
[np.inf]*self.k
])
self.use_uprior = True
self.lb = self.uprior[0]
self.ub = self.uprior[1]
if self.use_gprior:
self.gprior = gprior
self.gm = gprior[0]
self.gw = 1.0/gprior[1]**2
if self.use_lprior:
self.lprior = lprior
self.lm = lprior[0]
self.lw = np.sqrt(2.0)/lprior[1]
# double dimension pass into ipopt
self.k_total += self.k
# extend the constraints matrix
if self.use_constraints:
def constraints(x):
v = x[:self.k]
v_abs = x[self.k:]
vec1 = C(v)
vec2 = np.hstack((v_abs - (v - self.lm),
v_abs + (v - self.lm)))
return np.hstack((vec1, vec2))
def jacobian(x):
v = x[:self.k]
v_abs = x[self.k:]
Id = np.eye(self.k)
mat1 = JC(v)
mat2 = np.block([[-Id, Id], [Id, Id]])
return np.vstack((mat1, mat2))
else:
def constraints(x):
v = x[:self.k]
v_abs = x[self.k:]
vec = np.hstack((v_abs - v, v_abs + v))
return vec
def jacobian(x):
v = x[:self.k]
v_abs = x[self.k:]
Id = np.eye(self.k)
mat = np.block([[-Id, Id], [Id, Id]])
return mat
self.num_constraints += 2*self.k
self.constraints = constraints
self.jacobian = jacobian
self.cl = np.hstack((self.cl, np.zeros(2*self.k)))
self.cu = np.hstack((self.cu, np.repeat(np.inf, 2*self.k)))
# extend the regularizer matrix
if self.use_regularizer:
def H_new(x):
v = x[:self.k]
return H(v)
def JH_new(x):
v = x[:self.k]
return np.hstack((JH(v),
np.zeros((self.num_regularizer,
self.k))))
self.H = H_new
self.JH = JH_new
# extend Gaussian and Uniform priors
if self.use_gprior:
gprior_abs = np.array([[0.0]*self.k, [np.inf]*self.k])
self.gprior = np.hstack((self.gprior, gprior_abs))
self.gm = self.gprior[0]
self.gw = 1.0/self.gprior[1]**2
if self.use_uprior:
uprior_abs = np.array([[0.0]*self.k, [np.inf]*self.k])
self.uprior = np.hstack((self.uprior, uprior_abs))
self.lb = self.uprior[0]
self.ub = self.uprior[1]
# trimming option
self.use_trimming = (0.0 < inlier_percentage < 1.0)
self.inlier_percentage = inlier_percentage
self.num_inliers = np.floor(inlier_percentage*self.N)
self.num_outliers = self.N - self.num_inliers
self.w = np.repeat(self.num_inliers/self.N, self.N)
# specify solution to be None
self.soln = None
self.info = None
self.beta = np.zeros(self.k_beta)
self.gamma = np.repeat(0.01, self.k_gamma)
self.delta = np.repeat(0.01, self.k_delta)
# check the input
self.check()
def check(self):
assert self.Y.shape == (self.N,)
assert self.Z.shape == (self.N, self.k_gamma)
if self.S is not None:
assert self.S.shape == (self.N,)
assert np.all(self.S > 0.0)
if self.use_constraints:
assert self.c.shape == (2, self.num_constraints)
assert np.all(self.cl <= self.cu)
if self.use_regularizer:
assert self.h.shape == (2, self.num_regularizer)
assert np.all(self.h[1] > 0.0)
if self.use_uprior:
assert np.all(self.lb <= self.ub)
if self.use_gprior:
assert self.gprior.shape == (2, self.k)
assert np.all(self.gprior[1] > 0.0)
#!/usr/bin/python
#
# Project Saturn
# _____________________________________________________________________________
#
# _.oo.
# August 2019 _.u[[/;:,. .odMMMMMM'
# .o888UU[[[/;:-. .o@P^ MMM^
# preprocessor.py oN88888UU[[[/;::-. dP^
# preprocess files and dNMMNN888UU[[[/;:--. .o@P^
# normalize according to input stats ,MMMMMMN888UU[[/;::-. o@^
# NNMMMNN888UU[[[/~.o@P^
# <NAME> 888888888UU[[[/o@^-..
# oI8888UU[[[/o@P^:--..
# .@^ YUU[[[/o@^;::---..
# oMP ^/o@P^;:::---..
# .dMMM .o@^ ^;::---...
# dMMMMMMM@^` `^^^^
# YMMMUP^
# ^^
# _____________________________________________________________________________
#
#
# Copyright 2019 <NAME>
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.#
# _____________________________________________________________________________
# ----------------
# import libraries
# ----------------
# standard libraries
# -----
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
import numpy as np
# custom functions
# -----
import utilities.networks.buildingblocks as bb
class PreprocessorNetwork(bb.ComposedModule):
"""
PreprocessorNetwork inherits from ComposedModule. It is a input network
that preprocesses the network input. It can crop parts of the image and
applies normalization before the image is handed to the neural network.
"""
def initialize_stats(self):
# input statistics for normalization
self.stats = {}
self.update_stats = {}
self.reset_stats = {}
# not initialized to zero for graceful error handling
self.stats['N'] = tf.Variable(1., trainable=False)
self.stats['Sx'] = tf.Variable(
tf.zeros([1, self.image_height, self.image_width,
self.image_channels]),
trainable=False)
self.stats['Sxx'] = tf.Variable(
tf.ones([1, self.image_height, self.image_width,
self.image_channels]),
trainable=False)
self.update_stats['N'] = tf.assign_add(self.stats['N'],
self.batchsize)
self.update_stats['Sx'] = tf.assign_add(
self.stats['Sx'], tf.expand_dims(
tf.reduce_sum(
tf.cast(self.input_module.outputs[0], tf.float32),
axis=0), 0))
self.update_stats['Sxx'] = tf.assign_add(
self.stats['Sxx'], tf.expand_dims(
tf.reduce_sum(tf.square(
tf.cast(self.input_module.outputs[0], tf.float32)),
axis=0), 0))
self.reset_stats['N'] = tf.assign(self.stats['N'], 0)
self.reset_stats['Sx'] = tf.assign(
self.stats['Sx'],
tf.zeros([1, self.image_height, self.image_width,
self.image_channels]))
self.reset_stats['Sxx'] = tf.assign(
self.stats['Sxx'],
tf.zeros([1, self.image_height, self.image_width,
self.image_channels]))
pass
def gather_statistics(self, session, iterator, flnames,
filenames_placeholder, is_training,
show_image=False):
self.initialize_stats()
session.run(iterator.initializer,
feed_dict={filenames_placeholder: flnames})
print(" " * 80 + "\r" + "[Statistics]\tstarted", end="\r")
session.run([self.reset_stats['N'],
self.reset_stats['Sx'], self.reset_stats['Sxx']])
while True:
try:
N, Sx, Sxx = session.run(
[self.update_stats['N'], self.update_stats['Sx'],
self.update_stats['Sxx']],
feed_dict={is_training.placeholder: False})
except (tf.errors.OutOfRangeError):
session.run([tf.assign(
self.layers['inp_norm'].n, self.stats['N']),
tf.assign(self.layers['inp_norm'].sx,
self.stats['Sx']),
tf.assign(self.layers['inp_norm'].sxx,
self.stats['Sxx'])])
if show_image:
import matplotlib.pyplot as plt
session.run(iterator.initializer,
feed_dict={
filenames_placeholder: flnames})
im = session.run(self.layers['inp_norm'].outputs[0],
feed_dict={
is_training.placeholder: False})
for i in range(10):
print('max:', np.max(im[i, :, :, 0]),
'min:', np.min(im[i, :, :, 0]),
'std:', np.std(im[i, :, :, 0]))
"""
Chain outlier tests.
"""
__all__ = ["identify_outliers"]
import os
from numpy import mean, std, sqrt, where, argmin, arange, array
from numpy import sort
from scipy.stats import t as student_t
from scipy.stats import scoreatpercentile
from .mahal import mahalanobis
from .acr import ACR
tinv = student_t.ppf
# Hack to adjust the aggressiveness of the interquartile range test.
# TODO: Document/remove BUMPS_INTERQUARTILES or replace with command option.
BUMPS_INTERQUARTILES = float(os.environ.get("BUMPS_INTERQUARTILES", "2.0"))
# CRUFT: scoreatpercentile not accepting array arguments in older scipy
def prctile(v, Q):
v = sort(v)
# pylint: disable=missing-module-docstring (C0114)
# pylint: disable=missing-function-docstring (C0116)
# pylint: disable=protected-access (W0212)
import inspect
from typing import Callable, Tuple
from unittest.mock import MagicMock, Mock, patch
import numpy as np
import pytest
from matching import ChannelsType
from matching.operations import \
FeatureDistributionMatching as FeatureDistMatching
from tests import (CHANNEL_RANGES_DEFAULT, CHANNELS_DEFAULT, MUNICH_1_PATH,
MUNICH_2_PATH)
from utils.cs_conversion import ChannelRange
from utils.image_io import read_image
ImageGenType = Callable[[Tuple[int, int, int], int], np.ndarray]
TEST_IMAGE_423 = np.array([[[0.09634835, 0.67985358, 0.71879272],
[0.2746647, 0.55942712, 0.17269985]],
[[0.60345517, 0.70931529, 0.14624073],
[0.23241476, 0., 0.91803395]], # 0. entry
[[0.58758528, 0.66624122, 0.55438404],
[1., 0.26015386, 0.28256821]], # 1. entry
[[0.87368081, 0.85794979, 0.11262025],
[0.70622847, 0.9368422, 0.39187311]]])
TEST_IMAGE_421 = TEST_IMAGE_423[:, :, 0:1]
TEST_IMAGE_243 = np.array([[[0.96883339, 0.6280047, 0.55266079],
[0.64656832, 0.96452021, 0.01043656],
[1., 0.75418668, 0.77878355], # 1. entry
[0.80548047, 0.73128374, 0.72436705]],
[[0.81233289, 0.67804285, 0.10213132],
[0.3819547, 0.67940864, 0.8103251],
[0.30974552, 0.49638342, 0.], # 0. entry
[0.11005092, 0.48727506, 0.31166669]]])
TEST_IMAGE_241 = TEST_IMAGE_243[:, :, 0:1]
def test_channel_ranges() -> None:
with pytest.raises(TypeError):
FeatureDistMatching(CHANNELS_DEFAULT, check_input=True,
channel_ranges=[1, 2, 3]) # type: ignore
with pytest.raises(TypeError):
FeatureDistMatching(CHANNELS_DEFAULT, check_input=True,
channel_ranges=(
ChannelRange(0.1, 1.7), 1, # type: ignore
ChannelRange(0.1, 1.7)))
@pytest.mark.parametrize('test_image, des_shape',
[(TEST_IMAGE_423, (8, 3)),
(TEST_IMAGE_421, (8, 1))
])
def test_get_feature_matrix(test_image: np.array,
des_shape: Tuple[int, int]) -> None:
result = FeatureDistMatching._get_feature_matrix(test_image)
np.testing.assert_array_equal(result, test_image.reshape(des_shape))
def test_center_image() -> None:
test_image = TEST_IMAGE_423
test_image_mat = FeatureDistMatching._get_feature_matrix(test_image)
test_image_mean = np.mean(test_image_mat, axis=0)
result_image_mat, result_mean = FeatureDistMatching._center_image(
test_image_mat)
np.testing.assert_array_equal(result_mean, test_image_mean)
np.testing.assert_almost_equal(np.mean(result_image_mat, axis=0),
np.zeros(3))
def test_whitening() -> None:
test_image = TEST_IMAGE_423
test_image_mat = FeatureDistMatching._get_feature_matrix(test_image)
FeatureDistMatching._center_image(test_image_mat)
result = FeatureDistMatching._whitening(test_image_mat)
np.testing.assert_almost_equal(np.cov(result, rowvar=False),
np.identity(3))
def test_whitening_2d() -> None:
test_image = TEST_IMAGE_421
test_image_mat = FeatureDistMatching._get_feature_matrix(test_image)
FeatureDistMatching._center_image(test_image_mat)
result = FeatureDistMatching._whitening(test_image_mat)
np.testing.assert_almost_equal(np.var(result), 1.)
def test_covariance_transformation() -> None:
test_image = TEST_IMAGE_423
test_image_mat = FeatureDistMatching._get_feature_matrix(test_image)
FeatureDistMatching._center_image(test_image_mat)
test_image_white = FeatureDistMatching._whitening(test_image_mat)
feature_mat_ref = FeatureDistMatching._get_feature_matrix(TEST_IMAGE_243)
result = FeatureDistMatching._covariance_transformation(test_image_white,
feature_mat_ref)
np.testing.assert_almost_equal(np.cov(result, rowvar=False),
np.cov(feature_mat_ref, rowvar=False))
def test_covariance_transformation_2d() -> None:
test_image = TEST_IMAGE_421
test_image_mat = FeatureDistMatching._get_feature_matrix(test_image)
FeatureDistMatching._center_image(test_image_mat)
test_image_white = FeatureDistMatching._whitening(test_image_mat)
feature_mat_ref = FeatureDistMatching._get_feature_matrix(
TEST_IMAGE_243[:, :, 0])
result = FeatureDistMatching._covariance_transformation(test_image_white,
feature_mat_ref)
np.testing.assert_almost_equal(np.var(result),
np.var(feature_mat_ref))
@pytest.fixture(name='feature_dist_matching')
def fixture_feature_dist_matching() -> FeatureDistMatching:
return FeatureDistMatching(CHANNELS_DEFAULT,
CHANNEL_RANGES_DEFAULT,
check_input=True)
def test_design() -> None:
assert inspect.isabstract(FeatureDistMatching) is False
assert len(FeatureDistMatching.__mro__) == 4
@pytest.mark.parametrize('channel_range_max', [1.0, 255.])
def test_matching(channel_range_max: float) -> None:
source = TEST_IMAGE_423
reference = TEST_IMAGE_243
source[:, :, 1] *= channel_range_max
reference[:, :, 1] *= channel_range_max
result = FeatureDistMatching._matching(source, reference)
feature_mat_result = FeatureDistMatching._get_feature_matrix(result)
feature_mat_ref = FeatureDistMatching._get_feature_matrix(reference)
result_mean = np.mean(feature_mat_result, axis=0)
reference_mean = np.mean(feature_mat_ref, axis=0)
np.testing.assert_almost_equal(result_mean, reference_mean)
feature_mat_result -= result_mean
feature_mat_ref -= reference_mean
np.testing.assert_almost_equal(np.cov(feature_mat_result, rowvar=False),
np.cov(feature_mat_ref, rowvar=False))
@pytest.mark.parametrize('channel_range_max', [1.0, 255.])
def test_matching_2d(channel_range_max: float) -> None:
source = TEST_IMAGE_421 * channel_range_max
reference = TEST_IMAGE_421 * channel_range_max
result = FeatureDistMatching._matching(source, reference)
feature_mat_result = FeatureDistMatching._get_feature_matrix(result)
feature_mat_ref = FeatureDistMatching._get_feature_matrix(reference)
result_mean = np.mean(feature_mat_result, axis=0)
reference_mean = np.mean(feature_mat_ref, axis=0)
np.testing.assert_almost_equal(result_mean, reference_mean)
feature_mat_result -= result_mean
feature_mat_ref -= reference_mean
np.testing.assert_almost_equal(np.var(feature_mat_result),
np.var(feature_mat_ref))
@pytest.mark.parametrize('source, reference',
[(TEST_IMAGE_423, TEST_IMAGE_243),
(TEST_IMAGE_421, TEST_IMAGE_241)])
def test_apply(feature_dist_matching: FeatureDistMatching,
source: np.array,
reference: np.array) -> None:
feature_dist_matching.channels = CHANNELS_DEFAULT[0:source.shape[-1]]
feature_dist_matching.channel_ranges = CHANNEL_RANGES_DEFAULT[
0:source.shape[-1]]
source_copy = np.copy(source)
reference_copy = np.copy(reference)
result = feature_dist_matching._apply(source, reference)
# Check modification of input:
np.testing.assert_array_equal(source, source_copy)
np.testing.assert_array_equal(reference, reference_copy)
# Check result:
assert result.shape == source.shape
assert np.max(result) <= 1.
assert 0.1 < np.mean(result) < 0.9
assert np.min(result) >= 0.
assert result.dtype == np.float32
@pytest.mark.parametrize('channels',
[(0,), (1,), (2,), (0, 1), (0, 2), (1, 2), (0, 1, 2)])
def test_apply_channels(channels: ChannelsType,
feature_dist_matching: FeatureDistMatching) -> None:
source = TEST_IMAGE_423.astype(np.float32)
reference = TEST_IMAGE_243.astype(np.float32)
feature_dist_matching.channels = channels
result = feature_dist_matching._apply(source, reference)
for untouched_channel in {0, 1, 2} - set(channels):
np.testing.assert_array_equal(source[:, :, untouched_channel],
result[:, :, untouched_channel])
def test_apply_channel_range(
feature_dist_matching: FeatureDistMatching) -> None:
source = TEST_IMAGE_423
reference = TEST_IMAGE_243
source[:, :, 1] *= 255.
source[:, :, 2] *= 254.
source[:, :, 2] -= 127.
reference[:, :, 1] *= 255.
reference[:, :, 2] *= 254.
reference[:, :, 2] -= 127.
feature_dist_matching.channel_ranges = tuple([ChannelRange(0., 1.),
ChannelRange(0., 255.),
ChannelRange(-127.0, 127.0)])
result = feature_dist_matching._apply(source, reference)
np.testing.assert_array_equal(source, TEST_IMAGE_423)
np.testing.assert_array_equal(reference, TEST_IMAGE_243)
assert result.shape == source.shape
assert result.dtype == np.float32
assert np.max(result[:, :, 0]) <= 1.
assert 0.1 < np.mean(result[:, :, 0]) < 0.9
assert np.max(result[:, :, 0]) >= 0.
assert np.max(result[:, :, 1]) <= 255.
assert 5. < np.mean(result[:, :, 1]) < 250.
assert np.min(result[:, :, 1]) >= 0.
assert np.max(result[:, :, 2]) <= 127.
assert -120. < np.mean(result[:, :, 2]) < 120.
assert np.min(result[:, :, 2]) >= -127.
@patch.object(FeatureDistMatching,
FeatureDistMatching._get_feature_matrix.__name__)
@patch.object(FeatureDistMatching,
FeatureDistMatching._center_image.__name__,
return_value=(np.ones((2, 3)), np.ones(3)))
def test_apply_uses_patched_helpers(mock_center_image: MagicMock,
mock_get_feature_matrix: MagicMock) -> None:
    # Assumed reconstruction: the second element of return_value (the mean) and
    # this stub test body are not in the source; the stub only documents the
    # decorator wiring of the patched helpers.
    ...
import unittest
from hera_sim import noise
import numpy as np
import aipy
np.random.seed(0)
class TestNoise(unittest.TestCase):
def test_white_noise(self):
n1 = noise.white_noise(100)
self.assertEqual(n1.size, 100)
self.assertEqual(n1.shape, (100,))
n2 = noise.white_noise((100, 100))
self.assertEqual(n2.shape, (100, 100))
n3 = noise.white_noise(100000)
self.assertAlmostEqual(np.average(n3), 0, 1)
self.assertAlmostEqual(np.std(n3), 1, 2)
def test_resample_Tsky(self):
fqs = np.linspace(0.1, 0.2, 100)
lsts = np.linspace(0, 2 * np.pi, 200)
tsky = noise.resample_Tsky(fqs, lsts)
self.assertEqual(tsky.shape, (200, 100))
self.assertTrue(np.all(tsky[0] == tsky[1]))
self.assertFalse(np.all(tsky[:, 0] == tsky[:, 1]))
tsky = noise.resample_Tsky(fqs, lsts, Tsky_mdl=noise.HERA_Tsky_mdl["xx"])
self.assertEqual(tsky.shape, (200, 100))
self.assertFalse(np.all(tsky[0] == tsky[1]))
"""
inertia.py
-------------
Functions for dealing with inertia tensors.
Results validated against known geometries and checked for
internal consistency.
"""
import numpy as np
from trimesh import util
# a matrix where all non-diagonal terms are -1.0
# and all diagonal terms are 1.0
negate_nondiagonal = (np.eye(3, dtype=np.float64) * 2) - 1
def cylinder_inertia(mass, radius, height, transform=None):
"""
Return the inertia tensor of a cylinder.
Parameters
------------
mass : float
Mass of cylinder
radius : float
Radius of cylinder
height : float
Height of cylinder
transform : (4, 4) float
Transformation of cylinder
Returns
------------
inertia : (3, 3) float
Inertia tensor
"""
h2, r2 = height ** 2, radius ** 2
diagonal = np.array([((mass * h2) / 12) + ((mass * r2) / 4),
((mass * h2) / 12) + ((mass * r2) / 4),
(mass * r2) / 2])
inertia = diagonal * np.eye(3)
if transform is not None:
inertia = transform_inertia(transform, inertia)
return inertia
def sphere_inertia(mass, radius):
"""
Return the inertia tensor of a sphere.
Parameters
------------
mass : float
Mass of sphere
radius : float
Radius of sphere
Returns
------------
inertia : (3, 3) float
Inertia tensor
"""
inertia = (2.0 / 5.0) * (radius ** 2) * mass * np.eye(3)
return inertia
def principal_axis(inertia):
"""
Find the principal components and principal axis
of inertia from the inertia tensor.
Parameters
------------
inertia : (3, 3) float
Inertia tensor
Returns
------------
components : (3,) float
Principal components of inertia
vectors : (3, 3) float
Row vectors pointing along the
principal axes of inertia
"""
inertia = np.asanyarray(inertia, dtype=np.float64)
if inertia.shape != (3, 3):
raise ValueError('inertia tensor must be (3, 3)!')
# you could use any of the following to calculate this:
# np.linalg.svd, np.linalg.eig, np.linalg.eigh
# moment of inertia is square symmetric matrix
# eigh has the best numeric precision in tests
components, vectors = np.linalg.eigh(inertia * negate_nondiagonal)
# eigh returns them as column vectors, change them to row vectors
vectors = vectors.T
return components, vectors
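# Illustrative consistency check (not part of the original module): for an
# axis-aligned cylinder the principal components are just the diagonal entries
# of its inertia tensor.
def _principal_axis_example():
    tensor = cylinder_inertia(mass=1.0, radius=0.5, height=2.0)
    components, vectors = principal_axis(tensor)
    assert np.allclose(np.sort(components), np.sort(np.diag(tensor)))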
def transform_inertia(transform, inertia_tensor):
"""
Transform an inertia tensor to a new frame.
More details in OCW PDF:
MIT16_07F09_Lec26.pdf
Parameters
------------
transform : (3, 3) or (4, 4) float
Transformation matrix
inertia_tensor : (3, 3) float
Inertia tensor
Returns
------------
transformed : (3, 3) float
Inertia tensor in new frame
"""
# check inputs and extract rotation
transform = np.asanyarray(transform, dtype=np.float64)
if transform.shape == (4, 4):
rotation = transform[:3, :3]
elif transform.shape == (3, 3):
rotation = transform
else:
raise ValueError('transform must be (3, 3) or (4, 4)!')
inertia_tensor = np.asanyarray(inertia_tensor, dtype=np.float64)
if inertia_tensor.shape != (3, 3):
raise ValueError('inertia_tensor must be (3, 3)!')
transformed = util.multi_dot([rotation,
inertia_tensor * negate_nondiagonal,
rotation.T])
transformed *= negate_nondiagonal
return transformed
def radial_symmetry(mesh):
"""
Check whether a mesh has radial symmetry.
Returns
-----------
symmetry : None or str
None No rotational symmetry
'radial' Symmetric around an axis
'spherical' Symmetric around a point
axis : None or (3,) float
Rotation axis or point
section : None or (3, 2) float
If radial symmetry provide vectors
to get cross section
"""
# shortcuts to avoid typing and hitting cache
scalar = mesh.principal_inertia_components
vector = mesh.principal_inertia_vectors
# the sorted order of the principal components
order = scalar.argsort()
# we are checking if a geometry has radial symmetry
# if 2 of the PCI are equal, it is a revolved 2D profile
# if 3 of the PCI (all of them) are equal it is a sphere
# thus we take the diff of the sorted PCI, scale it as a ratio
# of the largest PCI, and then scale to the tolerance we care about
# if tol is 1e-3, that means that 2 components are identical if they
# are within .1% of the maximum PCI.
diff = np.abs(np.diff(scalar[order]))
import argparse
import os
from time import time as t
import matplotlib.pyplot as plt
import numpy as np
import torch
from torchvision import transforms
from tqdm import tqdm
from sklearn.preprocessing import normalize
from scipy.stats import entropy
from bindsnet import ROOT_DIR
from bindsnet.analysis.plotting import (
plot_assignments,
plot_input,
plot_performance,
plot_spikes,
plot_voltages,
plot_weights,
)
from bindsnet.datasets import MNIST, DataLoader
from bindsnet.encoding import PoissonEncoder
from bindsnet.evaluation import all_activity, assign_labels, proportion_weighting
from bindsnet.models import DiehlAndCook2015
from bindsnet.network.monitors import Monitor
from bindsnet.utils import get_square_assignments, get_square_weights
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--n_neurons", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--n_epochs", type=int, default=1)
parser.add_argument("--n_test", type=int, default=10000)
parser.add_argument("--n_train", type=int, default=60000)
parser.add_argument("--n_workers", type=int, default=-1)
parser.add_argument("--update_steps", type=int, default=256)
parser.add_argument("--exc", type=float, default=22.5)
parser.add_argument("--inh", type=float, default=120)
parser.add_argument("--theta_plus", type=float, default=0.05)
parser.add_argument("--time", type=int, default=100)
parser.add_argument("--dt", type=int, default=1.0)
parser.add_argument("--intensity", type=float, default=128)
parser.add_argument("--progress_interval", type=int, default=10)
parser.add_argument("--train", dest="train", action="store_true")
parser.add_argument("--test", dest="train", action="store_false")
parser.add_argument("--plot", dest="plot", action="store_true")
parser.add_argument("--gpu", dest="gpu", action="store_false")
parser.set_defaults(plot=True, gpu=True)
args = parser.parse_args()
seed = args.seed
n_neurons = args.n_neurons
batch_size = args.batch_size
n_epochs = args.n_epochs
n_test = args.n_test
n_train = args.n_train
n_workers = args.n_workers
update_steps = args.update_steps
exc = args.exc
inh = args.inh
theta_plus = args.theta_plus
time = args.time
dt = args.dt
intensity = args.intensity
progress_interval = args.progress_interval
train = args.train
plot = args.plot
gpu = args.gpu
plot = True
update_interval = update_steps * batch_size
device = "cpu"
torch.manual_seed(seed)
torch.set_num_threads(os.cpu_count() - 1)
print("Running on Device = ", device)
# Determines number of workers to use
if n_workers == -1:
n_workers = 0 # gpu * 1 * torch.cuda.device_count()
n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
start_intensity = intensity
# Build network.
network = DiehlAndCook2015(
n_inpt=784,
n_neurons=n_neurons,
exc=exc,
inh=inh,
dt=dt,
norm=78.4,
nu=(1e-4, 1e-2),
theta_plus=theta_plus,
inpt_shape=(1, 28, 28),
)
# Directs network to GPU
if gpu:
network.to("cuda")
# Load MNIST data.
dataset = MNIST(
PoissonEncoder(time=time, dt=dt),
None,
"../../data/MNIST",
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Lambda(lambda x: x * intensity)]
),
)
# Selecting classes 1,2,3,5,6,8
# idx = (dataset.targets==1) | (dataset.targets==2) | (dataset.targets==3) | (dataset.targets==5) | (dataset.targets==6) | (dataset.targets==8)
# dataset.targets = dataset.targets[idx]
# dataset.data = dataset.data[idx]
# Neuron assignments and spike proportions.
n_classes = 10
assignments = -torch.ones(n_neurons, device=device)
proportions = torch.zeros((n_neurons, n_classes), device=device)
rates = torch.zeros((n_neurons, n_classes), device=device)
# Sequence of accuracy estimates.
accuracy = {"all": [], "proportion": []}
# Voltage recording for excitatory and inhibitory layers.
exc_voltage_monitor = Monitor(
network.layers["Ae"], ["v"], time=int(time / dt), device=device
)
inh_voltage_monitor = Monitor(
network.layers["Ai"], ["v"], time=int(time / dt), device=device
)
network.add_monitor(exc_voltage_monitor, name="exc_voltage")
network.add_monitor(inh_voltage_monitor, name="inh_voltage")
# Set up monitors for spikes and voltages
spikes = {}
for layer in set(network.layers):
spikes[layer] = Monitor(
network.layers[layer], state_vars=["s"], time=int(time / dt), device=device
)
network.add_monitor(spikes[layer], name="%s_spikes" % layer)
voltages = {}
for layer in set(network.layers) - {"X"}:
voltages[layer] = Monitor(
network.layers[layer], state_vars=["v"], time=int(time / dt), device=device
)
network.add_monitor(voltages[layer], name="%s_voltages" % layer)
inpt_ims, inpt_axes = None, None
spike_ims, spike_axes = None, None
weights_im = None
assigns_im = None
perf_ax = None
voltage_axes, voltage_ims = None, None
spike_record = torch.zeros((update_interval, int(time / dt), n_neurons), device=device)
# Train the network.
print("\nBegin training.\n")
start = t()
model_to_save = None
running_perf = 0
confusion_matrix = np.zeros((10,10))
mae_from_uniform = []
for epoch in range(n_epochs):
labels = []
if epoch % progress_interval == 0:
print("\n Progress: %d / %d (%.4f seconds)" % (epoch, n_epochs, t() - start))
start = t()
# Create a dataloader to iterate and batch data
train_dataloader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=n_workers,
pin_memory=gpu,
)
pbar_training = tqdm(total=n_train)
for step, batch in enumerate(train_dataloader):
if step > n_train:
break
# Get next input sample.
inputs = {"X": batch["encoded_image"]}
if gpu:
inputs = {k: v.cuda() for k, v in inputs.items()}
if step % update_steps == 0 and step > 0:
# Convert the array of labels into a tensor
label_tensor = torch.tensor(labels, device=device)
# Get network predictions.
all_activity_pred = all_activity(
spikes=spike_record, assignments=assignments, n_labels=n_classes
)
proportion_pred = proportion_weighting(
spikes=spike_record,
assignments=assignments,
proportions=proportions,
n_labels=n_classes,
)
# Compute network accuracy according to available classification strategies.
# new_running_perf = 100 * torch.sum(label_tensor.long() == all_activity_pred).item() / len(label_tensor)
# if (step > 50 and new_running_perf / running_perf > 1.8):
# question = "before: "+str(running_perf)+"after: "+str(new_running_perf)+"step: "+str(step) + " -- would you like to save (Y/N)?"
# save = input(question)
# if save == "Y":
# torch.save(network, "../saved models/"+str(n_neurons)+"_"+str(step*batch_size)+"_after")
# quit()
# torch.save(network, "../saved models/"+str(n_neurons)+"_before")
# running_perf = new_running_perf
accuracy["all"].append(
100
* torch.sum(label_tensor.long() == all_activity_pred).item()
/ len(label_tensor)
)
confusion_matrix = np.zeros((10,10))
# Keep track of the confusion matrix
for i,label_ in enumerate(label_tensor):
real = label_tensor[i]
pred = all_activity_pred[i]
confusion_matrix[real][pred] += 1
accuracy["proportion"].append(
100
* torch.sum(label_tensor.long() == proportion_pred).item()
/ len(label_tensor)
)
print(
"\nAll activity accuracy: %.2f (last), %.2f (average), %.2f (best)"
% (
accuracy["all"][-1],
np.mean(accuracy["all"]),
np.max(accuracy["all"]),
)
)
print(
"Proportion weighting accuracy: %.2f (last), %.2f (average), %.2f"
" (best)\n"
% (
accuracy["proportion"][-1],
np.mean(accuracy["proportion"]),
np.max(accuracy["proportion"]),
)
)
# Assign labels to excitatory layer neurons.
assignments, proportions, rates = assign_labels(
spikes=spike_record,
labels=label_tensor,
n_labels=n_classes,
rates=rates,
)
labels = []
labels.extend(batch["label"].tolist())
input_exc_weights = network.connections[("X", "Ae")].w
# Getting the weights before changing them for the sake of seeing the weight changes
pre_weights = get_square_weights(
input_exc_weights.view(784, n_neurons), n_sqrt, 28
)
# Run the network on the input.
network.run(inputs=inputs, time=time, input_time_dim=1)
input_exc_weights = network.connections[("X", "Ae")].w
# Getting the weights after changing them for the sake of seeing the weight changes
post_weights = get_square_weights(
input_exc_weights.view(784, n_neurons), n_sqrt, 28
)
# The change of the weights from one batch
weight_changes = post_weights - pre_weights
weight_change_count = np.count_nonzero(weight_changes)
# weight_change_count = np.count_nonzero(weight_changes)
# change_arr.append(weight_change_count)
# Add to spikes recording.
s = spikes["Ae"].get("s").permute((1, 0, 2))
spike_record[
(step * batch_size)
% update_interval : (step * batch_size % update_interval)
+ s.size(0)
] = s
# Get voltage recording.
exc_voltages = exc_voltage_monitor.get("v")
inh_voltages = inh_voltage_monitor.get("v")
# Optionally plot various simulation information.
if step % update_steps == 0 and step > 0:
if plot:
image = batch["image"][:, 0].view(28, 28)
inpt = inputs["X"][:, 0].view(time, 784).sum(0).view(28, 28)
lable = batch["label"][0]
input_exc_weights = network.connections[("X", "Ae")].w
square_weights = get_square_weights(
input_exc_weights.view(784, n_neurons), n_sqrt, 28
)
# weights_im = plot_weights(square_weights, im=weights_im, save="../weights/"+str(step)+".png")
# perf_ax = plot_performance(
# accuracy, x_scale=update_steps * batch_size, ax=perf_ax
# )
# weight_changes = torch.from_numpy(normalize(weight_changes))
# weight_changes = get_square_weights(weight_changes.view(784, n_neurons), n_sqrt, 28)
# save_loc = "../weight_changes/"+str(step)+".png"
# weights_im = plot_weights(weight_changes, im=weights_im, save=save_loc)
fig, ax = plt.subplots()
im = ax.imshow(confusion_matrix)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(10):
for j in range(10):
text = ax.text(j, i, confusion_matrix[i, j],
ha="center", va="center", color="w")
ax.set_title("Confusion matrix of MNIST w/ SNN at " + str(step * batch_size))
ax.set_xlabel("predicted label")
ax.set_ylabel("true label")
fig.tight_layout()
plt.savefig("../confusion_matrices/"+str(step)+".png")
plt.close(fig)
fig, ax = plt.subplots()
prediction_freq = np.sum(confusion_matrix, axis=0)
ax.bar(np.arange(10), prediction_freq)  # bar heights assumed: how often each digit was predicted
import numpy as np
import matplotlib.pyplot as plt
from transforms3d.euler import mat2euler
from scipy.linalg import expm
def load_data(file_name):
'''
function to read visual features, IMU measurements and calibration parameters
Input:
file_name: the input data file. Should look like "XXX_sync_KLT.npz"
Output:
t: time stamp
with shape 1 * N_t
features: visual feature point coordinates in stereo images,
with shape 4 * M * N_t, where M is number of features
linear_velocity: IMU measurements in IMU frame
with shape 3 * N_t
rotational_velocity: IMU measurements in IMU frame
with shape 3 * N_t
K: (left)camera intrinsic matrix
[fx 0 cx
0 fy cy
0 0 1]
with shape 3*3
b: stereo camera baseline
with shape 1
cam_T_imu: extrinsic matrix from IMU to (left)camera, in SE(3).
close to
[ 0 -1 0 t1
0 0 -1 t2
1 0 0 t3
0 0 0 1]
with shape 4*4
'''
with np.load(file_name) as data:
t = data["time_stamps"] # time_stamps
features = data["features"] # 4 x num_features : pixel coordinates of features
linear_velocity = data["linear_velocity"] # linear velocity measured in the body frame
rotational_velocity = data["rotational_velocity"] # rotational velocity measured in the body frame
K = data["K"] # intrinsic calibration matrix
b = data["b"] # baseline
cam_T_imu = data["cam_T_imu"] # Transformation from imu to camera frame
return t, features, linear_velocity, rotational_velocity, K, b, cam_T_imu
def visualize_trajectory_2d(pose, landmarks, better_pose, better_landmarks, timestamp, path_name="Unknown", show_ori=False, show_grid=False, savefig=False):
'''
function to visualize the trajectory in 2D
Input:
pose: 4*4*N matrix representing the camera pose,
where N is the number of pose, and each
4*4 matrix is in SE(3)
'''
fig,ax = plt.subplots(figsize=(5, 5))
n_pose = pose.shape[2]
ax.plot(landmarks[0, :], landmarks[1, :], 'g.', markersize=1.5, label='landmarks')
ax.plot(better_landmarks[0, :], better_landmarks[1, :], 'c.', markersize=1.5, label='landmarks_VI')
ax.plot(pose[0, 3, :], pose[1, 3, :], 'r-', markersize=6, label=path_name)
ax.plot(better_pose[0, 3, :], better_pose[1, 3, :], 'b-', markersize=6, label=path_name + "_VI")
ax.scatter(pose[0, 3, 0], pose[1, 3, 0], marker='s', label="start")
ax.scatter(pose[0, 3, -1], pose[1, 3, -1], marker='o', label="end")
if show_ori:
select_ori_index = list(range(0, n_pose, max(int(n_pose / 50), 1)))
yaw_list = []
for i in select_ori_index:
_, _, yaw = mat2euler(pose[:3, :3, i])
yaw_list.append(yaw)
dx = np.cos(yaw_list)
dy = np.sin(yaw_list)
dx,dy = [dx, dy] / np.sqrt(dx**2 + dy**2)
ax.quiver(pose[0, 3, select_ori_index], pose[1, 3, select_ori_index], dx, dy,\
color="b", units="xy", width=1)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('timestamp ' + timestamp)
ax.axis('equal')
ax.grid(show_grid)
ax.legend()
if savefig:
fig.savefig("d" + path_name + "t" + timestamp, dpi = 300)
plt.show(block=True)
return fig, ax
# form the skew-symmetric matrix from a given vector x
def hat_map_3(x):
hat_map = np.array([[ 0, -x[2], x[1]],
[x[2], 0, -x[0]],
[-x[1], x[0], 0]])
return hat_map
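# Quick check (added for illustration): hat_map_3(x) applied to y reproduces the
# cross product of x and y, the defining property of the skew-symmetric map.
def _hat_map_3_check():
    x = np.array([1., 2., 3.])
    y = np.array([-1., 0.5, 2.])
    assert np.allclose(np.dot(hat_map_3(x), y), np.cross(x, y))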
def hat_map_6(u):
theta = u[3:, np.newaxis]
p = u[:3, np.newaxis]
hat_map = np.block([[hat_map_3(theta), -p],
[np.zeros((1, 4))]])
return hat_map
def projection(q):
return q / q[2]
def projection_derivative(q):
derivative = np.array([[1, 0, -q[0]/q[2], 0],
[0, 1, -q[1]/q[2], 0],
[0, 0, 0, 0],
[0, 0, -q[3]/q[2], 1]])
return derivative / q[2]
# K is the calibration matrix, b is the baseline
def stereo_camera_model(K, b):
M = np.array([[K[0, 0], 0, K[0, 2], 0],
[ 0, K[1, 1], K[1, 2], 0],
[K[0, 0], 0, K[0, 2], -K[0, 0] * b],
[ 0, K[1, 1], K[1, 2], 0]])
return M
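# Illustrative helper (assumed convention: q is a homogeneous point [X, Y, Z, 1]
# in the left camera frame): the stereo observation is M @ projection(q),
# yielding [u_left, v_left, u_right, v_right].
def _stereo_projection_example(K, b, q):
    M = stereo_camera_model(K, b)
    return M.dot(projection(q))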
# Converts car's current inverse pose (U) to world-frame
def world_T_imu(mean_pose):
R_T = np.transpose(mean_pose[:3, :3])
p = mean_pose[:3, 3].reshape(3, 1)
U_inv = np.vstack((np.hstack((R_T, -np.dot(R_T, p))), np.array([0, 0, 0, 1])))
return U_inv
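# Small self-check (illustrative only): for a rigid-body pose U in SE(3),
# world_T_imu(U) is its inverse, so the product should be the identity.
def _world_T_imu_example():
    R = expm(hat_map_3(np.array([0.1, -0.2, 0.3])))  # rotation via the exponential map
    U = np.eye(4)
    U[:3, :3] = R
    U[:3, 3] = np.array([1.0, 2.0, 3.0])
    assert np.allclose(world_T_imu(U).dot(U), np.eye(4))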
def EKF_inertial_prediction(Car, v, omega, tau, weight_v = 0.00001, weight_omega = 0.0001):
# covariance for movement noise
W = np.block([[weight_v * np.eye(3), np.zeros((3,3))],
[ np.zeros((3, 3)), weight_omega * np.eye(3)]])
tau = -(tau)
u_hat = np.vstack((np.hstack((hat_map_3(omega), v.reshape(3, 1))),
|
np.zeros((1, 4))
|
numpy.zeros
|
# -*- coding: utf-8 -*-
import os
os.chdir('D:/Python/TAR/system')
import numpy as np
from load_data import load_data, get_nn_predict_values
#from nn_values import get_nn_predict_values
from sklearn.metrics import f1_score, precision_score, recall_score
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout
from keras import regularizers
def concatenate(m1, m2): # expects train/test_S as m1 and freq_train/test type as m2
matrix = []
for i in range(m1.shape[0]): # rows
row = []
for j in range(m1.shape[1]):
row.append(m1[i][j])
row.append(m2[0][i])
matrix.append(np.array(row))
return np.array(matrix)
def concatenate2(m1, m2):
matrix = []
for i in range(m1.shape[0]):
row = []
for j in range(m1.shape[1]):
row.append(m1[i][j])
for j in range(m2.shape[1]):
row.append(m2[i][j])
matrix.append(np.array(row))
return np.array(matrix)
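# Illustrative check (assumes both inputs are 2-D with the same number of rows):
# concatenate2 is equivalent to stacking the two matrices side by side.
def _concatenate2_example():
    a = np.arange(6).reshape(3, 2)
    b = np.arange(9).reshape(3, 3)
    assert np.array_equal(concatenate2(a, b), np.hstack((a, b)))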
def prep_data(train_S, test_S, freq_train, freq_test, chi2_train, chi2_test, tfidf_train, tfidf_test):
nn1_train = concatenate(train_S, freq_train)
nn2_train = concatenate(train_S, chi2_train)
nn3_train = concatenate(train_S, tfidf_train)
nn1_test = concatenate(test_S, freq_test)
nn2_test = concatenate(test_S, chi2_test)
nn3_test = concatenate(test_S, tfidf_test)
return nn1_train, nn2_train, nn3_train, nn1_test, nn2_test, nn3_test
def shuffle_data(X, y):
rng_state = np.random.get_state()
np.random.shuffle(X)
np.random.set_state(rng_state)
np.random.shuffle(y)
return X, y
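# Quick check (illustrative only): reusing the saved RNG state applies the same
# permutation to X and y, so (row, label) pairs stay aligned after shuffling.
def _shuffle_data_example():
    X = np.arange(10).reshape(5, 2)   # row i starts with 2*i
    y = np.arange(5)
    Xs, ys = shuffle_data(X.copy(), y.copy())
    assert np.array_equal(Xs[:, 0] // 2, ys)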
def run_model(filename, train_X, train_y, test_X, test_y):
train_X, train_y = shuffle_data(train_X, train_y)
from sklearn import preprocessing
#train_X = preprocessing.scale(train_X)
#test_X = preprocessing.scale(test_X)
    scaler = preprocessing.StandardScaler().fit(train_X)
    train_X = scaler.transform(train_X)
    test_X = scaler.transform(test_X)
from sklearn import svm
model = svm.SVC()
y =
|
np.argmax(train_y, axis=1)
|
numpy.argmax
|
#!/usr/bin/python
# Wflow is Free software, see below:
#
# Copyright (c) <NAME>/Deltares 2005-2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Run the wflow_sbm hydrological model.
usage
::
wflow_sbm [-h][-v level][-F runinfofile][-L logfile][-C casename][-R runId]
[-c configfile][-T last_step][-S first_step][-s seconds][-W][-E][-N][-U discharge]
[-P parameter multiplication][-X][-f][-I][-i tbl_dir][-x subcatchId][-u updatecols]
[-p inputparameter multiplication][-l loglevel]
-X: save state at the end of the run over the initial conditions at the start
-f: Force overwrite of existing results
-T: Set end time of the run: yyyy-mm-dd hh:mm:ss
-S: Set start time of the run: yyyy-mm-dd hh:mm:ss
-s: Set the model timesteps in seconds
-I: re-initialize the initial model conditions with default
-i: Set input table directory (default is intbl)
-x: Apply multipliers (-P/-p ) for subcatchment only (e.g. -x 1)
-C: set the name of the case (directory) to run
-R: set the name runId within the current case
-L: set the logfile
-E: Switch on reinfiltration of overland flow
-c: name of wflow the configuration file (default: Casename/wflow_sbm.ini).
-h: print usage information
-W: If set, this flag indicates that an ldd is created for the water level
for each timestep. If not the water is assumed to flow according to the
DEM. Wflow will run a lot slower with this option. Most of the time
(shallow soil, steep topography) you do not need this option. Also, if you
need it you might actually need another model.
-U: The argument to this option should be a .tss file with measured discharge in
[m^3/s] which the program will use to update the internal state to match
the measured flow. The number of columns in this file should match the
number of gauges in the wflow_gauges.map file.
-u: list of gauges/columns to use in update. Format:
-u [1 , 4 ,13]
The above example uses column 1, 4 and 13
-P: set parameter change string (e.g: -P "self.FC = self.FC * 1.6") for non-dynamic variables
-p: set parameter change string (e.g: -P "self.Precipitation = self.Precipitation * 1.11") for
dynamic variables
-l: loglevel (must be one of DEBUG, WARNING, ERROR)
"""
import os.path
import numpy as np
import pcraster.framework
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
from wflow.wflow_funcs import *
import pcraster as pcr
import pdb
import math
from numba import jit
wflow = "wflow_sbm: "
updateCols = []
def usage(*args):
    """Print any given messages followed by the module usage text, then exit."""
    sys.stdout = sys.stderr
for msg in args:
print(msg)
print(__doc__)
sys.exit(0)
def estimate_iterations_kin_wave(Q, Beta, alpha, timestepsecs, dx, mv):
celerity = pcr.ifthen(Q > 0.0, 1.0 / (alpha * Beta * Q**(Beta-1)))
courant = (timestepsecs / dx) * celerity
np_courant = pcr.pcr2numpy(courant, mv)
np_courant[np_courant==mv] = np.nan
try:
it_kin = int(np.ceil(1.25*(np.nanpercentile(np_courant,95))))
except:
it_kin = 1
return it_kin
@jit(nopython=True)
def _sCurve(X, a=0.0, b=1.0, c=1.0):
"""
sCurve function:
Input:
- X input map
    - c determines the steepness or "stepwiseness" of the curve.
      The higher c, the sharper the function. A negative c reverses the function.
- b determines the amplitude of the curve
- a determines the centre level (default = 0)
Output:
- result
"""
s = 1.0 / (b + np.exp(-c * (X - a)))
return s
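# Quick illustration of the S-curve above (not part of the model itself): with the
# defaults a=0, b=1, c=1 it rises from ~0 to ~1 and crosses 0.5 exactly at X = a.
def _sCurve_example():
    assert abs(_sCurve(0.0) - 0.5) < 1e-12
    assert _sCurve(-50.0) < 1e-6
    assert _sCurve(50.0) > 1.0 - 1e-6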
@jit(nopython=True)
def actEvap_unsat_SBM(
RootingDepth,
UStoreDepth,
UStoreLayerThickness,
sumLayer,
RestPotEvap,
sumActEvapUStore,
c,
L,
thetaS,
thetaR,
ust=0,
):
"""
Actual evaporation function:
- first try to get demand from the saturated zone, using the rootingdepth as a limiting factor
- secondly try to get the remaining water from the unsaturated store
    - it uses an S-Curve to make sure roots get wet/dry gradually (basically)
      representing a root-depth distribution
    if ust is True, all of ustore is deemed to be available for the roots
Input:
- RootingDepth, UStoreDepth, FirstZoneDepth, PotTrans, smoothpar
Output:
- ActEvap, FirstZoneDepth, UStoreDepth ActEvapUStore
"""
# AvailCap is fraction of unsat zone containing roots
if ust >= 1:
AvailCap = UStoreDepth * 0.99
else:
if L > 0:
AvailCap = min(1.0, max(0.0, (RootingDepth - sumLayer) / L))
else:
AvailCap = 0.0
MaxExtr = AvailCap * UStoreDepth
# Calculate the reduction of RestPotEvap due to differences in rooting density in the soil column
# The used model is based on Vrugt et al. (2001) and uses as input parameters for z* and Pz the
# values of Hoffman and van Genuchten (z* = 0.20 and Pz = 1.00)
# Next step is to make use of the Feddes curve in order to decrease ActEvapUstore when soil moisture values
# occur above or below ideal plant growing conditions (see also Feddes et al., 1978). h1-h4 values are
# actually negative, but all values are made positive for simplicity.
hb = 1 # cm (pF 1 for atmospheric pressure)
h1 = 1 # cm
h2 = 100 # cm (pF 2 for field capacity)
h3 = 400 # cm (pF 3, critical pF value)
h4 = 15849 # cm (pF 4.2, wilting point)
# According to Brooks-Corey
par_lambda = 2 / (c - 3)
if L > 0.0:
vwc = UStoreDepth / L
else:
vwc = 0.0
vwc = max(vwc, 0.0000001)
head = hb / (
((vwc) / (thetaS - thetaR)) ** (1 / par_lambda)
) # Note that in the original formula, thetaR is extracted from vwc, but thetaR is not part of the numerical vwc calculation
head = max(head,hb)
# Transform h to a reduction coefficient value according to Feddes et al. (1978).
if(head <= h1):
alpha = 0
elif(head >= h4):
alpha = 0
elif((head < h2) & (head > h1)):
alpha = (head - h1) / (h2 - h1)
elif((head > h3) & (head < h4)):
alpha = 1 - (head - h3) / (h4 - h3)
else:
alpha = 1
ActEvapUStore = (min(MaxExtr, RestPotEvap, UStoreDepth)) * alpha
UStoreDepth = UStoreDepth - ActEvapUStore
RestPotEvap = RestPotEvap - ActEvapUStore
sumActEvapUStore = ActEvapUStore + sumActEvapUStore
return UStoreDepth, sumActEvapUStore, RestPotEvap
@jit(nopython=True)
def infiltration(AvailableForInfiltration, PathFrac, cf_soil, TSoil,InfiltCapSoil,InfiltCapPath, UStoreCapacity, modelSnow, soilInfReduction):
SoilInf = AvailableForInfiltration * (1 - PathFrac)
PathInf = AvailableForInfiltration * PathFrac
if modelSnow & soilInfReduction:
bb = 1.0 / (1.0 - cf_soil)
soilInfRedu = _sCurve(TSoil, a=0.0, b=bb, c=8.0)
else:
soilInfRedu = 1.0
MaxInfiltSoil = min(InfiltCapSoil * soilInfRedu, SoilInf)
MaxInfiltPath = min(InfiltCapPath * soilInfRedu, PathInf)
InfiltSoilPath = min(MaxInfiltPath + MaxInfiltSoil, max(0.0, UStoreCapacity))
return InfiltSoilPath
@jit(nopython=True)
def unsatzone_flow(UStoreLayerDepth, InfiltSoilPath, L, z, KsatVerFrac, c, KsatVer, f, thetaS, thetaR, SoilWaterCapacity, SWDold, shape_layer, TransferMethod):
m = 0
UStoreLayerDepth[m] = UStoreLayerDepth[m] + InfiltSoilPath
if L[m] > 0.0:
#sbm option for vertical transfer (only for 1 layer)
if (TransferMethod == 1 and shape_layer == 1):
Sd = SoilWaterCapacity - SWDold
if Sd <= 0.00001:
st = 0.0
else:
st = KsatVerFrac[m] * KsatVer * (min(UStoreLayerDepth[m],L[m]*(thetaS-thetaR))/Sd)
else:
st = KsatVerFrac[m] * KsatVer * np.exp(-f * z[m]) * min((UStoreLayerDepth[m]/(L[m] * (thetaS-thetaR)))**c[m],1.0)
ast = min(st,UStoreLayerDepth[m])
UStoreLayerDepth[m] = UStoreLayerDepth[m] - ast
else:
ast = 0.0
for m in range(1,len(L)):
UStoreLayerDepth[m] = UStoreLayerDepth[m] + ast
if L[m] > 0.0:
st = KsatVerFrac[m] * KsatVer * np.exp(-f* z[m]) * min((UStoreLayerDepth[m]/(L[m] * (thetaS-thetaR)))**c[m],1.0)
ast = min(st,UStoreLayerDepth[m])
else:
ast = 0.0
UStoreLayerDepth[m] = UStoreLayerDepth[m] - ast
return ast, UStoreLayerDepth
@jit(nopython=True)
def sbm_cell(nodes, nodes_up, ldd, layer, static, dyn, modelSnow, soilInfReduction, timestepsecs, basetimestep, deltaT, nrpaddyirri, shape, TransferMethod, it_kinL=1, ust=0):
shape_layer = layer['UStoreLayerThickness'].shape
# flat new state
ssf_new = np.zeros(dyn['ssf'].size, dtype=dyn['ssf'].dtype)
qo_new = np.zeros(dyn['LandRunoff'].size, dtype=dyn['LandRunoff'].dtype)
qo_new = np.concatenate((qo_new, np.array([0], dtype=dyn['LandRunoff'].dtype)))
# append zero to end to deal with nodata (-1) in indices
ssf_new = np.concatenate((ssf_new, np.array([0], dtype=dyn['ssf'].dtype)))
ldd_ = np.concatenate((ldd, np.array([0], dtype=ldd.dtype)))
slope_ = np.concatenate((static['slope'], np.array([0], dtype=static['slope'].dtype)))
SWDold = np.zeros(dyn['ssf'].size, dtype=dyn['ssf'].dtype)
sumUSold = np.zeros(dyn['ssf'].size, dtype=dyn['ssf'].dtype)
for i in range(len(nodes)):
for j in range(len(nodes[i])):
idx = nodes[i][j]
nbs = nodes_up[i][j]
sumlayer = np.unique(layer['UStoreLayerThickness'][:,idx].cumsum())
sumlayer_0 = np.concatenate((np.array([0.0]), sumlayer))
SWDold[idx] = dyn['SatWaterDepth'][idx]
sumUSold[idx] = layer['UStoreLayerDepth'][:,idx].sum()
n = np.where(dyn['zi'][idx] > sumlayer_0)[0]
if len(n) > 1:
L = np.concatenate((layer['UStoreLayerThickness'][n[0:-1],idx], np.array([dyn['zi'][idx] - sumlayer_0[n[-1]]]))).astype(np.float64)
else:
L = np.array([dyn['zi'][idx]]).astype(np.float64)
z = L.cumsum()
dyn['ActEvapUStore'][idx] = 0.0
if static['River'][idx]:
ind = np.where(ldd_[nbs] != ldd_[idx])
chanperc = np.zeros(ldd_[nbs].size)
chanperc[ind] = slope_[nbs][ind]/(slope_[idx]+slope_[nbs][ind])
ssf_in = np.sum((1-chanperc)*ssf_new[nbs])
dyn['ssf_toriver'][idx] = np.sum((chanperc)*ssf_new[nbs])/(1000*1000*1000)/timestepsecs
else:
ssf_in = np.sum(ssf_new[nbs])
dyn['CellInFlow'][idx] = ssf_in
UStoreCapacity = static['SoilWaterCapacity'][idx] - dyn['SatWaterDepth'][idx] - layer['UStoreLayerDepth'][n,idx].sum()
InfiltSoilPath = infiltration(dyn['AvailableForInfiltration'][idx], static['PathFrac'][idx], static['cf_soil'][idx],
dyn['TSoil'][idx],static['InfiltCapSoil'][idx],static['InfiltCapPath'][idx],UStoreCapacity, modelSnow, soilInfReduction)
dyn['InfiltSoilPath'][idx] = InfiltSoilPath
# unsat fluxes first
ast, layer['UStoreLayerDepth'][:,idx] = unsatzone_flow(layer['UStoreLayerDepth'][:,idx], InfiltSoilPath, L, z, layer['KsatVerFrac'][:,idx], layer['c'][:,idx], static['KsatVer'][idx], static['f'][idx],
static['thetaS'][idx], static['thetaR'][idx], static['SoilWaterCapacity'][idx], SWDold[idx], shape_layer[0], TransferMethod)
dyn['Transfer'][idx] = ast
# then evaporation from layers
for k in range(len(L)):
if k==0:
SaturationDeficit = static['SoilWaterCapacity'][idx] - dyn['SatWaterDepth'][idx]
if shape_layer[0] == 1:
soilevapunsat = dyn['restEvap'][idx] * min(1.0, SaturationDeficit / static['SoilWaterCapacity'][idx])
else:
if len(L) == 1:
if dyn['zi'][idx] > 0:
soilevapunsat = dyn['restEvap'][idx] * min(1.0, layer['UStoreLayerDepth'][k,idx]/dyn['zi'][idx])
else:
soilevapunsat = 0.0
else:
soilevapunsat = dyn['restEvap'][idx] * min(1.0, layer['UStoreLayerDepth'][k,idx]/(layer['UStoreLayerThickness'][k,idx]*(static['thetaS'][idx]-static['thetaR'][idx])))
soilevapunsat = min(soilevapunsat, layer['UStoreLayerDepth'][k,idx])
dyn['restEvap'][idx] = dyn['restEvap'][idx] - soilevapunsat
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] - soilevapunsat
if shape_layer[0] == 1:
soilevapsat = 0.0
else:
if len(L) == 1:
soilevapsat = dyn['restEvap'][idx] * min(1.0, (layer['UStoreLayerThickness'][k,idx] - dyn['zi'][idx])/ layer['UStoreLayerThickness'][k,idx])
soilevapsat = min(soilevapsat, (layer['UStoreLayerThickness'][k,idx] - dyn['zi'][idx]) * (static['thetaS'][idx] - static['thetaR'][idx]))
else:
soilevapsat = 0.0
dyn['soilevap'][idx] = soilevapunsat + soilevapsat
dyn['SatWaterDepth'][idx] = dyn['SatWaterDepth'][idx] - soilevapsat
# evaporation available for transpiration
PotTrans = dyn['PotTransSoil'][idx] - dyn['soilevap'][idx] - dyn['ActEvapOpenWaterLand'][idx]
# evaporation from saturated store
wetroots = _sCurve(dyn['zi'][idx], a=static['ActRootingDepth'][idx], c=static['rootdistpar'][idx])
dyn['ActEvapSat'][idx] = min(PotTrans * wetroots, dyn['SatWaterDepth'][idx])
dyn['SatWaterDepth'][idx] = dyn['SatWaterDepth'][idx] - dyn['ActEvapSat'][idx]
RestPotEvap = PotTrans - dyn['ActEvapSat'][idx]
# actual evaporation from UStore
layer['UStoreLayerDepth'][k,idx], dyn['ActEvapUStore'][idx], RestPotEvap = actEvap_unsat_SBM(static['ActRootingDepth'][idx], layer['UStoreLayerDepth'][k,idx], layer['UStoreLayerThickness'][k,idx],
sumlayer[k], RestPotEvap, dyn['ActEvapUStore'][idx], layer['c'][k,idx], L[k], static['thetaS'][idx], static['thetaR'][idx], ust)
else:
# actual evaporation from UStore
layer['UStoreLayerDepth'][k,idx], dyn['ActEvapUStore'][idx], RestPotEvap = actEvap_unsat_SBM(static['ActRootingDepth'][idx], layer['UStoreLayerDepth'][k,idx], layer['UStoreLayerThickness'][k,idx],
sumlayer[k], RestPotEvap, dyn['ActEvapUStore'][idx], layer['c'][k,idx], L[k], static['thetaS'][idx], static['thetaR'][idx], ust)
#check soil moisture balance per layer
du = 0.0
for k in range(L.size-1,-1,-1):
du = max(0,layer['UStoreLayerDepth'][k,idx] - L[k]*(static['thetaS'][idx]-static['thetaR'][idx]))
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] - du
if k > 0:
layer['UStoreLayerDepth'][k-1,idx] = layer['UStoreLayerDepth'][k-1,idx] + du
Ksat = layer['KsatVerFrac'][len(L)-1,idx] * static['KsatVer'][idx] *
|
np.exp(-static['f'][idx] * dyn['zi'][idx])
|
numpy.exp
|
import numpy as np
from r_support import matrix, logger
def get_num_batches(n, batch_size):
return int(np.ceil(n * 1.0 / batch_size))
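# Example (illustrative): 1050 instances with batch_size=100 gives 11 batches,
# the last of which is partial.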
def get_sgd_batch(x, y, i, batch_size, shuffled_idxs=None):
s = i * batch_size
e = min(x.shape[0], (i + 1) * batch_size)
if shuffled_idxs is None:
idxs = np.arange(s, e)
else:
idxs = shuffled_idxs[np.arange(s, e)]
return matrix(x[idxs, :], ncol=x.shape[1]), y[idxs]
def avg_loss_check(losses, epoch, n=20, eps=1e-6):
if epoch < n + 1:
return False
avg1 = np.mean(losses[(epoch-1-n):(epoch-1)])
avg2 = np.mean(losses[(epoch-n):(epoch)])
if np.abs(avg1 - avg2) < eps:
return True
return False
def debug_log_sgd_losses(sgd_type, losses, epoch, n=20):
if False:
# disable logging -- should be used in PRODUCTION
return
elif True:
# minimal info
logger.debug("[%s] epochs: %d; avg last %d losses:%f" %
(sgd_type, epoch, n, np.mean(losses[(epoch-min(n, epoch)):(epoch)])))
else:
# maximum info
logger.debug("[%s] epochs: %d; avg last %d losses:%f\n%s\n%s" %
(sgd_type, epoch, n, np.mean(losses[(epoch-min(n, epoch)):(epoch)]),
str(list(losses[0:min(n, epoch)])),
str(list(losses[(epoch-min(n, epoch)):(epoch)]))))
def sgd(w0, x, y, f, grad, learning_rate=0.01,
batch_size=100, max_epochs=1000, eps=1e-6, shuffle=False, rng=None):
n = x.shape[0]
n_batches = get_num_batches(n, batch_size)
w = np.copy(w0)
epoch_losses = np.zeros(max_epochs, dtype=float)
epoch = 0
w_best = np.copy(w0)
loss_best = np.inf
if n <= batch_size:
shuffle = False # no need to shuffle since all instances will be used up in one batch
if shuffle:
shuffled_idxs = np.arange(n)
if rng is None:
np.random.shuffle(shuffled_idxs)
else:
rng.shuffle(shuffled_idxs)
else:
shuffled_idxs = None
while epoch < max_epochs:
losses = np.zeros(n_batches, dtype=float)
for i in range(n_batches):
xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
if xi.shape[0] == 0:
raise ValueError("Batch size of 0")
g = grad(w, xi, yi)
w -= learning_rate * g
losses[i] = f(w, xi, yi)
if False:
g_norm = g.dot(g)
if np.isnan(g_norm) or np.isinf(g_norm):
logger.debug("|grad|=%f, i=%d/%d, epoch:%d" % (g.dot(g), i+1, n_batches, epoch))
logger.debug("|w0|=%f" % w0.dot(w0))
raise ArithmeticError("grad is nan/inf in sgd")
loss = np.mean(losses)
if np.isnan(loss):
logger.debug("loss is nan")
logger.debug("|w|=%f" % w.dot(w))
raise ArithmeticError("loss is nan in sgd")
epoch_losses[epoch] = loss
if loss < loss_best:
# pocket algorithm
np.copyto(w_best, w)
loss_best = loss
epoch += 1
if loss < eps:
break
debug_log_sgd_losses("sgd", epoch_losses, epoch, n=20)
# logger.debug("epochs: %d" % epoch)
# logger.debug("net losses:")
# logger.debug("epoch losses:\n%s" % str(epoch_losses[0:epoch]))
# logger.debug("best loss: %f" % loss_best)
return w_best
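# Minimal usage sketch (hypothetical data and loss; assumes r_support.matrix
# returns a plain 2-D ndarray): ordinary least squares fitted with the generic
# sgd() driver above. Defined only for illustration, never called.
def _sgd_least_squares_example():
    rng = np.random.RandomState(42)
    X = rng.randn(200, 3)
    w_true = np.array([1.0, -2.0, 0.5])
    y = X.dot(w_true) + 0.01 * rng.randn(200)
    f = lambda w, xi, yi: np.mean((xi.dot(w) - yi) ** 2)                  # batch loss
    grad = lambda w, xi, yi: 2.0 * xi.T.dot(xi.dot(w) - yi) / xi.shape[0]  # its gradient
    w_hat = sgd(np.zeros(3), X, y, f, grad,
                learning_rate=0.05, batch_size=50, max_epochs=200)
    return w_hat  # expected to be close to w_true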
def sgdRMSProp(w0, x, y, f, grad, learning_rate=0.01,
batch_size=100, max_epochs=1000, delta=1e-6, ro=0.9, eps=1e-6,
shuffle=False, rng=None):
n = x.shape[0]
n_batches = get_num_batches(n, batch_size)
w = np.copy(w0)
r = np.zeros(len(w0), dtype=w0.dtype) # gradient accumulation variable
epoch_losses = np.zeros(max_epochs, dtype=float)
epoch = 0
w_best = np.copy(w0)
loss_best = np.inf
if n <= batch_size:
# no need to shuffle since all instances will be used up in one batch
shuffle = False
if shuffle:
shuffled_idxs = np.arange(n)
if rng is None:
np.random.shuffle(shuffled_idxs)
else:
rng.shuffle(shuffled_idxs)
else:
shuffled_idxs = None
prev_loss = np.inf
while epoch < max_epochs:
losses = np.zeros(n_batches, dtype=float)
for i in range(n_batches):
xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
g = grad(w, xi, yi)
r[:] = ro * r + (1 - ro) * np.multiply(g, g)
dw_scale = (learning_rate / (np.sqrt(delta + r)))
dw = np.multiply(dw_scale, g)
w[:] = w - dw
losses[i] = f(w, xi, yi)
loss = np.mean(losses)
if np.isnan(loss):
logger.debug("loss is nan")
logger.debug("|w|=%f" % w.dot(w))
raise ArithmeticError("loss is nan in sgd")
epoch_losses[epoch] = loss
if loss < loss_best:
# pocket algorithm
np.copyto(w_best, w)
loss_best = loss
epoch += 1
if (loss < eps or np.abs(loss - prev_loss) < eps or
avg_loss_check(epoch_losses, epoch, n=20, eps=eps)):
break
prev_loss = loss
debug_log_sgd_losses("sgdRMSProp", epoch_losses, epoch, n=20)
# logger.debug("epochs: %d" % epoch)
# logger.debug("net losses:")
# logger.debug("epoch losses:\n%s" % str(epoch_losses[0:epoch]))
# logger.debug("best loss: %f" % loss_best)
return w_best
def sgdMomentum(w0, x, y, f, grad, learning_rate=0.01,
batch_size=100, max_epochs=1000,
alpha=0.9, eps=1e-6,
shuffle=False, rng=None):
n = x.shape[0]
n_batches = get_num_batches(n, batch_size)
w = np.copy(w0)
v = np.zeros(len(w0), dtype=w0.dtype) # velocity
epoch_losses = np.zeros(max_epochs, dtype=float)
epoch = 0
w_best = np.copy(w0)
loss_best = np.inf
if n <= batch_size:
# no need to shuffle since all instances will be used up in one batch
shuffle = False
if shuffle:
shuffled_idxs = np.arange(n)
if rng is None:
np.random.shuffle(shuffled_idxs)
else:
rng.shuffle(shuffled_idxs)
else:
shuffled_idxs = None
prev_loss = np.inf
while epoch < max_epochs:
losses = np.zeros(n_batches, dtype=float)
for i in range(n_batches):
xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
g = grad(w, xi, yi)
v[:] = alpha * v - learning_rate * g
w[:] = w + v
losses[i] = f(w, xi, yi)
loss = np.mean(losses)
if np.isnan(loss):
logger.debug("loss is nan")
logger.debug("|w|=%f" % w.dot(w))
raise ArithmeticError("loss is nan in sgd")
epoch_losses[epoch] = loss
if loss < loss_best:
# pocket algorithm
np.copyto(w_best, w)
loss_best = loss
epoch += 1
if (loss < eps or np.abs(loss - prev_loss) < eps or
avg_loss_check(epoch_losses, epoch, n=20, eps=eps)):
break
prev_loss = loss
debug_log_sgd_losses("sgdMomentum", epoch_losses, epoch, n=20)
# logger.debug("epochs: %d" % epoch)
# logger.debug("net losses:")
# logger.debug("epoch losses:\n%s" % str(epoch_losses[0:epoch]))
# logger.debug("best loss: %f" % loss_best)
return w_best
def sgdRMSPropNestorov(w0, x, y, f, grad, learning_rate=0.01,
batch_size=100, max_epochs=1000,
alpha=0.9, delta=1e-6, ro=0.9, eps=1e-6,
shuffle=False, rng=None):
n = x.shape[0]
n_batches = get_num_batches(n, batch_size)
w = np.copy(w0)
v = np.zeros(len(w0), dtype=w0.dtype) # velocity
r = np.zeros(len(w0), dtype=w0.dtype) # gradient accumulation variable
epoch_losses = np.zeros(max_epochs, dtype=float)
epoch = 0
w_best = np.copy(w0)
loss_best = np.inf
if n <= batch_size:
# no need to shuffle since all instances will be used up in one batch
shuffle = False
if shuffle:
shuffled_idxs = np.arange(n)
if rng is None:
np.random.shuffle(shuffled_idxs)
else:
rng.shuffle(shuffled_idxs)
else:
shuffled_idxs = None
prev_loss = np.inf
while epoch < max_epochs:
losses = np.zeros(n_batches, dtype=float)
for i in range(n_batches):
xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
tw = w + alpha * v
g = grad(tw, xi, yi)
r[:] = ro * r + (1 - ro) * np.multiply(g, g)
dw_scale = (learning_rate / (np.sqrt(delta + r)))
v = alpha * v - np.multiply(dw_scale, g)
w[:] = w + v
losses[i] = f(w, xi, yi)
loss = np.mean(losses)
if np.isnan(loss):
logger.debug("loss is nan")
logger.debug("|w|=%f" % w.dot(w))
raise ArithmeticError("loss is nan in sgd")
epoch_losses[epoch] = loss
if loss < loss_best:
# pocket algorithm
np.copyto(w_best, w)
loss_best = loss
epoch += 1
if (loss < eps or np.abs(loss - prev_loss) < eps or
avg_loss_check(epoch_losses, epoch, n=20, eps=eps)):
break
prev_loss = loss
debug_log_sgd_losses("sgdRMSPropNestorov", epoch_losses, epoch, n=20)
# logger.debug("epochs: %d" % epoch)
# logger.debug("net losses:")
# logger.debug("epoch losses:\n%s" % str(epoch_losses[0:epoch]))
# logger.debug("best loss: %f" % loss_best)
return w_best
def sgdAdam(w0, x, y, f, grad, learning_rate=0.01,
batch_size=100, max_epochs=1000, delta=1e-8,
ro1=0.9, ro2=0.999, eps=1e-6,
shuffle=False, rng=None):
n = x.shape[0]
n_batches = get_num_batches(n, batch_size)
w = np.copy(w0)
s = np.zeros(len(w0), dtype=w0.dtype) # first moment variable
s_hat = np.zeros(len(w0), dtype=w0.dtype) # first moment corrected for bias
r = np.zeros(len(w0), dtype=w0.dtype) # second moment variable
r_hat = np.zeros(len(w0), dtype=w0.dtype) # second moment corrected for bias
t = 0 # time step
epoch_losses = np.zeros(max_epochs, dtype=float)
epoch = 0
w_best = np.copy(w0)
loss_best = np.inf
if n <= batch_size:
# no need to shuffle since all instances will be used up in one batch
shuffle = False
if shuffle:
shuffled_idxs = np.arange(n)
if rng is None:
np.random.shuffle(shuffled_idxs)
else:
rng.shuffle(shuffled_idxs)
else:
shuffled_idxs = None
prev_loss = np.inf
while epoch < max_epochs:
losses = np.zeros(n_batches, dtype=float)
for i in range(n_batches):
xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
g = grad(w, xi, yi)
t += 1
s[:] = ro1 * s + (1 - ro1) * g
r[:] = ro2 * r + (1 - ro2) * np.multiply(g, g)
# correct bias in first moment
s_hat[:] = (1./(1 - ro1 ** t)) * s
# correct bias in second moment
r_hat[:] = (1./(1 - ro2 ** t)) * r
dw_scale = (learning_rate / (np.sqrt(delta + r_hat)))
dw = np.multiply(dw_scale, s_hat)
w[:] = w - dw
losses[i] = f(w, xi, yi)
loss = np.mean(losses)
if np.isnan(loss):
logger.debug("loss is nan")
logger.debug("|w|=%f" % w.dot(w))
raise ArithmeticError("loss is nan in sgd")
epoch_losses[epoch] = loss
if loss < loss_best:
# pocket algorithm
|
np.copyto(w_best, w)
|
numpy.copyto
|
# Test script for Uncertainty Calibration in presence of common corruptions
# Evaluate calibration on clean and corrupted data
#
# Last updated: Dec 30 2021
import sys
import numpy as np
import torch
from torchvision import datasets, transforms
from DataLoad import *
from DiGN import DiGN
args = sys.argv[1:]
dataset = args[0]
architecture = args[1]
batch_size = int(args[2])
ensemble_eval = (args[3]=="True")
train_alg = args[4]
eval_noise = (args[5]=="True")
print('Dataset: %s | Architecture: %s | Batch size: %d' % (dataset, architecture, batch_size))
print('Ensemble_eval: %s | Train alg: %s' % (ensemble_eval, train_alg))
print('Evaluate noise only: %s' % (eval_noise))
if dataset in ['cifar10','cifar100']:
ensemble_stddev = 0.1 # CIFAR10/100
else:
ensemble_stddev = 0.3 # Tiny-ImageNet
if dataset=='cifar10':
data_path = './cifar10'
n_classes = 10
get_loaders = get_loaders_cifar10
corrupt_path = './CIFAR-10-C/'
elif dataset=='cifar100':
data_path = './cifar100'
n_classes = 100
get_loaders = get_loaders_cifar100
corrupt_path = './CIFAR-100-C/'
elif dataset=='tinyimagenet':
data_path = './tiny-imagenet-200'
n_classes = 200
get_loaders = get_loaders_tinyimagenet
corrupt_path = './Tiny-ImageNet-C/'
else:
raise ValueError('dataset not supported.')
if architecture=='resnet18':
arch = 'RN18'
elif architecture=='resnet18wide':
arch = 'WRN18'
elif architecture=='resnet18_64':
arch = 'RN18_64'
elif architecture=='resnet18wide_64':
arch = 'RN18W_64'
elif architecture=='densenet121':
arch = 'DN121'
elif architecture=='inceptionv3':
arch = 'IncV3'
else:
raise ValueError('architecture not supported.')
# data loader for training, eval
train_loader, val_loader = get_loaders(data_path=data_path,
batch_size_train=batch_size, batch_size_val=batch_size, num_workers=4)
print('# train batches = ', len(train_loader), ', # val batches = ', len(val_loader))
# architecture
dign = DiGN(architecture, n_classes=n_classes, dataset=dataset)
# number of runs
M = 3
# ======== Auxiliary Functions ===========
def get_corrupt_loader_cifar(corruption_path_base):
labels = np.load(corruption_path_base+'labels.npy')
if eval_noise:
corruption_list=['speckle_noise','impulse_noise','shot_noise']
else:
corruption_list=['saturate','spatter','gaussian_blur','speckle_noise','jpeg_compression','pixelate','elastic_transform','contrast','brightness','fog','frost','snow','zoom_blur','motion_blur','glass_blur','defocus_blur','impulse_noise','shot_noise'] #,'gaussian_noise']
corruption_list.sort()
x_all = np.zeros((50000*len(corruption_list),3,32,32))
labels_all = np.zeros((50000*len(corruption_list)))
start = 0
for i in range(len(corruption_list)):
x_corruption_i = np.load(corruption_path_base+corruption_list[i]+'.npy')
x_corruption_i =
|
np.moveaxis(x_corruption_i, 3, 1)
|
numpy.moveaxis
|
# Copyright 2020 Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event segmentation using a Hidden Markov Model
Given an ROI timeseries, this class uses an annealed fitting procedure to
segment the timeseries into events with stable activity patterns. After
learning the signature activity pattern of each event, the model can then be
applied to other datasets to identify a corresponding sequence of events.
Full details are available in:
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>
Discovering event structure in continuous narrative perception and memory
Neuron, Volume 95, Issue 3, 709 - 721.e5
https://doi.org/10.1016/j.neuron.2017.06.041
This class also extends the model described in the Neuron paper:
1) It allows transition matrices that are composed of multiple separate
chains of events rather than a single linear path. This allows a model to
contain patterns for multiple event sequences (e.g. narratives), and
fit probabilities along each of these chains on a new, unlabeled timeseries.
To use this option, pass in an event_chain vector labeling which events
belong to each chain, define event patterns using set_event_patterns(),
then fit to a new dataset with find_events.
2) To obtain better fits when the underlying event structure contains
events that vary substantially in length, the split_merge option allows
the fit() function to re-distribute events during fitting. The number of
merge/split proposals is controlled by split_merge_proposals, which
controls how thorough versus fast the fitting process is.
"""
# Authors: <NAME> and <NAME> (Princeton University)
import numpy as np
from scipy import stats
import logging
import copy
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted, check_array
from sklearn.exceptions import NotFittedError
import itertools
from . import _utils as utils # type: ignore
logger = logging.getLogger(__name__)
__all__ = [
"EventSegment",
]
class EventSegment(BaseEstimator):
"""Class for event segmentation of continuous fMRI data
Parameters
----------
n_events: int
Number of segments to learn
step_var: Callable[[int], float] : default 4 * (0.98 ** (step - 1))
The Gaussian variance to use during fitting, as a function of the
number of steps. Should decrease slowly over time.
n_iter: int, default: 500
Maximum number of steps to run during fitting
event_chains: ndarray with length = n_events
Array with unique value for each separate chain of events, each linked
in the order they appear in the array
split_merge: bool, default: False
Determines whether merge/split proposals are used during fitting with
fit(). This can improve fitting performance when events are highly
uneven in size, but requires additional time
split_merge_proposals: int, default: 1
Number of merges and splits to consider at each step. Computation time
scales as O(proposals^2) so this should usually be a small value
Attributes
----------
p_start, p_end: length n_events+1 ndarray
initial and final prior distributions over events
P: n_events+1 by n_events+1 ndarray
HMM transition matrix
ll_ : ndarray with length = number of training datasets
Log-likelihood for training datasets over the course of training
segments_: list of (time by event) ndarrays
Learned (soft) segmentation for training datasets
event_var_ : float
Gaussian variance at the end of learning
event_pat_ : voxel by event ndarray
Learned mean patterns for each event
"""
def _default_var_schedule(step):
return 4 * (0.98 ** (step - 1))
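    # The default schedule above gives, e.g., 4.0 at step 1 and ~1.49 at step 50,
    # so the Gaussian variance anneals slowly toward zero as fitting proceeds.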
def __init__(self, n_events=2,
step_var=_default_var_schedule,
n_iter=500, event_chains=None,
split_merge=False, split_merge_proposals=1):
self.n_events = n_events
self.step_var = step_var
self.n_iter = n_iter
self.split_merge = split_merge
self.split_merge_proposals = split_merge_proposals
if event_chains is None:
self.event_chains = np.zeros(n_events)
else:
self.event_chains = event_chains
def _fit_validate(self, X):
"""Validate input to fit()
Validate data passed to fit(). Includes a transpose operation to
change the row/column order of X and z-scoring in time.
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented
Returns
-------
X: list of voxel by time ndarrays
"""
if len(np.unique(self.event_chains)) > 1:
raise RuntimeError("Cannot fit chains, use set_event_patterns")
# Copy X into a list and transpose
X = copy.deepcopy(X)
if type(X) is not list:
X = [X]
for i in range(len(X)):
X[i] = check_array(X[i])
X[i] = X[i].T
# Check that number of voxels is consistent across datasets
n_dim = X[0].shape[0]
for i in range(len(X)):
assert (X[i].shape[0] == n_dim)
# Double-check that data is z-scored in time
for i in range(len(X)):
X[i] = stats.zscore(X[i], axis=1, ddof=1)
return X
def fit(self, X, y=None):
"""Learn a segmentation on training data
Fits event patterns and a segmentation to training data. After
running this function, the learned event patterns can be used to
segment other datasets using find_events
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented. If a list is given, then all datasets
are segmented simultaneously with the same event patterns
y: not used (added to comply with BaseEstimator definition)
Returns
-------
self: the EventSegment object
"""
X = self._fit_validate(X)
n_train = len(X)
n_dim = X[0].shape[0]
self.classes_ = np.arange(self.n_events)
# Initialize variables for fitting
log_gamma = []
for i in range(n_train):
log_gamma.append(np.zeros((X[i].shape[1], self.n_events)))
step = 1
best_ll = float("-inf")
self.ll_ = np.empty((0, n_train))
while step <= self.n_iter:
iteration_var = self.step_var(step)
# Based on the current segmentation, compute the mean pattern
# for each event
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# Based on the current mean patterns, compute the event
# segmentation
self.ll_ = np.append(self.ll_, np.empty((1, n_train)), axis=0)
for i in range(n_train):
logprob = self._logprob_obs(X[i], mean_pat, iteration_var)
log_gamma[i], self.ll_[-1, i] = self._forward_backward(logprob)
if step > 1 and self.split_merge:
curr_ll = np.mean(self.ll_[-1, :])
self.ll_[-1, :], log_gamma, mean_pat = \
self._split_merge(X, log_gamma, iteration_var, curr_ll)
# If log-likelihood has started decreasing, undo last step and stop
if np.mean(self.ll_[-1, :]) < best_ll:
self.ll_ = self.ll_[:-1, :]
break
self.segments_ = [np.exp(lg) for lg in log_gamma]
self.event_var_ = iteration_var
self.event_pat_ = mean_pat
best_ll = np.mean(self.ll_[-1, :])
logger.debug("Fitting step %d, LL=%f", step, best_ll)
step += 1
return self
def _logprob_obs(self, data, mean_pat, var):
"""Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
"""
n_vox = data.shape[0]
t = data.shape[1]
# z-score both data and mean patterns in space, so that Gaussians
# are measuring Pearson correlations and are insensitive to overall
# activity changes
data_z = stats.zscore(data, axis=0, ddof=1)
mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1)
logprob = np.empty((t, self.n_events))
if type(var) is not np.ndarray:
var = var * np.ones(self.n_events)
for k in range(self.n_events):
logprob[:, k] = -0.5 * n_vox * np.log(
2 * np.pi * var[k]) - 0.5 * np.sum(
(data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k]
logprob /= n_vox
return logprob
def _forward_backward(self, logprob):
"""Runs forward-backward algorithm on observation log probs
Given the log probability of each timepoint being generated by
each event, run the HMM forward-backward algorithm to find the
probability that each timepoint belongs to each event (based on the
transition priors in p_start, p_end, and P)
See https://en.wikipedia.org/wiki/Forward-backward_algorithm for
mathematical details
Parameters
----------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
Returns
-------
log_gamma : time by event ndarray
Log probability of each timepoint belonging to each event
ll : float
Log-likelihood of fit
"""
logprob = copy.copy(logprob)
t = logprob.shape[0]
logprob = np.hstack((logprob, float("-inf") * np.ones((t, 1))))
# Initialize variables
log_scale = np.zeros(t)
log_alpha = np.zeros((t, self.n_events + 1))
log_beta = np.zeros((t, self.n_events + 1))
# Set up transition matrix, with final sink state
self.p_start = np.zeros(self.n_events + 1)
self.p_end = np.zeros(self.n_events + 1)
self.P = np.zeros((self.n_events + 1, self.n_events + 1))
label_ind = np.unique(self.event_chains, return_inverse=True)[1]
n_chains = np.max(label_ind) + 1
# For each chain of events, link them together and then to sink state
for c in range(n_chains):
chain_ind = np.nonzero(label_ind == c)[0]
self.p_start[chain_ind[0]] = 1 / n_chains
self.p_end[chain_ind[-1]] = 1 / n_chains
p_trans = (len(chain_ind) - 1) / t
if p_trans >= 1:
raise ValueError('Too few timepoints')
for i in range(len(chain_ind)):
self.P[chain_ind[i], chain_ind[i]] = 1 - p_trans
if i < len(chain_ind) - 1:
self.P[chain_ind[i], chain_ind[i+1]] = p_trans
else:
self.P[chain_ind[i], -1] = p_trans
self.P[-1, -1] = 1
# Forward pass
for i in range(t):
if i == 0:
log_alpha[0, :] = self._log(self.p_start) + logprob[0, :]
else:
log_alpha[i, :] = self._log(np.exp(log_alpha[i - 1, :])
.dot(self.P)) + logprob[i, :]
log_scale[i] = np.logaddexp.reduce(log_alpha[i, :])
log_alpha[i] -= log_scale[i]
# Backward pass
log_beta[-1, :] = self._log(self.p_end) - log_scale[-1]
for i in reversed(range(t - 1)):
obs_weighted = log_beta[i + 1, :] + logprob[i + 1, :]
offset = np.max(obs_weighted)
log_beta[i, :] = offset + self._log(
np.exp(obs_weighted - offset).dot(self.P.T)) - log_scale[i]
# Combine and normalize
log_gamma = log_alpha + log_beta
log_gamma -= np.logaddexp.reduce(log_gamma, axis=1, keepdims=True)
ll = np.sum(log_scale[:(t - 1)]) + np.logaddexp.reduce(
log_alpha[-1, :] + log_scale[-1] + self._log(self.p_end))
log_gamma = log_gamma[:, :-1]
return log_gamma, ll
def _log(self, x):
"""Modified version of np.log that manually sets values <=0 to -inf
Parameters
----------
x: ndarray of floats
Input to the log function
Returns
-------
log_ma: ndarray of floats
log of x, with x<=0 values replaced with -inf
"""
xshape = x.shape
_x = x.flatten()
y = utils.masked_log(_x)
return y.reshape(xshape)
def set_event_patterns(self, event_pat):
"""Set HMM event patterns manually
Rather than fitting the event patterns automatically using fit(), this
function allows them to be set explicitly. They can then be used to
find corresponding events in a new dataset, using find_events().
Parameters
----------
event_pat: voxel by event ndarray
"""
if event_pat.shape[1] != self.n_events:
raise ValueError(("Number of columns of event_pat must match "
"number of events"))
self.event_pat_ = event_pat.copy()
def find_events(self, testing_data, var=None, scramble=False):
"""Applies learned event segmentation to new testing dataset
After fitting an event segmentation using fit() or setting event
patterns directly using set_event_patterns(), this function finds the
same sequence of event patterns in a new testing dataset.
Parameters
----------
testing_data: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
var: float or 1D ndarray of length equal to the number of events
default: uses variance that maximized training log-likelihood
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance. If fit() has not previously
            been run, this must be specified (cannot be None).
scramble: bool : default False
            If true, the order of the learned events is shuffled before
fitting, to give a null distribution
Returns
-------
segments : time by event ndarray
The resulting soft segmentation. segments[t,e] = probability
that timepoint t is in event e
test_ll : float
Log-likelihood of model fit
"""
if var is None:
if not hasattr(self, 'event_var_'):
raise NotFittedError(("Event variance must be provided, if "
"not previously set by fit()"))
else:
var = self.event_var_
if not hasattr(self, 'event_pat_'):
raise NotFittedError(("The event patterns must first be set "
"by fit() or set_event_patterns()"))
if scramble:
mean_pat = self.event_pat_[:, np.random.permutation(self.n_events)]
else:
mean_pat = self.event_pat_
logprob = self._logprob_obs(testing_data.T, mean_pat, var)
lg, test_ll = self._forward_backward(logprob)
segments = np.exp(lg)
return segments, test_ll
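    # Usage sketch (hypothetical arrays; only the shapes matter):
    #   es = EventSegment(n_events=10)
    #   es.fit(train_bold)                         # train_bold: time x voxel
    #   segments, ll = es.find_events(test_bold)   # segments: time x event probabilities
    #   labels = np.argmax(segments, axis=1)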
def predict(self, X):
"""Applies learned event segmentation to new testing dataset
Alternative function for segmenting a new dataset after using
fit() to learn a sequence of events, to comply with the sklearn
Classifier interface
Parameters
----------
X: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
Returns
-------
Event label for each timepoint
"""
check_is_fitted(self, ["event_pat_", "event_var_"])
X = check_array(X)
segments, test_ll = self.find_events(X)
return np.argmax(segments, axis=1)
def calc_weighted_event_var(self, D, weights, event_pat):
"""Computes normalized weighted variance around event pattern
Utility function for computing variance in a training set of weighted
event examples. For each event, the sum of squared differences for all
timepoints from the event pattern is computed, and then the weights
specify how much each of these differences contributes to the
variance (normalized by the number of voxels).
Parameters
----------
D : timepoint by voxel ndarray
fMRI data for which to compute event variances
weights : timepoint by event ndarray
specifies relative weights of timepoints for each event
event_pat : voxel by event ndarray
mean event patterns to compute variance around
Returns
-------
ev_var : ndarray of variances for each event
"""
Dz = stats.zscore(D, axis=1, ddof=1)
ev_var = np.empty(event_pat.shape[1])
for e in range(event_pat.shape[1]):
# Only compute variances for weights > 0.1% of max weight
nz = weights[:, e] > np.max(weights[:, e])/1000
sumsq = np.dot(weights[nz, e],
np.sum(np.square(Dz[nz, :] -
event_pat[:, e]), axis=1))
ev_var[e] = sumsq/(np.sum(weights[nz, e]) -
np.sum(np.square(weights[nz, e])) /
np.sum(weights[nz, e]))
ev_var = ev_var / D.shape[1]
return ev_var
def model_prior(self, t):
"""Returns the prior probability of the HMM
Runs forward-backward without any data, showing the prior distribution
of the model (for comparison with a posterior).
Parameters
----------
t: int
Number of timepoints
Returns
-------
segments : time by event ndarray
segments[t,e] = prior probability that timepoint t is in event e
test_ll : float
Log-likelihood of model (data-independent term)"""
lg, test_ll = self._forward_backward(
|
np.zeros((t, self.n_events))
|
numpy.zeros
|
import pytest
import pyCGM_Single.pycgmStatic as pycgmStatic
import numpy as np
from mock import patch
rounding_precision = 8
class TestPycgmStaticAxis():
"""
This class tests the axis functions in pycgmStatic.py:
staticCalculationHead
pelvisJointCenter
hipJointCenter
hipAxisCenter
kneeJointCenter
ankleJointCenter
footJointCenter
headJC
uncorrect_footaxis
rotaxis_footflat
rotaxis_nonfootflat
findJointC
"""
nan_3d = [np.nan, np.nan, np.nan]
rand_coor = [np.random.randint(0, 10), np.random.randint(0, 10), np.random.randint(0, 10)]
@pytest.mark.parametrize(["head", "expected"], [
# Test from running sample data
([[[244.87227957886893, 326.0240255639856, 1730.4189843948805],
[243.89575702706503, 325.0366593474616, 1730.1515677531293],
[244.89086730509763, 324.80072493605866, 1731.1283433097797]],
[244.89547729492188, 325.0578918457031, 1730.1619873046875]],
0.25992807335420975),
# Test with zeros for all params
([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [0, 0, 0]],
np.nan),
# Testing when values are added to head[0][0]
([[[-1, 8, 9], [0, 0, 0], [0, 0, 0]], [0, 0, 0]],
1.5707963267948966),
# Testing when values are added to head[0][1]
([[[0, 0, 0], [7, 5, 7], [0, 0, 0]], [0, 0, 0]],
np.nan),
# Testing when values are added to head[0][2]
([[[0, 0, 0], [0, 0, 0], [3, -6, -2]], [0, 0, 0]],
0.0),
# Testing when values are added to head[0]
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [0, 0, 0]],
-1.3521273809209546),
# Testing when values are added to head[1]
([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [-4, 7, 8]],
0.7853981633974483),
# Testing when values are added to head
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [-4, 7, 8]],
-0.09966865249116204),
# Testing that when head is composed of lists of ints
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [-4, 7, 8]],
-0.09966865249116204),
# Testing that when head is composed of numpy arrays of ints
([np.array([[-1, 8, 9], [7, 5, 7], [3, -6, -2]], dtype='int'), np.array([-4, 7, 8], dtype='int')],
-0.09966865249116204),
# Testing that when head is composed of lists of floats
([[[-1.0, 8.0, 9.0], [7.0, 5.0, 7.0], [3.0, -6.0, -2.0]], [-4.0, 7.0, 8.0]],
-0.09966865249116204),
# Testing that when head is composed of numpy arrays of floats
([np.array([[-1.0, 8.0, 9.0], [7.0, 5.0, 7.0], [3.0, -6.0, -2.0]], dtype='float'), np.array([-4.0, 7.0, 8.0], dtype='float')],
-0.09966865249116204)])
def test_staticCalculationHead(self, head, expected):
"""
This test provides coverage of the staticCalculationHead function in pycgmStatic.py, defined as staticCalculationHead(frame, head)
This test takes 2 parameters:
head: array containing the head axis and head origin
expected: the expected result from calling staticCalculationHead on head
This function first calculates the x, y, z axes of the head by subtracting the given head axes by the head
origin. It then calls headoffCalc on this head axis and a global axis to find the head offset angles.
This test ensures that:
- the head axis and the head origin both have an effect on the final offset angle
- the resulting output is correct when head is composed of lists of ints, numpy arrays of ints, lists of
floats, and numpy arrays of floats.
"""
result = pycgmStatic.staticCalculationHead(None, head)
np.testing.assert_almost_equal(result, expected, rounding_precision)
@pytest.mark.parametrize(["frame", "expected"], [
# Test from running sample data
({'RASI': np.array([357.90066528, 377.69210815, 1034.97253418]),
'LASI': np.array([145.31594849, 405.79052734, 1030.81445312]),
'RPSI': np.array([274.00466919, 205.64402771, 1051.76452637]),
'LPSI': np.array([189.15231323, 214.86122131, 1052.73486328])},
[np.array([251.60830688, 391.74131775, 1032.89349365]),
np.array([[251.74063624, 392.72694721, 1032.78850073], [250.61711554, 391.87232862, 1032.8741063], [251.60295336, 391.84795134, 1033.88777762]]),
np.array([231.57849121, 210.25262451, 1052.24969482])]),
# Test with zeros for all params
({'SACR': np.array([0, 0, 0]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[
|
np.array([0, 0, 0])
|
numpy.array
|
import concurrent.futures
import os
import pathlib
from typing import List
import cv2
import numpy as np
import pandas as pd
import torch
import torchvision
def compute_stats(fpath):
img = cv2.imread(str(fpath))
img = img.reshape(-1, 3).astype(np.float32) / 255.0
sum_ = np.sum(img, axis=0)
sum_sq = np.sum(img**2, axis=0)
n_pixels = img.shape[0]
return sum_, sum_sq, n_pixels
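# Sketch of how the per-image accumulators above combine into dataset-wide
# statistics (illustrative; this helper and its names are not part of the original code):
def _combine_stats(sums, sums_sq, pixel_counts):
    total_n = float(np.sum(pixel_counts))
    channel_mean = np.sum(sums, axis=0) / total_n
    # Var[x] = E[x^2] - E[x]^2, computed per channel
    channel_std = np.sqrt(np.sum(sums_sq, axis=0) / total_n - channel_mean ** 2)
    return channel_mean, channel_std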
def dataset_stats(image_dir: pathlib.Path, labels_fpath: pathlib.Path):
"""
Compute mean and stdev of each channel of an image dataset.
https://kozodoi.me/python/deep%20learning/pytorch/tutorial/2021/03/08/image-mean-std.html
"""
df = pd.read_parquet(labels_fpath)
fnames = df["id"].unique()
sum_ =
|
np.zeros((3,))
|
numpy.zeros
|
import numpy as np
from .creplace import point_op
from .utils import rcosFn
from .image_stats import var
def ramp(size, direction=0, slope=1, intercept=0, origin=None):
'''make a ramp matrix
Compute a matrix containing samples of a ramp function in a given direction.
Arguments
---------
size : `int` or `tuple`
if an int, we assume the ramp should be of dimensions `(size, size)`. if a tuple, must be a
2-tuple of ints specifying the dimensions
direction : `float`
the direction of the ramp's gradient direction, in radians, clockwise from the X-axis
slope : `float`
the slope of the ramp (per pixel)
    intercept : `float`
the value of the ramp at the origin
origin : `int`, `tuple`, or None
the origin of the matrix. if an int, we assume the origin is at `(origin, origin)`. if a
tuple, must be a 2-tuple of ints specifying the origin (where `(0, 0)` is the upper left).
if None, we assume the origin lies at the center of the matrix, `(size-1)/2`.
Returns
-------
res : `np.array`
the ramp matrix
'''
if not hasattr(size, '__iter__'):
size = (size, size)
if origin is None:
# TODO understand why minus one (not plus)
origin = ((size[0] - 1)/2., (size[1] - 1)/2.)
# origin = ( (size[0] + 1)/2., (size[1] + 1)/2. )
elif not hasattr(origin, '__iter__'):
origin = (origin, origin)
xinc = slope * np.cos(direction)
yinc = slope *
|
np.sin(direction)
|
numpy.sin
|
import numpy as np
import os
import random
from classifiers.Images import load_transform, get_shuffled_images
class OMNIGLOTGenerator(object):
"""Docstring for OmniglotGenerator"""
def __init__(self, data_folder, letter_swap=1, batch_size=1, classes=5, samples_per_class=10, max_rotation=0., max_shift=0., img_size=(20,20), number_of_classes=30, max_iter=None, only_labels_and_images=False):
super(OMNIGLOTGenerator, self).__init__()
self.data_folder = data_folder
self.letter_swap = letter_swap
self.batch_size = batch_size
self.number_of_classes = number_of_classes
self.classes = classes
self.samples_per_class = samples_per_class
self.max_rotation = max_rotation
self.max_shift = max_shift
self.img_size = img_size
self.max_iter = max_iter
self.num_iter = 0
self.only_labels_and_images = only_labels_and_images
self.character_folders = [os.path.join(self.data_folder, family, character) \
for family in os.listdir(self.data_folder) \
if os.path.isdir(os.path.join(self.data_folder, family)) \
for character in os.listdir(os.path.join(self.data_folder, family))]
print(self.character_folders)
self.working_characters = random.sample(self.character_folders, self.classes)
self.working_labels =
|
np.random.choice(self.number_of_classes, self.classes, replace=False)
|
numpy.random.choice
|
import unittest
import numpy
from cqcpy import test_utils
from cqcpy import spin_utils
from kelvin import ft_cc_equations
from kelvin import quadrature
class FTamplEquationsTest(unittest.TestCase):
def setUp(self):
self.thresh = 1e-12
self.n = 5
self.ng = 4
self.beta = 2.0
def test_ccsd_stanton(self):
ng = self.ng
n = self.n
T1old, T2old = test_utils.make_random_ft_T(ng, n)
L1old, L2old = test_utils.make_random_ft_T(ng, n)
F, I = test_utils.make_random_integrals(n, n)
D1, D2 = test_utils.make_random_ft_D(n)
ti, g, G = quadrature.simpsons(ng, self.beta)
T1sim, T2sim = ft_cc_equations.ccsd_simple(
F, I, T1old, T2old, D1, D2, ti, ng, G)
T1stn, T2stn = ft_cc_equations.ccsd_stanton(
F, I, T1old, T2old, D1, D2, ti, ng, G)
diff1 = numpy.linalg.norm(T1stn - T1sim)
diff2 = numpy.linalg.norm(T2stn - T2sim)
s1 = diff1 < self.thresh*numpy.sqrt(T1sim.size)
s2 = diff2 < self.thresh*numpy.sqrt(T2sim.size)
e1 = "Error in Stanton FT T1: {}".format(diff1)
e2 = "Error in Stanton FT T2: {}".format(diff2)
self.assertTrue(s1, e1)
self.assertTrue(s2, e2)
def test_uccsd(self):
ng = self.ng
na = self.n
nb = self.n
n = na + nb
# unrestricted integrals
Fa = test_utils.make_random_F(na, na)
Fb = test_utils.make_random_F(nb, nb)
Ia = test_utils.make_random_I_anti(na, na)
Ib = test_utils.make_random_I_anti(nb, nb)
Iabab = test_utils.make_random_Ifull_gen(
na, na, nb, nb, na, na, nb, nb)
# Full antisymmetric spin-orbital tensor
I = spin_utils.int_to_spin2(Ia, Ib, Iabab, na, na, nb, nb)
F = spin_utils.F_to_spin(Fa, Fb, na, na, nb, nb)
T1a = numpy.zeros((ng, na, na))
T1b = numpy.zeros((ng, nb, nb))
for i in range(ng):
T1at, T1bt = test_utils.make_random_T1_spatial(na, na, nb, nb)
T1a[i] = T1at
T1b[i] = T1bt
T2aa = numpy.zeros((ng, na, na, na, na))
T2ab = numpy.zeros((ng, na, nb, na, nb))
T2bb =
|
numpy.zeros((ng, nb, nb, nb, nb))
|
numpy.zeros
|
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import copy
import warnings
import re
import pandas as pd
pd.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Parallel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
global DataRawLength
global DataResultsRaw
global previousState
    previousState = []
global StanceTest
StanceTest = False
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global yData
yData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global fileName
fileName = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
global DataResultsRawExternal
global DataRawLengthExternal
global fileName
fileName = []
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global previousState
previousState = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global HistoryPreservation
HistoryPreservation = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
target_names.append('Healthy')
target_names.append('Diseased')
elif data['fileName'] == 'biodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
CollectionDBExternal = mongo.db.biodegCExt.find()
target_names.append('Non-biodegr.')
target_names.append('Biodegr.')
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
target_names.append('Negative')
target_names.append('Positive')
elif data['fileName'] == 'MaterialC':
CollectionDB = mongo.db.MaterialC.find()
target_names.append('Cylinder')
target_names.append('Disk')
target_names.append('Flatellipsold')
target_names.append('Longellipsold')
target_names.append('Sphere')
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
target_names.append('No-use')
target_names.append('Long-term')
target_names.append('Short-term')
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
target_names.append('Van')
target_names.append('Car')
target_names.append('Bus')
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
target_names.append('Fine')
target_names.append('Superior')
target_names.append('Inferior')
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
DataResultsRawExternal = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
for index, item in enumerate(CollectionDBExternal):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawExternal.append(item)
DataRawLengthExternal = len(DataResultsRawExternal)
dataSetSelection()
return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
return 'Processed uploaded data set'
def dataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global XDataExternal, yDataExternal
XDataExternal = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResultsExternal = copy.deepcopy(DataResultsRawExternal)
for dictionary in DataResultsRawExternal:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawExternal.sort(key=lambda x: x[target], reverse=True)
DataResultsExternal.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsExternal:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsExternal = [o[target] for o in DataResultsRawExternal]
AllTargetsFloatValuesExternal = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsExternal):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesExternal.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesExternal.append(Class)
previous = value
ArrayDataResultsExternal = pd.DataFrame.from_dict(DataResultsExternal)
XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargetsFloatValuesExternal
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
dfRaw = pd.DataFrame.from_dict(DataResultsRaw)
# OneTimeTemp = copy.deepcopy(dfRaw)
# OneTimeTemp.drop(columns=['_id', 'InstanceID'])
# column_names = ['volAc', 'chlorides', 'density', 'fixAc' , 'totalSuDi' , 'citAc', 'resSu' , 'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
# OneTimeTemp = OneTimeTemp.reindex(columns=column_names)
# OneTimeTemp.to_csv('dataExport.csv', index=False)
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global keepOriginalFeatures
global OrignList
if (data['fileName'] == 'biodegC'):
keepOriginalFeatures = XData.copy()
storeNewColumns = []
for col in keepOriginalFeatures.columns:
newCol = col.replace("-", "_")
storeNewColumns.append(newCol.replace("_",""))
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
else:
keepOriginalFeatures = XData.copy()
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
warnings.simplefilter('ignore')
executeModel([], 0, '')
return 'Everything is okay'
def create_global_function():
global estimator
location = './cachedir'
memory = Memory(location, verbose=0)
# cached objective: train an XGBoost model with the given hyper-parameters and return its mean cross-validated accuracy
@memory.cache
def estimator(n_estimators, eta, max_depth, subsample, colsample_bytree):
# initialize model
print('loopModels')
n_estimators = int(n_estimators)
max_depth = int(max_depth)
model = XGBClassifier(n_estimators=n_estimators, eta=eta, max_depth=max_depth, subsample=subsample, colsample_bytree=colsample_bytree, n_jobs=-1, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
# set in cross-validation
result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
# result is mean of test_score
return np.mean(result['test_score'])
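# estimator is memoized on disk via joblib Memory, so re-evaluating the same hyper-parameter
# set during Bayesian optimization is served from ./cachedir instead of retraining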
# check this issue later because we are not getting the same results
def executeModel(exeCall, flagEx, nodeTransfName):
global XDataTest, yDataTest
global XDataExternal, yDataExternal
global keyFirstTime
global estimator
global yPredictProb
global scores
global featureImportanceData
global XData
global XDataStored
global previousState
global columnsNewGen
global columnsNames
global listofTransformations
global XDataStoredOriginal
global finalResultsData
global OrignList
global tracker
global XDataNoRemoval
global XDataNoRemovalOrig
columnsNames = []
scores = []
if (len(exeCall) == 0):
if (flagEx == 3):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
OrignList = columnsNewGen
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
else:
if (flagEx == 4):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
#XDataStoredOriginal = XDataStored.copy()
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
#XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
# Bayesian Optimization CHANGE INIT_POINTS!
if (keyFirstTime):
create_global_function()
params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "max_depth": (6,12), "subsample": (0.8,1), "colsample_bytree": (0.8,1)}
bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
bayesopt.maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
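# rebind estimator from the cached objective to a concrete XGBClassifier configured with the best parameters found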
bestParams = bayesopt.max['params']
estimator = XGBClassifier(n_estimators=int(bestParams.get('n_estimators')), eta=bestParams.get('eta'), max_depth=int(bestParams.get('max_depth')), subsample=bestParams.get('subsample'), colsample_bytree=bestParams.get('colsample_bytree'), probability=True, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
columnsNewGen = OrignList
if (len(exeCall) != 0):
if (flagEx == 1):
currentColumnsDeleted = []
for uniqueValue in exeCall:
currentColumnsDeleted.append(tracker[uniqueValue])
for column in XData.columns:
if (column in currentColumnsDeleted):
XData = XData.drop(column, axis=1)
XDataStoredOriginal = XDataStoredOriginal.drop(column, axis=1)
elif (flagEx == 2):
columnsKeepNew = []
columns = XDataGen.columns.values.tolist()
for indx, col in enumerate(columns):
if indx in exeCall:
columnsKeepNew.append(col)
columnsNewGen.append(col)
XDataTemp = XDataGen[columnsKeepNew]
XData[columnsKeepNew] = XDataTemp.values
XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
XDataNoRemoval[columnsKeepNew] = XDataTemp.values
elif (flagEx == 4):
splittedCol = nodeTransfName.split('_')
for col in XDataNoRemoval.columns:
splitCol = col.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
storeRenamedColumn = col
XData.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
XDataNoRemoval.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
currentColumn = columnsNewGen[exeCall[0]]
subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
replacement = currentColumn.replace(subString, nodeTransfName)
for ind, column in enumerate(columnsNewGen):
splitCol = column.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
columnsNewGen[ind] = columnsNewGen[ind].replace(storeRenamedColumn, nodeTransfName)
if (len(splittedCol) == 1):
XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
else:
if (splittedCol[1] == 'r'):
XData[nodeTransfName] = XData[nodeTransfName].round()
elif (splittedCol[1] == 'b'):
number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XData[nodeTransfName] = pd.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XData[nodeTransfName] = pd.to_numeric(XData[nodeTransfName], downcast='signed')
elif (splittedCol[1] == 'zs'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].mean())/XData[nodeTransfName].std()
elif (splittedCol[1] == 'mms'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].min())/(XData[nodeTransfName].max()-XData[nodeTransfName].min())
elif (splittedCol[1] == 'l2'):
dfTemp = []
dfTemp = np.log2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l1p'):
dfTemp = []
dfTemp = np.log1p(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l10'):
dfTemp = []
dfTemp = np.log10(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'e2'):
dfTemp = []
dfTemp = np.exp2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'em1'):
dfTemp = []
dfTemp = np.expm1(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'p2'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
elif (splittedCol[1] == 'p3'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
else:
XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
columnsNamesLoc = XData.columns.values.tolist()
for col in columnsNamesLoc:
splittedCol = col.split('_')
if (len(splittedCol) == 1):
for tran in listofTransformations:
columnsNames.append(splittedCol[0]+'_'+tran)
else:
for tran in listofTransformations:
if (splittedCol[1] == tran):
columnsNames.append(splittedCol[0])
else:
columnsNames.append(splittedCol[0]+'_'+tran)
featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
tracker = []
for value in columnsNewGen:
value = value.split(' ')
if (len(value) > 1):
tracker.append(value[1])
else:
tracker.append(value[0])
estimator.fit(XData, yData)
yPredict = estimator.predict(XData)
yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
num_cores = multiprocessing.cpu_count()
inputsSc = ['accuracy','precision_weighted','recall_weighted']
flat_results = Parallel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
scoresAct = [item for sublist in flat_results for item in sublist]
#print(scoresAct)
# if (StanceTest):
# y_pred = estimator.predict(XDataTest)
# print('Test data set')
# print(classification_report(yDataTest, y_pred))
# y_pred = estimator.predict(XDataExternal)
# print('External data set')
# print(classification_report(yDataExternal, y_pred))
howMany = 0
if (keyFirstTime):
previousState = scoresAct
keyFirstTime = False
howMany = 3
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
finalResultsData = XData.copy()
if (keyFirstTime == False):
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
previousState[0] = scoresAct[0]
previousState[1] = scoresAct[1]
howMany = 3
#elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
previousState[2] = scoresAct[2]
previousState[3] = scoresAct[3]
#howMany = howMany + 1
#elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
previousState[4] = scoresAct[4]
previousState[5] = scoresAct[5]
#howMany = howMany + 1
#else:
#pass
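# scores layout: six current values (mean/std of accuracy, weighted precision, weighted recall),
# the six previous-state values, and a trailing 1/0 flag marking whether the new state improved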
scores = scoresAct + previousState
if (howMany == 3):
scores.append(1)
else:
scores.append(0)
return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
global finalResultsData
finalResultsDataJSON = finalResultsData.to_json()
response = {
'finalResultsData': finalResultsDataJSON
}
return jsonify(response)
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
PerFeatureAccuracyLocalPar = []
scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
PerFeatureAccuracyLocalPar.append(scores.mean())
return PerFeatureAccuracyLocalPar
location = './cachedir'
memory = Memory(location, verbose=0)
# cached computation of the feature-importance measures (impurity-based, RFE ranking, permutation importance, per-feature accuracy, ANOVA F-score)
@memory.cache
def estimatorFeatureSelection(Data, clf):
resultsFS = []
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
ImpurityFS = []
RankingFS = []
estim = clf.fit(Data, yData)
importances = clf.feature_importances_
# std = np.std([tree.feature_importances_ for tree in estim.feature_importances_],
# axis=0)
maxList = max(importances)
minList = min(importances)
for f in range(Data.shape[1]):
ImpurityFS.append((importances[f] - minList) / (maxList - minList))
estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
selector = selector.fit(Data, yData)
RFEImp = selector.ranking_
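# map the RFECV ranking (1 = best) to a descending score in (0, 1] so it is comparable
# with the other normalized importance measures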
for f in range(Data.shape[1]):
if (RFEImp[f] == 1):
RankingFS.append(0.95)
elif (RFEImp[f] == 2):
RankingFS.append(0.85)
elif (RFEImp[f] == 3):
RankingFS.append(0.75)
elif (RFEImp[f] == 4):
RankingFS.append(0.65)
elif (RFEImp[f] == 5):
RankingFS.append(0.55)
elif (RFEImp[f] == 6):
RankingFS.append(0.45)
elif (RFEImp[f] == 7):
RankingFS.append(0.35)
elif (RFEImp[f] == 8):
RankingFS.append(0.25)
elif (RFEImp[f] == 9):
RankingFS.append(0.15)
else:
RankingFS.append(0.05)
perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
permList.append(perm.feature_importances_)
n_feats = Data.shape[1]
num_cores = multiprocessing.cpu_count()
print("Parallelization Initialization")
flat_results = Parallel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
# for i in range(n_feats):
# scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
# PerFeatureAccuracy.append(scoresHere.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
clf.fit(Data, yData)
yPredict = clf.predict(Data)
yPredict = np.nan_to_num(yPredict)
RankingFSDF = pd.DataFrame(RankingFS)
RankingFSDF = RankingFSDF.to_json()
ImpurityFSDF = pd.DataFrame(ImpurityFS)
ImpurityFSDF = ImpurityFSDF.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
if (perm_imp_eli5PD.empty):
for col in Data.columns:
perm_imp_eli5PD = perm_imp_eli5PD.append({0: 0}, ignore_index=True)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=f_classif, k='all')
fit = bestfeatures.fit(Data,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(Data.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
resultsFS.append(featureScores)
resultsFS.append(ImpurityFSDF)
resultsFS.append(perm_imp_eli5PD)
resultsFS.append(PerFeatureAccuracyPandas)
resultsFS.append(RankingFSDF)
return resultsFS
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
global featureImportanceData
response = {
'Importance': featureImportanceData
}
return jsonify(response)
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
global featureCompareData
global columnsKeep
response = {
'ImportanceCompare': featureCompareData,
'FeatureNames': columnsKeep
}
return jsonify(response)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
scoresLoc = []
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
scoresLoc.append(temp.mean())
scoresLoc.append(temp.std())
return scoresLoc
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
global scores
response = {
'ValidResults': scores
}
return jsonify(response)
def Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
# XDataNumericColumn = XData.select_dtypes(include='number')
XDataNumeric = XDataStoredOriginal.select_dtypes(include='number')
columns = list(XDataNumeric)
global packCorrTransformed
packCorrTransformed = []
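# for every numeric feature, evaluate the 12 candidate transformations and store the
# per-quadrant correlation/VIF/mutual-information statistics returned by NewComputationTransf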
for count, i in enumerate(columns):
dicTransf = {}
splittedCol = columnsNames[(count)*len(listofTransformations)+0].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = XDataNumericCopy[i].round()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+1].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XDataNumericCopy[i] = pd.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XDataNumericCopy[i] = pd.to_numeric(XDataNumericCopy[i], downcast='signed')
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+2].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].mean())/XDataNumericCopy[i].std()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+3].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].min())/(XDataNumericCopy[i].max()-XDataNumericCopy[i].min())
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+4].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+5].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log1p(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+6].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log10(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+7].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.exp2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+8].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.expm1(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+9].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+10].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+11].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
packCorrTransformed.append(dicTransf)
return 'Everything Okay'
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
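# for one (possibly transformed) feature: per quadrant, compute absolute correlations with the other
# features, correlations with a one-hot encoded target, variance inflation factors, and mutual
# information with the target; everything is serialized to JSON for the client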
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
corrMatrix1 = corrMatrix1.loc[[feature]]
corrMatrix2 = corrMatrix2.loc[[feature]]
corrMatrix3 = corrMatrix3.loc[[feature]]
corrMatrix4 = corrMatrix4.loc[[feature]]
corrMatrix5 = corrMatrix5.loc[[feature]]
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
if (flagInf == False):
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
VIF1 = VIF1.loc[[feature]]
else:
VIF1 = pd.Series()
if ((len(targetRows1Arr) > 2) and (flagInf == False)):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
MI1List = MI1List[count]
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
corrMatrixComb2 = concatDF2.corr()
corrMatrixComb2 = corrMatrixComb2.abs()
corrMatrixComb2 = corrMatrixComb2.iloc[:,-len(uniqueTarget2):]
DataRows2 = DataRows2.replace([np.inf, -np.inf], np.nan)
DataRows2 = DataRows2.fillna(0)
X2 = add_constant(DataRows2)
X2 = X2.replace([np.inf, -np.inf], np.nan)
X2 = X2.fillna(0)
VIF2 = pd.Series([variance_inflation_factor(X2.values, i)
for i in range(X2.shape[1])],
index=X2.columns)
if (flagInf == False):
VIF2 = VIF2.replace([np.inf, -np.inf], np.nan)
VIF2 = VIF2.fillna(0)
VIF2 = VIF2.loc[[feature]]
else:
VIF2 = pd.Series()
if ((len(targetRows2Arr) > 2) and (flagInf == False)):
MI2 = mutual_info_classif(DataRows2, targetRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI2List = MI2.tolist()
MI2List = MI2List[count]
else:
MI2List = []
else:
corrMatrixComb2 = pd.DataFrame()
VIF2 = pd.Series()
MI2List = []
if (len(targetRows3Arr) > 0):
onehotEncoder3 = OneHotEncoder(sparse=False)
targetRows3Arr = targetRows3Arr.reshape(len(targetRows3Arr), 1)
onehotEncoder3 = onehotEncoder3.fit_transform(targetRows3Arr)
hotEncoderDF3 = pd.DataFrame(onehotEncoder3)
concatDF3 = pd.concat([DataRows3, hotEncoderDF3], axis=1)
corrMatrixComb3 = concatDF3.corr()
corrMatrixComb3 = corrMatrixComb3.abs()
corrMatrixComb3 = corrMatrixComb3.iloc[:,-len(uniqueTarget3):]
DataRows3 = DataRows3.replace([np.inf, -np.inf], np.nan)
DataRows3 = DataRows3.fillna(0)
X3 = add_constant(DataRows3)
X3 = X3.replace([np.inf, -np.inf], np.nan)
X3 = X3.fillna(0)
if (flagInf == False):
VIF3 = pd.Series([variance_inflation_factor(X3.values, i)
for i in range(X3.shape[1])],
index=X3.columns)
VIF3 = VIF3.replace([np.inf, -np.inf], np.nan)
VIF3 = VIF3.fillna(0)
VIF3 = VIF3.loc[[feature]]
else:
VIF3 = pd.Series()
if ((len(targetRows3Arr) > 2) and (flagInf == False)):
MI3 = mutual_info_classif(DataRows3, targetRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI3List = MI3.tolist()
MI3List = MI3List[count]
else:
MI3List = []
else:
corrMatrixComb3 = pd.DataFrame()
VIF3 = pd.Series()
MI3List = []
if (len(targetRows4Arr) > 0):
onehotEncoder4 = OneHotEncoder(sparse=False)
targetRows4Arr = targetRows4Arr.reshape(len(targetRows4Arr), 1)
onehotEncoder4 = onehotEncoder4.fit_transform(targetRows4Arr)
hotEncoderDF4 = pd.DataFrame(onehotEncoder4)
concatDF4 = pd.concat([DataRows4, hotEncoderDF4], axis=1)
corrMatrixComb4 = concatDF4.corr()
corrMatrixComb4 = corrMatrixComb4.abs()
corrMatrixComb4 = corrMatrixComb4.iloc[:,-len(uniqueTarget4):]
DataRows4 = DataRows4.replace([np.inf, -np.inf], np.nan)
DataRows4 = DataRows4.fillna(0)
X4 = add_constant(DataRows4)
X4 = X4.replace([np.inf, -np.inf], np.nan)
X4 = X4.fillna(0)
if (flagInf == False):
VIF4 = pd.Series([variance_inflation_factor(X4.values, i)
for i in range(X4.shape[1])],
index=X4.columns)
VIF4 = VIF4.replace([np.inf, -np.inf], np.nan)
VIF4 = VIF4.fillna(0)
VIF4 = VIF4.loc[[feature]]
else:
VIF4 = pd.Series()
if ((len(targetRows4Arr) > 2) and (flagInf == False)):
MI4 = mutual_info_classif(DataRows4, targetRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI4List = MI4.tolist()
MI4List = MI4List[count]
else:
MI4List = []
else:
corrMatrixComb4 = pd.DataFrame()
VIF4 = pd.Series()
MI4List = []
if (len(targetRows5Arr) > 0):
onehotEncoder5 = OneHotEncoder(sparse=False)
targetRows5Arr = targetRows5Arr.reshape(len(targetRows5Arr), 1)
onehotEncoder5 = onehotEncoder5.fit_transform(targetRows5Arr)
hotEncoderDF5 = pd.DataFrame(onehotEncoder5)
concatDF5 = pd.concat([DataRows5, hotEncoderDF5], axis=1)
corrMatrixComb5 = concatDF5.corr()
corrMatrixComb5 = corrMatrixComb5.abs()
corrMatrixComb5 = corrMatrixComb5.iloc[:,-len(uniqueTarget5):]
DataRows5 = DataRows5.replace([np.inf, -np.inf], np.nan)
DataRows5 = DataRows5.fillna(0)
X5 = add_constant(DataRows5)
X5 = X5.replace([np.inf, -np.inf], np.nan)
X5 = X5.fillna(0)
if (flagInf == False):
VIF5 = pd.Series([variance_inflation_factor(X5.values, i)
for i in range(X5.shape[1])],
index=X5.columns)
VIF5 = VIF5.replace([np.inf, -np.inf], np.nan)
VIF5 = VIF5.fillna(0)
VIF5 = VIF5.loc[[feature]]
else:
VIF5 = pd.Series()
if ((len(targetRows5Arr) > 2) and (flagInf == False)):
MI5 = mutual_info_classif(DataRows5, targetRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI5List = MI5.tolist()
MI5List = MI5List[count]
else:
MI5List = []
else:
corrMatrixComb5 = pd.DataFrame()
VIF5 = pd.Series()
MI5List = []
if(corrMatrixComb1.empty):
corrMatrixComb1 = pd.DataFrame()
else:
corrMatrixComb1 = corrMatrixComb1.loc[[feature]]
if(corrMatrixComb2.empty):
corrMatrixComb2 = pd.DataFrame()
else:
corrMatrixComb2 = corrMatrixComb2.loc[[feature]]
if(corrMatrixComb3.empty):
corrMatrixComb3 = pd.DataFrame()
else:
corrMatrixComb3 = corrMatrixComb3.loc[[feature]]
if(corrMatrixComb4.empty):
corrMatrixComb4 = pd.DataFrame()
else:
corrMatrixComb4 = corrMatrixComb4.loc[[feature]]
if(corrMatrixComb5.empty):
corrMatrixComb5 = pd.DataFrame()
else:
corrMatrixComb5 = corrMatrixComb5.loc[[feature]]
targetRows1ArrDF = pd.DataFrame(targetRows1Arr)
targetRows2ArrDF = pd.DataFrame(targetRows2Arr)
targetRows3ArrDF = pd.DataFrame(targetRows3Arr)
targetRows4ArrDF = pd.DataFrame(targetRows4Arr)
targetRows5ArrDF = pd.DataFrame(targetRows5Arr)
concatAllDF1 = pd.concat([DataRows1, targetRows1ArrDF], axis=1)
concatAllDF2 = pd.concat([DataRows2, targetRows2ArrDF], axis=1)
concatAllDF3 = pd.concat([DataRows3, targetRows3ArrDF], axis=1)
concatAllDF4 = pd.concat([DataRows4, targetRows4ArrDF], axis=1)
concatAllDF5 = pd.concat([DataRows5, targetRows5ArrDF], axis=1)
corrMatrixCombTotal1 = concatAllDF1.corr()
corrMatrixCombTotal1 = corrMatrixCombTotal1.abs()
corrMatrixCombTotal2 = concatAllDF2.corr()
corrMatrixCombTotal2 = corrMatrixCombTotal2.abs()
corrMatrixCombTotal3 = concatAllDF3.corr()
corrMatrixCombTotal3 = corrMatrixCombTotal3.abs()
corrMatrixCombTotal4 = concatAllDF4.corr()
corrMatrixCombTotal4 = corrMatrixCombTotal4.abs()
corrMatrixCombTotal5 = concatAllDF5.corr()
corrMatrixCombTotal5 = corrMatrixCombTotal5.abs()
corrMatrixCombTotal1 = corrMatrixCombTotal1.loc[[feature]]
corrMatrixCombTotal1 = corrMatrixCombTotal1.iloc[:,-1]
corrMatrixCombTotal2 = corrMatrixCombTotal2.loc[[feature]]
corrMatrixCombTotal2 = corrMatrixCombTotal2.iloc[:,-1]
corrMatrixCombTotal3 = corrMatrixCombTotal3.loc[[feature]]
corrMatrixCombTotal3 = corrMatrixCombTotal3.iloc[:,-1]
corrMatrixCombTotal4 = corrMatrixCombTotal4.loc[[feature]]
corrMatrixCombTotal4 = corrMatrixCombTotal4.iloc[:,-1]
corrMatrixCombTotal5 = corrMatrixCombTotal5.loc[[feature]]
corrMatrixCombTotal5 = corrMatrixCombTotal5.iloc[:,-1]
corrMatrixCombTotal1 = pd.concat([corrMatrixCombTotal1.tail(1)])
corrMatrixCombTotal2 = pd.concat([corrMatrixCombTotal2.tail(1)])
corrMatrixCombTotal3 = pd.concat([corrMatrixCombTotal3.tail(1)])
corrMatrixCombTotal4 = pd.concat([corrMatrixCombTotal4.tail(1)])
corrMatrixCombTotal5 = pd.concat([corrMatrixCombTotal5.tail(1)])
packCorrLoc = []
packCorrLoc.append(corrMatrix1.to_json())
packCorrLoc.append(corrMatrix2.to_json())
packCorrLoc.append(corrMatrix3.to_json())
packCorrLoc.append(corrMatrix4.to_json())
packCorrLoc.append(corrMatrix5.to_json())
packCorrLoc.append(corrMatrixComb1.to_json())
packCorrLoc.append(corrMatrixComb2.to_json())
packCorrLoc.append(corrMatrixComb3.to_json())
packCorrLoc.append(corrMatrixComb4.to_json())
packCorrLoc.append(corrMatrixComb5.to_json())
packCorrLoc.append(corrMatrixCombTotal1.to_json())
packCorrLoc.append(corrMatrixCombTotal2.to_json())
packCorrLoc.append(corrMatrixCombTotal3.to_json())
packCorrLoc.append(corrMatrixCombTotal4.to_json())
packCorrLoc.append(corrMatrixCombTotal5.to_json())
packCorrLoc.append(VIF1.to_json())
packCorrLoc.append(VIF2.to_json())
packCorrLoc.append(VIF3.to_json())
packCorrLoc.append(VIF4.to_json())
packCorrLoc.append(VIF5.to_json())
packCorrLoc.append(json.dumps(MI1List))
packCorrLoc.append(json.dumps(MI2List))
packCorrLoc.append(json.dumps(MI3List))
packCorrLoc.append(json.dumps(MI4List))
packCorrLoc.append(json.dumps(MI5List))
return packCorrLoc
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/thresholdDataSpace', methods=["GET", "POST"])
def Seperation():
thresholds = request.get_data().decode('utf8').replace("'", '"')
thresholds = json.loads(thresholds)
thresholdsPos = thresholds['PositiveValue']
thresholdsNeg = thresholds['NegativeValue']
getCorrectPrediction = []
for index, value in enumerate(yPredictProb):
getCorrectPrediction.append(value[yData[index]]*100)
quadrant1 = []
quadrant2 = []
quadrant3 = []
quadrant4 = []
quadrant5 = []
probabilityPredictions = []
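# bucket each instance by the predicted probability of its true class relative to the
# user-defined thresholds (see the if/elif chain below)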
for index, value in enumerate(getCorrectPrediction):
if (value > 50 and value > thresholdsPos):
quadrant1.append(index)
elif (value > 50 and value <= thresholdsPos):
quadrant2.append(index)
elif (value <= 50 and value > thresholdsNeg):
quadrant3.append(index)
else:
quadrant4.append(index)
quadrant5.append(index)
probabilityPredictions.append(value)
# Main Features
DataRows1 = XData.iloc[quadrant1, :]
DataRows2 = XData.iloc[quadrant2, :]
DataRows3 = XData.iloc[quadrant3, :]
DataRows4 = XData.iloc[quadrant4, :]
DataRows5 = XData.iloc[quadrant5, :]
Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5)
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
if (len(targetRows1Arr) > 2):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
corrMatrixComb2 = concatDF2.corr()
corrMatrixComb2 = corrMatrixComb2.abs()
corrMatrixComb2 = corrMatrixComb2.iloc[:,-len(uniqueTarget2):]
DataRows2 = DataRows2.replace([np.inf, -np.inf], np.nan)
DataRows2 = DataRows2.fillna(0)
X2 = add_constant(DataRows2)
X2 = X2.replace([np.inf, -np.inf], np.nan)
X2 = X2.fillna(0)
VIF2 = pd.Series([variance_inflation_factor(X2.values, i)
for i in range(X2.shape[1])],
index=X2.columns)
VIF2 = VIF2.replace([np.inf, -np.inf], np.nan)
VIF2 = VIF2.fillna(0)
if (len(targetRows2Arr) > 2):
MI2 = mutual_info_classif(DataRows2, targetRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI2List = MI2.tolist()
else:
MI2List = []
else:
corrMatrixComb2 = pd.DataFrame()
VIF2 = pd.Series()
MI2List = []
if (len(targetRows3Arr) > 0):
onehotEncoder3 = OneHotEncoder(sparse=False)
targetRows3Arr = targetRows3Arr.reshape(len(targetRows3Arr), 1)
onehotEncoder3 = onehotEncoder3.fit_transform(targetRows3Arr)
hotEncoderDF3 = pd.DataFrame(onehotEncoder3)
concatDF3 = pd.concat([DataRows3, hotEncoderDF3], axis=1)
corrMatrixComb3 = concatDF3.corr()
corrMatrixComb3 = corrMatrixComb3.abs()
corrMatrixComb3 = corrMatrixComb3.iloc[:,-len(uniqueTarget3):]
DataRows3 = DataRows3.replace([np.inf, -np.inf], np.nan)
DataRows3 = DataRows3.fillna(0)
X3 = add_constant(DataRows3)
X3 = X3.replace([np.inf, -np.inf], np.nan)
X3 = X3.fillna(0)
VIF3 = pd.Series([variance_inflation_factor(X3.values, i)
for i in range(X3.shape[1])],
index=X3.columns)
VIF3 = VIF3.replace([np.inf, -np.inf], np.nan)
VIF3 = VIF3.fillna(0)
if (len(targetRows3Arr) > 2):
MI3 = mutual_info_classif(DataRows3, targetRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI3List = MI3.tolist()
else:
MI3List = []
else:
corrMatrixComb3 = pd.DataFrame()
VIF3 = pd.Series()
MI3List = []
if (len(targetRows4Arr) > 0):
onehotEncoder4 = OneHotEncoder(sparse=False)
targetRows4Arr = targetRows4Arr.reshape(len(targetRows4Arr), 1)
onehotEncoder4 = onehotEncoder4.fit_transform(targetRows4Arr)
hotEncoderDF4 = pd.DataFrame(onehotEncoder4)
concatDF4 = pd.concat([DataRows4, hotEncoderDF4], axis=1)
corrMatrixComb4 = concatDF4.corr()
corrMatrixComb4 = corrMatrixComb4.abs()
corrMatrixComb4 = corrMatrixComb4.iloc[:,-len(uniqueTarget4):]
DataRows4 = DataRows4.replace([np.inf, -np.inf], np.nan)
DataRows4 = DataRows4.fillna(0)
X4 = add_constant(DataRows4)
X4 = X4.replace([np.inf, -np.inf], np.nan)
X4 = X4.fillna(0)
VIF4 = pd.Series([variance_inflation_factor(X4.values, i)
for i in range(X4.shape[1])],
index=X4.columns)
VIF4 = VIF4.replace([np.inf, -np.inf], np.nan)
VIF4 = VIF4.fillna(0)
if (len(targetRows4Arr) > 2):
MI4 = mutual_info_classif(DataRows4, targetRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI4List = MI4.tolist()
else:
MI4List = []
else:
corrMatrixComb4 = pd.DataFrame()
VIF4 = pd.Series()
MI4List = []
if (len(targetRows5Arr) > 0):
onehotEncoder5 = OneHotEncoder(sparse=False)
targetRows5Arr = targetRows5Arr.reshape(len(targetRows5Arr), 1)
onehotEncoder5 = onehotEncoder5.fit_transform(targetRows5Arr)
hotEncoderDF5 = pd.DataFrame(onehotEncoder5)
concatDF5 = pd.concat([DataRows5, hotEncoderDF5], axis=1)
corrMatrixComb5 = concatDF5.corr()
corrMatrixComb5 = corrMatrixComb5.abs()
corrMatrixComb5 = corrMatrixComb5.iloc[:,-len(uniqueTarget5):]
DataRows5 = DataRows5.replace([np.inf, -np.inf], np.nan)
DataRows5 = DataRows5.fillna(0)
X5 = add_constant(DataRows5)
X5 = X5.replace([np.inf, -np.inf], np.nan)
X5 = X5.fillna(0)
VIF5 = pd.Series([variance_inflation_factor(X5.values, i)
for i in range(X5.shape[1])],
index=X5.columns)
VIF5 = VIF5.replace([np.inf, -np.inf], np.nan)
VIF5 = VIF5.fillna(0)
if (len(targetRows5Arr) > 2):
MI5 = mutual_info_classif(DataRows5, targetRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI5List = MI5.tolist()
else:
MI5List = []
else:
corrMatrixComb5 = pd.DataFrame()
VIF5 = pd.Series()
MI5List = []
targetRows1ArrDF = pd.DataFrame(targetRows1Arr)
targetRows2ArrDF = pd.DataFrame(targetRows2Arr)
targetRows3ArrDF = pd.DataFrame(targetRows3Arr)
targetRows4ArrDF = pd.DataFrame(targetRows4Arr)
targetRows5ArrDF = pd.DataFrame(targetRows5Arr)
concatAllDF1 = pd.concat([DataRows1, targetRows1ArrDF], axis=1)
concatAllDF2 = pd.concat([DataRows2, targetRows2ArrDF], axis=1)
concatAllDF3 = pd.concat([DataRows3, targetRows3ArrDF], axis=1)
concatAllDF4 = pd.concat([DataRows4, targetRows4ArrDF], axis=1)
concatAllDF5 = pd.concat([DataRows5, targetRows5ArrDF], axis=1)
corrMatrixCombTotal1 = concatAllDF1.corr()
corrMatrixCombTotal1 = corrMatrixCombTotal1.abs()
corrMatrixCombTotal2 = concatAllDF2.corr()
corrMatrixCombTotal2 = corrMatrixCombTotal2.abs()
corrMatrixCombTotal3 = concatAllDF3.corr()
corrMatrixCombTotal3 = corrMatrixCombTotal3.abs()
corrMatrixCombTotal4 = concatAllDF4.corr()
corrMatrixCombTotal4 = corrMatrixCombTotal4.abs()
corrMatrixCombTotal5 = concatAllDF5.corr()
corrMatrixCombTotal5 = corrMatrixCombTotal5.abs()
corrMatrixCombTotal1 = pd.concat([corrMatrixCombTotal1.tail(1)])
corrMatrixCombTotal2 = pd.concat([corrMatrixCombTotal2.tail(1)])
corrMatrixCombTotal3 = pd.concat([corrMatrixCombTotal3.tail(1)])
corrMatrixCombTotal4 = pd.concat([corrMatrixCombTotal4.tail(1)])
corrMatrixCombTotal5 = pd.concat([corrMatrixCombTotal5.tail(1)])
global packCorr
packCorr = []
packCorr.append(json.dumps(columnsNewGen))
packCorr.append(json.dumps(target_names))
packCorr.append(json.dumps(probabilityPredictions))
packCorr.append(corrMatrix1.to_json())
packCorr.append(corrMatrix2.to_json())
packCorr.append(corrMatrix3.to_json())
packCorr.append(corrMatrix4.to_json())
packCorr.append(corrMatrix5.to_json())
packCorr.append(corrMatrixComb1.to_json())
packCorr.append(corrMatrixComb2.to_json())
packCorr.append(corrMatrixComb3.to_json())
packCorr.append(corrMatrixComb4.to_json())
packCorr.append(corrMatrixComb5.to_json())
packCorr.append(corrMatrixCombTotal1.to_json())
packCorr.append(corrMatrixCombTotal2.to_json())
packCorr.append(corrMatrixCombTotal3.to_json())
packCorr.append(corrMatrixCombTotal4.to_json())
packCorr.append(corrMatrixCombTotal5.to_json())
packCorr.append(json.dumps(uniqueTarget1))
packCorr.append(json.dumps(uniqueTarget2))
packCorr.append(json.dumps(uniqueTarget3))
packCorr.append(json.dumps(uniqueTarget4))
packCorr.append(json.dumps(uniqueTarget5))
packCorr.append(VIF1.to_json())
packCorr.append(VIF2.to_json())
packCorr.append(VIF3.to_json())
packCorr.append(VIF4.to_json())
packCorr.append(VIF5.to_json())
packCorr.append(json.dumps(MI1List))
packCorr.append(json.dumps(MI2List))
packCorr.append(json.dumps(MI3List))
packCorr.append(json.dumps(MI4List))
packCorr.append(json.dumps(MI5List))
packCorr.append(list(tracker))
packCorr.append(list(XData.columns.values.tolist()))
packCorr.append(json.dumps(columnsNames))
return 'Everything Okay'
@app.route('/data/returnCorrelationsTransformed', methods=["GET", "POST"])
def SendCorrelTransformed():
global packCorrTransformed
response = {
'correlResulTranformed': packCorrTransformed
}
return jsonify(response)
@app.route('/data/returnCorrelations', methods=["GET", "POST"])
def SendCorrel():
global packCorr
response = {
'correlResul': packCorr
}
return jsonify(response)
def unique(list1):
    # initialize an empty list
unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
return unique_list
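# Example: unique([3, 1, 3, 2, 1]) returns [3, 1, 2] (first-occurrence order is kept).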
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/AddRemFun', methods=["GET", "POST"])
def ManipulFeat():
featureProcess = request.get_data().decode('utf8').replace("'", '"')
featureProcess = json.loads(featureProcess)
featureProcessExtract = featureProcess['featureAddRem']
executeModel(featureProcessExtract, 1, '')
return 'Okay'
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/AddRemGenFun', methods=["GET", "POST"])
def ManipulFeatGen():
featureProcess = request.get_data().decode('utf8').replace("'", '"')
featureProcess = json.loads(featureProcess)
featureProcessExtract = featureProcess['featureAddRemGen']
executeModel(featureProcessExtract, 2, '')
return 'Okay'
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/compareFun', methods=["GET", "POST"])
def CompareFunPy():
global featureCompareData
global columnsKeep
global XDataGen
global IDsToCompare
global columnsNewGen
retrieveComparison = request.get_data().decode('utf8').replace("'", '"')
retrieveComparison = json.loads(retrieveComparison)
compareMode = retrieveComparison['compareNumber']
IDsToCompare = retrieveComparison['getIDs']
XDataGen = XDataStored.copy()
columns = XData.columns.values.tolist()
#columnsOriganl = XDataNoRemoval.columns.values.tolist()
columnsKeep = []
columnsKeepNonOrig = []
columnsKeepID = []
for indx, col in enumerate(columns):
if indx in IDsToCompare:
columnsKeepNonOrig.append(col)
            columnExtracted = re.findall(r'\d+', col)
columnsKeep.append(columnsNewGen[int(columnExtracted[0]) - 1])
columnsKeepID.append(str(col))
if (compareMode == 1):
XDataGen = XData[columnsKeepNonOrig]
feat1 = XDataGen.iloc[:,0]
feat2 = XDataGen.iloc[:,1]
XDataGen[columnsKeepID[0]+'+'+columnsKeepID[1]] = feat1 + feat2
XDataGen['|'+columnsKeepID[0]+'-'+columnsKeepID[1]+'|'] = abs(feat1 - feat2)
XDataGen[columnsKeepID[0]+'x'+columnsKeepID[1]] = feat1 * feat2
XDataGen[columnsKeepID[0]+'/'+columnsKeepID[1]] = feat1 / feat2
XDataGen[columnsKeepID[1]+'/'+columnsKeepID[0]] = feat2 / feat1
columnsKeep.append(columnsKeepID[0]+'+'+columnsKeepID[1])
columnsKeep.append('|'+columnsKeepID[0]+'-'+columnsKeepID[1]+'|')
columnsKeep.append(columnsKeepID[0]+'x'+columnsKeepID[1])
columnsKeep.append(columnsKeepID[0]+'/'+columnsKeepID[1])
columnsKeep.append(columnsKeepID[1]+'/'+columnsKeepID[0])
elif (compareMode == 2):
XDataGen = XData[columnsKeepNonOrig]
feat1 = XDataGen.iloc[:,0]
feat2 = XDataGen.iloc[:,1]
feat3 = XDataGen.iloc[:,2]
XDataGen[columnsKeepID[0]+'+'+columnsKeepID[1]] = feat1 + feat2
XDataGen[columnsKeepID[1]+'+'+columnsKeepID[2]] = feat2 + feat3
XDataGen[columnsKeepID[0]+'+'+columnsKeepID[2]] = feat1 + feat3
XDataGen[columnsKeepID[0]+'+'+columnsKeepID[1]+'+'+columnsKeepID[2]] = feat1 + feat2 + feat3
XDataGen['|'+columnsKeepID[0]+'-'+columnsKeepID[1]+'|'] = abs(feat1 - feat2)
XDataGen['|'+columnsKeepID[1]+'-'+columnsKeepID[2]+'|'] = abs(feat2 - feat3)
XDataGen['|'+columnsKeepID[0]+'-'+columnsKeepID[2]+'|'] = abs(feat1 - feat3)
XDataGen['|'+columnsKeepID[0]+'-'+columnsKeepID[1]+'-'+columnsKeepID[2]+'|'] = abs(feat1 - feat2 - feat3)
XDataGen[columnsKeepID[0]+'x'+columnsKeepID[1]] = feat1 * feat2
XDataGen[columnsKeepID[1]+'x'+columnsKeepID[2]] = feat2 * feat3
XDataGen[columnsKeepID[0]+'x'+columnsKeepID[2]] = feat1 * feat3
XDataGen[columnsKeepID[0]+'x'+columnsKeepID[1]+'x'+columnsKeepID[2]] = feat1 * feat2 * feat3
XDataGen[columnsKeepID[0]+'/'+columnsKeepID[1]] = feat1 / feat2
XDataGen[columnsKeepID[1]+'/'+columnsKeepID[0]] = feat2 / feat1
XDataGen[columnsKeepID[1]+'/'+columnsKeepID[2]] = feat2 / feat3
XDataGen[columnsKeepID[2]+'/'+columnsKeepID[1]] = feat3 / feat2
XDataGen[columnsKeepID[0]+'/'+columnsKeepID[2]] = feat1 / feat3
XDataGen[columnsKeepID[2]+'/'+columnsKeepID[0]] = feat3 / feat1
XDataGen[columnsKeepID[0]+'/'+columnsKeepID[1]+'/'+columnsKeepID[2]] = feat1 / feat2 / feat3
XDataGen[columnsKeepID[0]+'/'+columnsKeepID[2]+'/'+columnsKeepID[1]] = feat1 / feat3 / feat2
XDataGen[columnsKeepID[1]+'/'+columnsKeepID[2]+'/'+columnsKeepID[0]] = feat2 / feat3 / feat1
XDataGen[columnsKeepID[1]+'/'+columnsKeepID[0]+'/'+columnsKeepID[2]] = feat2 / feat1 / feat3
XDataGen[columnsKeepID[2]+'/'+columnsKeepID[0]+'/'+columnsKeepID[1]] = feat3 / feat1 / feat2
XDataGen[columnsKeepID[2]+'/'+columnsKeepID[1]+'/'+columnsKeepID[0]] = feat3 / feat2 / feat1
columnsKeep.append(columnsKeepID[0]+'+'+columnsKeepID[1])
columnsKeep.append(columnsKeepID[1]+'+'+columnsKeepID[2])
columnsKeep.append(columnsKeepID[0]+'+'+columnsKeepID[2])
columnsKeep.append(columnsKeepID[0]+'+'+columnsKeepID[1]+'+'+columnsKeepID[2])
columnsKeep.append('|'+columnsKeepID[0]+'-'+columnsKeepID[1]+'|')
columnsKeep.append('|'+columnsKeepID[1]+'-'+columnsKeepID[2]+'|')
columnsKeep.append('|'+columnsKeepID[0]+'-'+columnsKeepID[2]+'|')
columnsKeep.append('|'+columnsKeepID[0]+'-'+columnsKeepID[1]+'-'+columnsKeepID[2]+'|')
columnsKeep.append(columnsKeepID[0]+'x'+columnsKeepID[1])
columnsKeep.append(columnsKeepID[1]+'x'+columnsKeepID[2])
columnsKeep.append(columnsKeepID[0]+'x'+columnsKeepID[2])
columnsKeep.append(columnsKeepID[0]+'x'+columnsKeepID[1]+'x'+columnsKeepID[2])
columnsKeep.append(columnsKeepID[0]+'/'+columnsKeepID[1])
columnsKeep.append(columnsKeepID[1]+'/'+columnsKeepID[0])
columnsKeep.append(columnsKeepID[1]+'/'+columnsKeepID[2])
columnsKeep.append(columnsKeepID[2]+'/'+columnsKeepID[1])
columnsKeep.append(columnsKeepID[0]+'/'+columnsKeepID[2])
columnsKeep.append(columnsKeepID[2]+'/'+columnsKeepID[0])
columnsKeep.append(columnsKeepID[0]+'/'+columnsKeepID[1]+'/'+columnsKeepID[2])
columnsKeep.append(columnsKeepID[0]+'/'+columnsKeepID[2]+'/'+columnsKeepID[1])
columnsKeep.append(columnsKeepID[1]+'/'+columnsKeepID[2]+'/'+columnsKeepID[0])
columnsKeep.append(columnsKeepID[1]+'/'+columnsKeepID[0]+'/'+columnsKeepID[2])
columnsKeep.append(columnsKeepID[2]+'/'+columnsKeepID[0]+'/'+columnsKeepID[1])
columnsKeep.append(columnsKeepID[2]+'/'+columnsKeepID[1]+'/'+columnsKeepID[0])
else:
pass
#print(XDataGen)
XDataGen = XDataGen.replace([np.inf, -np.inf], np.nan)
XDataGen = XDataGen.fillna(0)
featureCompareData = estimatorFeatureSelection(XDataGen, estimator)
return 'Okay'
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/storeGeneratedFeatures', methods=["GET", "POST"])
def storeGeneratedFeat():
print('Generate')
executeModel([], 3, '')
return 'Okay'
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/transformation', methods=["GET", "POST"])
def transformFeatures():
print('Transform')
retrieveTransform = request.get_data().decode('utf8').replace("'", '"')
retrieveTransform = json.loads(retrieveTransform)
clickedNodeName = retrieveTransform['nameClicked']
removeNodeID = retrieveTransform['removeNode']
executeModel([removeNodeID[1]], 4, clickedNodeName[0])
return 'Okay'
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/testResults', methods=["GET", "POST"])
def requestTestFun():
global StanceTest
global estimator
global XData
global XDataTest
global XDataExternal
# Feature Selection
XData = XData.drop(['F35','F41','F33','F11','F18','F27','F40','F31','F9','F30','F38','F17','F15','F36','F25','F22', 'F23'], axis=1)
XDataTest = XDataTest.drop(['F35','F41','F33','F11','F18','F27','F40','F31','F9','F30','F38','F17','F15','F36','F25','F22', 'F23'], axis=1)
XDataExternal = XDataExternal.drop(['F35','F41','F33','F11','F18','F27','F40','F31','F9','F30','F38','F17','F15','F36','F25','F22', 'F23'], axis=1)
# Transformation
XData['F26'] = np.power(XData['F26'], 4)
XDataTest['F26'] = np.power(XDataTest['F26'], 4)
    XDataExternal['F26'] = np.power(XDataExternal['F26'], 4)  # api: numpy.power
import cv2
import math
import time
import numpy as np
from . import util
import tensorflow as tf
from .config_reader import config_reader
from scipy.ndimage.filters import gaussian_filter
from tensorflow.keras.models import load_model
import code
import copy
import scipy.ndimage as sn
from PIL import Image
from tqdm import tqdm
from .model_simulated_RGB101 import get_testing_model_resnet101
from .human_seg.human_seg_gt import human_seg_combine_argmax
right_part_idx = [2, 3, 4, 8, 9, 10, 14, 16]
left_part_idx = [5, 6, 7, 11, 12, 13, 15, 17]
human_part = [0,1,2,4,3,6,5,8,7,10,9,12,11,14,13]
human_ori_part = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
seg_num = 15 # current model supports 15 parts only
def recover_flipping_output(oriImg, heatmap_ori_size, paf_ori_size, part_ori_size):
heatmap_ori_size = heatmap_ori_size[:, ::-1, :]
heatmap_flip_size = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
heatmap_flip_size[:,:,left_part_idx] = heatmap_ori_size[:,:,right_part_idx]
heatmap_flip_size[:,:,right_part_idx] = heatmap_ori_size[:,:,left_part_idx]
heatmap_flip_size[:,:,0:2] = heatmap_ori_size[:,:,0:2]
paf_ori_size = paf_ori_size[:, ::-1, :]
paf_flip_size = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
paf_flip_size[:,:,ori_paf_idx] = paf_ori_size[:,:,flip_paf_idx]
paf_flip_size[:,:,x_paf_idx] = paf_flip_size[:,:,x_paf_idx]*-1
part_ori_size = part_ori_size[:, ::-1, :]
part_flip_size = np.zeros((oriImg.shape[0], oriImg.shape[1], 15))
part_flip_size[:,:,human_ori_part] = part_ori_size[:,:,human_part]
return heatmap_flip_size, paf_flip_size, part_flip_size
def recover_flipping_output2(oriImg, part_ori_size):
part_ori_size = part_ori_size[:, ::-1, :]
part_flip_size = np.zeros((oriImg.shape[0], oriImg.shape[1], 15))
part_flip_size[:,:,human_ori_part] = part_ori_size[:,:,human_part]
return part_flip_size
def part_thresholding(seg_argmax):
background = 0.6
head = 0.5
torso = 0.8
rightfoot = 0.55
leftfoot = 0.55
leftthigh = 0.55
rightthigh = 0.55
leftshank = 0.55
rightshank = 0.55
rightupperarm = 0.55
leftupperarm = 0.55
rightforearm = 0.55
leftforearm = 0.55
lefthand = 0.55
righthand = 0.55
part_th = [background, head, torso, leftupperarm ,rightupperarm, leftforearm, rightforearm, lefthand, righthand, leftthigh, rightthigh, leftshank, rightshank, leftfoot, rightfoot]
th_mask = np.zeros(seg_argmax.shape)
for indx in range(15):
part_prediction = (seg_argmax==indx)
part_prediction = part_prediction*part_th[indx]
th_mask += part_prediction
return th_mask
def process (oriImg, flipImg, params, model_params, model):
input_scale = 1.0
multiplier = [x * model_params['boxsize'] / oriImg.shape[0] for x in params['scale_search']]
seg_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 15))
segmap_scale1 = np.zeros((oriImg.shape[0], oriImg.shape[1], seg_num))
segmap_scale2 = np.zeros((oriImg.shape[0], oriImg.shape[1], seg_num))
segmap_scale3 = np.zeros((oriImg.shape[0], oriImg.shape[1], seg_num))
segmap_scale4 = np.zeros((oriImg.shape[0], oriImg.shape[1], seg_num))
segmap_scale5 = np.zeros((oriImg.shape[0], oriImg.shape[1], seg_num))
segmap_scale6 = np.zeros((oriImg.shape[0], oriImg.shape[1], seg_num))
segmap_scale7 = np.zeros((oriImg.shape[0], oriImg.shape[1], seg_num))
segmap_scale8 = np.zeros((oriImg.shape[0], oriImg.shape[1], seg_num))
for m in range(len(multiplier)):
scale = multiplier[m]*input_scale
imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
pad = [ 0,
0,
(imageToTest.shape[0] - model_params['stride']) % model_params['stride'],
(imageToTest.shape[1] - model_params['stride']) % model_params['stride']
]
imageToTest_padded = np.pad(imageToTest, ((0, pad[2]), (0, pad[3]), (0, 0)), mode='constant', constant_values=((0, 0), (0, 0), (0, 0)))
input_img = imageToTest_padded[np.newaxis, ...]
# print( "\tActual size fed into NN: ", input_img.shape)
output_blobs = model.predict(input_img)
        seg = np.squeeze(output_blobs[2])  # api: numpy.squeeze
#!/usr/bin/env python
# coding: utf-8
import numpy as onp
import jax.numpy as np
from jax import vmap
from functools import partial
import time
from jax import jit
from jax import grad
import os
from jax.config import config
import dplex
import iminuit
import matplotlib.pyplot as plt
config.update("jax_enable_x64", True)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def invm_plus(Pb,Pc):
Pbc = Pb + Pc
_Pbc = Pbc * np.array([-1,-1,-1,1])
return np.sum(Pbc * _Pbc,axis=1)
def invm(Pbc):
_Pbc = Pbc * np.array([-1,-1,-1,1])
return np.sum(Pbc * _Pbc,axis=1)
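# Note: with the (px, py, pz, E) component order used above, invm returns the squared
# invariant mass E^2 - |p|^2 per event, e.g. a particle at rest with E = 1.02 gives 1.0404.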
def mcnpz(begin,end):
amp = onp.load("data/mctruth.npz")
# phif223 = amp['phif223'][begin:end,0:2]
# phif222 = amp['phif222'][begin:end,0:2]
# phif221 = amp['phif221'][begin:end,0:2]
# phif201 = amp['phif201'][begin:end,0:2]
# data_phif2 = np.asarray([phif201,phif221,phif222,phif223])
phif001 = amp['phif001'][begin:end,0:2]
phif021 = amp['phif021'][begin:end,0:2]
data_phif0 = np.asarray([phif001,phif021])
mom = onp.load("data/mcmom.npz")
Kp = mom['Kp'][begin:end,:]
Km = mom['Km'][begin:end,:]
Pip = mom['Pip'][begin:end,:]
Pim = mom['Pim'][begin:end,:]
data_f = Pip + Pim
data_phi = Kp + Km
data_phi = invm(data_phi)
data_f = invm(data_f)
return data_phif0,data_phi,data_f
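# _BW below builds a relativistic Breit-Wigner amplitude: it returns
# k / (m^2 - s - i*m*Gamma) as a dplex complex pair, where s = Sbc is the squared
# invariant mass and k is the normalisation computed from the mass m_ and width w_.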
def _BW(m_,w_,Sbc):
gamma=np.sqrt(m_*m_*(m_*m_+w_*w_))
k = np.sqrt(2*np.sqrt(2)*m_*np.abs(w_)*gamma/np.pi/np.sqrt(m_*m_+gamma))
l = Sbc.shape[0]
temp = dplex.dconstruct(m_*m_ - Sbc, -m_*w_*np.ones(l))
return dplex.ddivide(k, temp)
def _phase(_theta, _rho):
return dplex.dconstruct(_rho * np.cos(_theta), _rho * np.sin(_theta))
def BW(phim,phiw,fm,fw,phi,f):
a = np.moveaxis(vmap(partial(_BW,Sbc=phi))(phim,phiw),1,0)
b = np.moveaxis(vmap(partial(_BW,Sbc=f))(fm,fw),1,0)
result = dplex.deinsum('ij,ij->ij',a,b)
return result
def phase(_theta,_rho):
result = vmap(_phase)(_theta,_rho)
return result
def MOD(phim,phiw,fm,fw,const,theta,rho,phif,phi,f):
ph = np.moveaxis(phase(theta,rho), 1, 0)
bw = BW(phim,phiw,fm,fw,phi,f)
_phif = dplex.dtomine(np.einsum('ijk,il->ljk',phif,const))
_phif = dplex.deinsum('ijk,i->ijk',_phif,ph)
_phif = dplex.deinsum('ijk,ij->jk',_phif,bw)
return _phif
def alladd(*mods):
l = (mods[0].shape)[1]
sum = onp.zeros(l*2*2).reshape(2,l,2)
for num in mods:
sum += num
# print(sum.shape)
return np.sum(dplex.dabs(sum),axis=1)
def weight(_phim,_phiw,_f0m,_f0w,_const1,_theta,_rho):
const = np.asarray([[_const1],[1.]])
rho = np.asarray([_rho])
theta = np.asarray([_theta])
phim = np.asarray([_phim])
phiw = np.asarray([_phiw])
f0m = np.asarray([_f0m])
f0w = np.asarray([_f0w])
d_phif0 = MOD(phim,phiw,f0m,f0w,const,theta,rho,data_phif0,data_phi,data_f)
m_phif0 = MOD(phim,phiw,f0m,f0w,const,theta,rho,mc_phif0,mc_phi,mc_f)
d_tmp = alladd(d_phif0)
m_tmp = np.average(alladd(m_phif0))
print("weight")
return d_tmp/m_tmp
def likelihood(_phim,_phiw,_f0m,_f0w,_const1,_theta,_rho):
const = np.asarray([[_const1],[1.]])
rho = np.asarray([_rho])
theta = np.asarray([_theta])
phim = np.asarray([_phim])
phiw = np.asarray([_phiw])
f0m = np.asarray([_f0m])
f0w = np.asarray([_f0w])
d_phif0 = MOD(phim,phiw,f0m,f0w,const,theta,rho,data_phif0,data_phi,data_f)
m_phif0 = MOD(phim,phiw,f0m,f0w,const,theta,rho,mc_phif0,mc_phi,mc_f)
d_tmp = alladd(d_phif0)
m_tmp = np.average(alladd(m_phif0))
return -np.sum(wt*(np.log(d_tmp) - np.log(m_tmp)))
phim = 1.02
phiw = 0.01
f0m = 0.5
f0w = 0.4
f2m = 1.
f2w = 1.
const1 = 5.5 # this parameter fits well
const2 = 1
rho = 1.
theta = 0.5
vf0m = []
vf0w = []
vconst = []
ef0m = []
ef0w = []
econst = []
vvalue = []
# offset = onp.random.sample(1000)
ranm = onp.random.randint(10,100,size=500)  # api: numpy.random.randint
# %% [markdown]
# # Radial velocity fitting
# In this example, we will show how to fit radial velocity data using the _orbits_ Python API.
#
# This example replicates [the case study](https://gallery.exoplanet.codes/tutorials/rv/)
# from _[exoplanet](https://docs.exoplanet.codes/en/latest/)_ (which this package is built upon).
# The _exoplanet_ case study follows [a tutorial](https://radvel.readthedocs.io/en/latest/tutorials/K2-24_Fitting+MCMC.html)
# from [RadVel](https://radvel.readthedocs.io/en/latest/index.html).
# A lot of the nomenclature and some design ideas in _orbits_ are borrowed from RadVel.
# %%
import matplotlib.pyplot as plt
import numpy as np
import orbits.utils as ut
import pandas as pd
from orbits.model import RVModel
# %% [markdown]
# First, we can download the data from RadVel and plot it. We also make a finer
# grid that will be used to plot the model.
# %%
# Download and unpack the data
url = "https://raw.githubusercontent.com/California-Planet-Search/radvel/master/example_data/epic203771098.csv"
data = pd.read_csv(url, index_col=0)
t = np.array(data.t)
vrad = np.array(data.vel)
svrad = np.array(data.errvel)
# Reference time for RV trends later
x_ref = 0.5 * (t.min() + t.max())
# Fine grid for model plots
t_pred = np.linspace(t.min() - 5, t.max() + 5, 1000)
# Plot the data
plt.errorbar(data.t, data.vel, data.errvel, fmt="k.", capsize=2)
plt.xlabel("Time [days]")
plt.ylabel("RV [m/s]")
plt.show()
# %% [markdown]
# Next, we use literature values for the periods and transit times.
# Then we can use _exoplanet_ to estimate the RV semi-amplitude.
# %%
import exoplanet as xo
periods = [20.8851, 42.3633]
period_errs = [0.0003, 0.0006]
tc = [2072.7948, 2082.6251]
tc_errs = [0.0007, 0.0004]
k = xo.estimate_semi_amplitude(periods, t, vrad, svrad, t0s=tc)
print("Semi-amplitude estimate:", k, "m/s")
# %% [markdown]
# _exoplanet_ (and _orbits_) uses the PyMC3 modelling framework. PyMC3 models
# are a bit different than the Python models/functions we are used to. However,
# it has several useful features, including Hamiltonian-Monte Carlo as well as
# several pre-defined distributions that can be used as priors.
# The _exoplanet_ documentation has [a nice introduction to PyMC3](https://docs.exoplanet.codes/en/latest/tutorials/intro-to-pymc3/).
# The [PyMC3 documentation](https://docs.pymc.io/) is also a good resource to
# learn more about PyMC3 and probabilistic programming. The main thing to note
# is that parameters are defined directly as prior distributions.
#
# To define models in PyMC3, we need to be in a model context (using something
# like `with Model():`). However, if we do this, we need to setup relations
# between fitted parameters and parameters of interest manually. The same
# goes for a GP model, the RV signal, and everything that the model contains.
# This is where _orbits_ comes in. With pre-defined model such as `RVModel`,
# we can simply pass fitted parameters to the model and everything else
# (reparametrizations, GP kernels, orbit solver) is set up automatically.
#
# Because _orbits_ was first designed only to be a CLI tool that uses a YAML
# config file, parameters can currently only be passed at initialization
# inside a dictionary. An example dictionary is given below (a YAML version of
# this dictionary is available in `k224.yml`).
# %%
import pymc3_ext as pmx
params = {
# This entry is the first planet with its orbital parameters.
"b": {
"logper": {
"dist": "Normal",
"kwargs": {"mu": np.log(20.8851), "sd": 0.0003 / 20.8851},
},
"tc": {
"dist": "Normal",
"kwargs": {"mu": 2072.7948, "sd": 0.0007},
},
"logk": {
"dist": "Normal",
"kwargs": {
"mu": np.log(k[0]),
"sd": 2.0,
"testval": np.log(k[0]),
},
},
        # This parameter is a list with the sqrt(e)*cos(w) and sqrt(e)*sin(w) values.
        # exoplanet defines the UnitDisk prior, which can bring a performance
        # improvement and forces the eccentricity to be < 1 (see the conversion
        # sketch right after this dictionary).
"secsw": {
"dist": "UnitDisk",
"kwargs": {"shape": 2, "testval": 0.01 * np.ones(2)},
},
},
# This entry is the second planet with its orbital parameters.
"c": {
"logper": {
"dist": "Normal",
"kwargs": {"mu": np.log(42.3633), "sd": 0.0006 / 42.3633},
},
"tc": {
"dist": "Normal",
"kwargs": {"mu": 2082.6251, "sd": 0.0004},
},
"logk": {
"dist": "Normal",
"kwargs": {
"mu": np.log(k[1]),
"sd": 2.0,
"testval": np.log(k[1]),
},
},
"secsw": {
"dist": "UnitDisk",
"kwargs": {"shape": 2, "testval": 0.01 * np.ones(2)},
},
},
# Some parameters affect the whole rv dataset, not just one planet.
"rv": {
"logwn": {
"dist": "DataNormal",
"kwargs": {"data_used": "svrad", "sd": 5.0, "apply": "log"},
},
"gamma": {
"dist": "Normal",
"kwargs": {"mu": 0.0, "sd": 1.0},
},
"dvdt": {
"dist": "Normal",
"kwargs": {"mu": 0.0, "sd": 0.1},
},
"curv": {
"dist": "Normal",
"kwargs": {"mu": 0.0, "sd": 0.01},
},
},
}
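# %% [markdown]
# As a quick illustration (this cell is an addition, not part of the original
# tutorial), the `secsw` parametrization used above can be converted back to an
# eccentricity and argument of periastron via e = (sqrt(e)cosw)^2 + (sqrt(e)sinw)^2
# and w = arctan2(sqrt(e)sinw, sqrt(e)cosw).
# %%
secsw_example = np.array([0.1, 0.05])  # hypothetical sqrt(e)*cos(w), sqrt(e)*sin(w) values
ecc_example = np.sum(secsw_example ** 2)
omega_example = np.arctan2(secsw_example[1], secsw_example[0])
print("e =", ecc_example, ", w =", omega_example, "rad")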
# %% [markdown]
# Now that our parameters are defined, we can create a PyMC3 model.
# Note that we can still modify the model in the model context, as shown below
# with an additional eccentricity prior and an RV predictive curve.
# %%
with RVModel(t, vrad, svrad, 2, params=params) as model:
xo.eccentricity.vaneylen19(
"ecc_prior",
multi=True,
shape=2,
fixed=True,
observed=model.synth_dict["e"],
)
rv_model_pred = model.calc_rv_model(t_pred, name="pred")
print(model.named_vars)
# %% [markdown]
# However, forcing users to define a big nested dictionary in a notebook or
# script is not great for readability. It also breaks the original PyMC3
# workflow of defining parameters inside the model context. For this reason,
# `orbits` models also support defining parameters in the model context, **with
# a few (IMPORTANT) limitations**:
# 1. Submodels (e.g. planets) are defined at initialization with no parameters.
# To define their parameters, you need to use their context (see below).
# 2. The chi2 or GP likelihood for the data is never called explicitly;
# it is only included in the total posterior if defined. I have not found a
# good way to ensure it is created before sampling or optimization, so for
# now, users must call `add_likelihood` manually after creating the model
# (this is not required when passing a full parameter dictionary: the model
# is then able to create it by itself at initialization).
# A warning is raised when no likelihood is added.
# %%
import pymc3 as pm
import pymc3_ext as pmx
from orbits.prior import data_normal_prior
with RVModel(t, vrad, svrad, 2) as model:
print(model.named_vars)
data_normal_prior("logwn", data_used="svrad", sd=5.0, apply="log")
pm.Normal("gamma", mu=0.0, sd=1.0)
pm.Normal("dvdt", mu=0.0, sd=0.1)
pm.Normal("curv", mu=0.0, sd=0.01)
# To add planet parameters, must be in their automatically created submodel
with model.planets["b"]:
pm.Normal("logper", mu=np.log(20.8851), sd=0.0003 / 20.8851)
pm.Normal("tc", mu=2072.7948, sd=0.0007)
pm.Normal("logk", mu=np.log(k[0]), sd=2.0, testval=np.log(k[0]))
pmx.UnitDisk("secsw", shape=2, testval=0.01 * np.ones(2))
with model.planets["c"]:
pm.Normal("logper", mu=np.log(42.3633), sd=0.0006 / 42.3633)
pm.Normal("tc", mu=2082.6251, sd=0.0004)
pm.Normal("logk", mu=np.log(k[1]), sd=2.0, testval=np.log(k[1]))
pmx.UnitDisk("secsw", shape=2, testval=0.01 * np.ones(2))
xo.eccentricity.vaneylen19(
"ecc_prior",
multi=True,
shape=2,
fixed=True,
observed=model.synth_dict["e"],
)
rv_model_pred = model.calc_rv_model(t_pred, name="pred")
model.add_likelihood()
# %% [markdown]
# Now that we have a model, we can plot its prediction. But we only defined
# priors, so how do we evaluate the model? PyMC3 priors have a `testval` that
# can be used to plot the model. The _pymc3-ext_ package, from the _exoplanet_
# developers, has a built-in `eval_in_model` function to do just this.
# %%
plt.errorbar(t, vrad, yerr=svrad, fmt=".k")
with model:
plt.plot(t_pred, pmx.eval_in_model(model.rv_orbits_pred), "--k", alpha=0.5)
plt.plot(t_pred, pmx.eval_in_model(model.bkg_pred), ":k", alpha=0.5)
plt.plot(t_pred, pmx.eval_in_model(model.rv_model_pred), label="model")
plt.legend(fontsize=10)
plt.xlim(t_pred.min(), t_pred.max())
plt.xlabel("time [days]")
plt.ylabel("radial velocity [m/s]")
plt.title("initial model")
plt.show()
# %% [markdown]
# Our initial test values don't look too good, so we will find the
# maximum a posteriori (MAP) solution using `pymc3`/`pymc3-ext`.
# Sometimes it can help to optimize parameters sequentially, as we do below.
# %%
with model:
# Optimize the offset and trend parameters only
map_soln = pmx.optimize(
start=model.test_point, vars=[model.gamma, model.dvdt, model.curv]
)
opt2_list = [model.gamma, model.dvdt, model.curv, model.logwn]
# Now optimize some planet parameters as well, using previous solution as
# starting point.
for prefix in model.planets:
opt2_list.extend(
[
model[f"{prefix}_logk"],
model[f"{prefix}_tc"],
model[f"{prefix}_logper"],
]
)
map_soln = pmx.optimize(start=map_soln, vars=opt2_list)
# Optimize eccentricity parameters
map_soln = pmx.optimize(
start=map_soln,
vars=[model[f"{prefix}_secsw"] for prefix in model.planets],
)
# Optimize everything
map_soln = pmx.optimize(start=map_soln)
# %% [markdown]
# Let's now plot the MAP solution.
# %%
from orbits import plots
plots.rvplot(t, vrad, svrad, t_soln=t_pred, soln=map_soln, soln_name="pred")
plt.show()
# %% [markdown]
# We can now sample our model posterior to get a better estimate of our
# parameters and their uncertainty. We use _pymc3-ext_ as it wraps the PyMC3
# sampler with more appropriate defaults and tuning strategies (this is taken
# directly from the _exoplanet_ RV tutorial).
# %%
np.random.seed(42)  # api: numpy.random.seed
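# %% [markdown]
# A minimal sampling sketch (an assumption following the exoplanet RV tutorial,
# since the original cell is truncated here): draw posterior samples with the
# pymc3-ext wrapper, starting from the MAP solution found above.
# %%
with model:
    trace = pmx.sample(
        tune=1000,
        draws=1000,
        start=map_soln,
        chains=2,
        cores=2,
        target_accept=0.9,
    )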
################################################################################
# #
# This file is part of the Morphomatics library #
# see https://github.com/morphomatics/morphomatics #
# #
# Copyright (C) 2021 Zuse Institute Berlin #
# #
# Morphomatics is distributed under the terms of the ZIB Academic License. #
# see $MORPHOMATICS/LICENSE #
# #
################################################################################
import numpy as np
from ..geom import BezierSpline
from ..geom.BezierSpline import decasteljau
from . import ExponentialBarycenter
from ..manifold import Manifold
from ..manifold.ManoptWrap import ManoptWrap
from pymanopt import Problem
from pymanopt.manifolds import Product
from pymanopt.manifolds.product import _ProductTangentVector
from pymanopt.solvers import SteepestDescent, ConjugateGradient
class RiemannianRegression(object):
"""
Higher-order regression for estimation of relationship between
single explanatory and manifold-valued dependent variable.
The relationship is modeled via intrinsic Bezier splines (morphomatics.manifold.BezierSpline).
See:
<NAME>, <NAME>, <NAME>, <NAME>:
Nonlinear Regression on Manifolds for Shape Analysis using Intrinsic Bézier Splines.
Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), 2020.
"""
def __init__(self, M: Manifold, Y, param, degrees, iscycle=False, P_init=None, verbosity=2, maxtime=100000,
maxiter=100, mingradnorm=1e-6, minstepsize=1e-10, maxcostevals=5000):
"""Compute regression with Bézier splines for data in a manifold M using pymanopt.
:param M: manifold
:param Y: array containing M-valued data.
:param param: vector with scalars between 0 and the number of intended segments corresponding to the data points in
Y. The integer part determines the segment to which the data point belongs.
:param degrees: vector of length L; the l-th entry is the degree of the l-th segment of the spline. All entries must
be positive. For a closed spline, L > 1, degrees[0] > 2 and degrees[-1] > 2 must hold.
:param iscycle: boolean that determines whether a closed curve C1 spline shall be modeled.
:param P_init: initial guess
        :param verbosity: 0 is silent, 2 gives the most information; see pymanopt's Problem class
:param maxtime: maximum time for steepest descent
:param maxiter: maximum number of iterations in steepest descent
:param mingradnorm: stop iteration when the norm of the gradient is lower than mingradnorm
        :param minstepsize: stop iteration when the stepsize is smaller than minstepsize
:param maxcostevals: maximum number of allowed cost evaluations
:return P: list of control points of the optimal Bézier spline
"""
degrees = np.atleast_1d(degrees)
self._M = M
self._Y = Y
self._param = param
pymanoptM = ManoptWrap(M)
# Cost
def cost(P):
P = np.stack(P)
control_points = self.full_set(M, P, degrees, iscycle)
return self.sumOfSquared(BezierSpline(M, control_points, iscycle=iscycle), Y, param)
#MMM = Product([M for i in range(degrees[0])]) # for conjugated gradient
# Gradient
def grad(P):
P = np.stack(P)
control_points = self.full_set(M, P, degrees, iscycle)
grad_E = self.gradSumOfSquared(BezierSpline(M, control_points, iscycle=iscycle), Y, param)
grad_E = self.indep_set(grad_E, iscycle)
# return _ProductTangentVector([grad_E[0][i] for i in range(len(grad_E[0]))]) # for conjugated gradient
return np.concatenate(grad_E)
# Solve optimization problem with pymanopt by optimizing over independent control points
if iscycle:
N = Product([pymanoptM] * np.sum(degrees - 1))
else:
N = Product([pymanoptM] * (np.sum(degrees - 1) + 2))
problem = Problem(manifold=N, cost=cost, grad=grad, verbosity=verbosity)
# solver = ConjugateGradient(maxtime=maxtime, maxiter=maxiter, mingradnorm=mingradnorm,
# minstepsize=minstepsize, maxcostevals=maxcostevals, logverbosity=2)
solver = SteepestDescent(maxtime=maxtime, maxiter=maxiter, mingradnorm=mingradnorm,
minstepsize=minstepsize, maxcostevals=maxcostevals, logverbosity=2)
if P_init is None:
P_init = self.initControlPoints(M, Y, param, degrees, iscycle)
P_init = self.indep_set(P_init, iscycle)
P_opt, opt_log = solver.solve(problem, list(np.concatenate(P_init)))
P_opt = self.full_set(M, np.stack(P_opt, axis=0), degrees, iscycle)
self._spline = BezierSpline(M, P_opt, iscycle=iscycle)
self._unexplained_variance = opt_log['final_values']["f(x)"] / len(Y)
@property
def trend(self):
"""
:return: Estimated trajectory encoding relationship between
explanatory and manifold-valued dependent variable.
"""
return self._spline
def unexplained_variance(self):
"""Variance in the data set that is not explained by the regressed Bézier spline.
"""
return self._unexplained_variance
@property
def R2statistic(self):
""" Computes Fletcher's generalized R2 statistic for Bézier spline regression. For the definition see
Fletcher, Geodesic Regression on Riemannian Manifolds (2011), Eq. 7.
:return: generalized R^2 statistic (in [0, 1])
"""
# total variance
total_var = ExponentialBarycenter.total_variance(self._M, list(self._Y))
return 1 - self.unexplained_variance() / total_var
@staticmethod
def initControlPoints(M: Manifold, Y, param, degrees, iscycle=False):
"""Computes an initial choice of control points for the gradient descent steps in non-cyclic Bézier spline
regression.
The control points are initialized "along geodesics" near the data
points such that the differentiability conditions hold.
:param M: manifold
:param Y: array containing M-valued data.
:param param: vector with scalars between 0 and the number of intended segments corresponding to the data points in
Y. The integer part determines the segment to which the data point belongs.
:param degrees: vector of length L; the l-th entry is the degree of the l-th segment of the spline. All entries must
be positive. For a closed spline, L > 1, degrees[0] > 2 and degrees[-1] > 2 must hold.
        :param iscycle: boolean that determines whether a closed curve C1 spline shall be modeled.
:return P: list of length L containing arrays of control points. The l-th entry is an
array with degrees(l)+1 elements of M, that are ordered along the first dimension.
"""
assert M.metric and M.connec
degrees = np.atleast_1d(degrees)
assert all(degrees >= 1)
if iscycle:
# check for minimum number of control points
assert degrees.size > 1 and degrees[0] >= 3 and degrees[-1] >= 3
# sort data
ind = np.argsort(param)
param[:] = param[ind]
Y[:] = Y[ind]
data, t = RiemannianRegression.segments_from_data(Y, param)
assert len(data) == degrees.size
P = []
for l, d in enumerate(degrees):
siz = np.array(data[l].shape)
siz[0] = d + 1
Pl = np.zeros(siz)
# first segment
if l == 0:
for i in range(0, d + 1):
Pl[i] = M.connec.geopoint(data[0][0], data[0][-1], i / d)
# initial values for the control points of the remaining segments
else:
# C^1 condition
Pl[0] = P[l - 1][-1]
Pl[1] = M.connec.geopoint(P[l - 1][-2], P[l - 1][-1], 2)
# If there are remaining control points, they are free; we initialize them along a geodesic.
if d > 1:
if l != degrees.size - 1 or not iscycle:
for i in range(2, d + 1):
Pl[i] = M.connec.geopoint(Pl[1], data[l][-1], i / d)
# last segment of closed spline
else:
# C^1 condition
Pl[-1] = P[0][0]
Pl[-2] = M.connec.geopoint(P[0][1], P[0][0], 2)
# d-3 free control points
for i in range(2, d - 1):
# on geodesic between neighbours
Pl[i] = M.connec.geopoint(Pl[1], Pl[-2], (i - 1) / (d - 2))
P.append(Pl)
return P
@staticmethod
def sumOfSquared(B: BezierSpline, Y, param):
"""Computes sum of squared distances between the spline
defined by P and data Y.
:param B: Bézier spline
:param Y: array with data points along first axis
:param param: vector with corresponding parameter values
:return: non-negative scalar
"""
s = 0
for i, t in enumerate(param):
s += B._M.metric.dist(B.eval(t), Y[i]) ** 2
return s
@staticmethod
def gradSumOfSquared(B: BezierSpline, Y, param):
"""Compute the gradient of the sum of squared distances from a manifold-valued Bézier spline to time labeled data
points.
:param B: Bézier spline with K segments
:param Y: array that contains data in the manifold where B is defined (along first axis).
:param param: vector with the sorted parameter values that correspond to the data in Y. All values must be
in [0, B.nsegments].
:return: gradients at the control points of B
"""
assert all(0 <= param) and all(param <= B.nsegments)
assert Y.shape[0] == param.shape[0]
M = B._M
P = B.control_points
L = B.nsegments
# sort data (maybe not necessary)
ind = np.argsort(param)
param[:] = param[ind]
Y[:] = Y[ind]
# Initiate gradients
grad_E = []
for l in range(L):
grad_E.append(np.zeros_like(P[l]))
# Distinct parameters in param with multiplicity
u = np.unique(param)
for t in u:
# First, we sum up all gradients of tau_j(p) = d(p,y_j)^2 that live in the same tangent space; the value t
# appears count[i] times.
grad_dist = np.zeros_like(Y[0]) # would be cleaner with M.zerovec(decasteljau(M, P[ind], t_seg))
ind, t_seg = B.segmentize(t)
for jj in np.nonzero(param == t)[0]:
grad_dist += -2 * M.connec.log(decasteljau(M, P[ind], t_seg), Y[jj])
# add up adjointly transported contributions
grad_E[ind] += B.adjDpB(t, grad_dist)
# Taking care of C1 conditions
for l in range(1, L):
X_plus = grad_E[l][1] # gradient w.r.t. p_l^+
X_l = M.connec.adjDxgeo(P[l][0], P[l][1], 1, X_plus)
X_minus = M.connec.adjDxgeo(P[l - 1][-2], P[l][1], 1, X_plus)
# Final gradients at p_l and p_l^-
grad_E[l - 1][-1] += grad_E[l][0] + X_l
grad_E[l - 1][-2] += X_minus
# Everything that is not needed anymore is set to 0 s.t. it cannot cause
# bugs (here the gradient at p_l^+ and at p_l for the lower segment).
grad_E[l][0] *= 0
grad_E[l][1] *= 0
# Taking care for additional C1 conditions in the case of a closed curve.
if B.iscycle:
X_plus = grad_E[0][1] # gradient w.r.t. p_0^+
X_l = M.connec.adjDxgeo(P[0][0], P[0][1], 1, X_plus)
X_minus = M.connec.adjDxgeo(P[-1][-2], P[0][1], 1, X_plus)
# Final gradients at p_l and p_l^-
grad_E[-1][-1] += grad_E[0][0] + X_l
grad_E[-1][-2] += X_minus
# Everything that is not needed anymore is set to 0 s.t. it cannot cause bugs (here the gradient at p_0 and at
# p_0^+ w.r.t the lower segment).
grad_E[0][0] *= 0
grad_E[0][1] *= 0
return grad_E
@staticmethod
def segments_from_data(Y, param):
"""Divide data according to segments
:param Y: array of values
:param param: vector with corresponding nonnegative values sorted in ascending order
:return: List of data arrays. The l-th entry contains data belonging to the l-th segment;
list with corresponding parameter values. Data at a knot is assigned to the lower segment.
"""
assert np.all(np.diff(param) >= 0) and np.all(param >= 0)
assert Y.shape[0] == param.size
# get the segments the data belongs to
def segment(t):
"""Choose the correct segment and value for the parameter t
:param t: scalar
:return: index of segment, that is, i for t in (i,i+1] (0 if t=0)
"""
# choose correct segment
if t == 0:
ind = 0
elif t == np.round(t):
ind = t - 1
ind = ind.astype(int)
else:
ind = np.floor(t).astype(int)
return ind
# get indices where the new segment begins
s = np.zeros_like(param, int)
for i, t in enumerate(param):
s[i] = segment(t)
_, ind, count = np.unique(s, return_index=True, return_counts=True)
data = []
t = []
for i, d in enumerate(ind):
data.append(Y[d:d + count[i]])
t.append(param[d:d + count[i]])
return data, t
@staticmethod
def full_set(M: Manifold, P, degrees, iscycle):
"""Compute all control points of a C^1 Bézier spline from the independent ones."""
control_points = []
start = 0
siz = np.array(P.shape)
for i, d in enumerate(degrees):
deg = degrees[i]
if i == 0:
if not iscycle:
# all control points of the first segment are independent
control_points.append(np.stack(P[:deg + 1]))
start += deg + 1
else:
# add first two control points
siz[0] = deg + 1
                    C = np.zeros(siz)  # api: numpy.zeros
# Created by <NAME>.
import sys
import numpy as np
sys.path.append('../')
from envs import KArmedBandit
import matplotlib.pyplot as plt
'''
Solution to the K-armed bandit problem. A simple bandit algorithm has been
used to solve the problem.
Algorithm available on page 24 of "Reinforcement Learning: An Introduction."
Book reference:
<NAME>. and <NAME>., 2014. Reinforcement Learning:
An Introduction. 1st ed. London: The MIT Press.
'''
n_episodes = 1000
K = 4
epsilon = 0.1
bandit = KArmedBandit(K)
# Store data for plots.
selections = []
# Initialization.
Q = np.zeros(K)  # api: numpy.zeros
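# A self-contained sketch of the simple epsilon-greedy bandit algorithm referenced
# above. It deliberately does NOT use the project's KArmedBandit environment (its
# interface is not shown here); rewards are drawn from synthetic Gaussian arms so
# the incremental sample-average update is easy to follow.
rng = np.random.default_rng(0)
true_means = rng.normal(size=K)   # hidden reward means of the synthetic arms
N = np.zeros(K)                   # action counts
for _ in range(n_episodes):
    if rng.random() < epsilon:
        a = int(rng.integers(K))  # explore
    else:
        a = int(np.argmax(Q))     # exploit
    r = rng.normal(true_means[a], 1.0)
    N[a] += 1
    Q[a] += (r - Q[a]) / N[a]     # Q(A) <- Q(A) + (R - Q(A)) / N(A)
    selections.append(a)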
from __future__ import print_function, division
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import astropy.units as u
import astropy.coordinates as coord
from astropy.constants import G
import interact
def atlas_freespace(th=90, seed=51835):
""""""
# impact parameters
M = 1e5*u.Msun
B = 100*u.pc
V = 100*u.km/u.s
phi = coord.Angle(180*u.deg)
theta=coord.Angle(th*u.deg)
Tenc = 1*u.Gyr
T = 1*u.Gyr
dt = 0.1*u.Myr
rs = 0*u.pc
# setup tube
Nstar = 500
wx = 5*u.kpc
wy = 0*u.pc
wz = 0*u.pc
sx = 0*u.km/u.s
np.random.seed(seed)
x = (np.random.rand(Nstar) - 0.5) * wx
y = (np.random.randn(Nstar) - 0.5) * wy
z = (np.random.randn(Nstar) - 0.5) * wz
vx = (np.random.randn(Nstar) - 0.5) * sx
vy = (np.random.randn(Nstar) - 0.5) * sx
vz = (np.random.randn(Nstar) - 0.5) * sx
x1, x2, x3, v1, v2, v3 = interact.interact(M.si.value, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
stream = {}
stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)
stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)
ms = 8
alpha = 0.3
plt.close()
fig, ax = plt.subplots(2,1,figsize=(8,8), sharex=True)
plt.sca(ax[0])
plt.plot(stream['x'][0], stream['x'][1], 'o', ms=ms, alpha=alpha)
plt.ylabel('y (pc)')
plt.xlim(-3000,3000)
plt.ylim(-1100,20)
plt.title('$\\theta$ = {:3.0f}$^\circ$'.format(th), fontsize='medium')
# circle
phi_ = np.linspace(0,2*np.pi,100)
r = 0.05
x0 = 0.3
y0 = 0.65
x = r*np.cos(phi_) + x0
y = r*np.sin(phi_) + y0
xp = r*np.cos(2*np.pi-theta.rad) + x0
yp = r*np.sin(2*np.pi-theta.rad) + y0
Ns = 9
xs = np.linspace(-1.5*r, 1.5*r, Ns) + x0
ys = np.zeros(Ns) + y0
plt.plot(x, y, '-', color='0.3', alpha=0.5, lw=2, transform=fig.transFigure)
plt.plot(xp, yp, 'o', color='0.3', ms=10, transform=fig.transFigure)
plt.plot(xs, ys, 'o', color='tab:blue', ms=5, alpha=0.5, transform=fig.transFigure)
plt.sca(ax[1])
plt.plot(stream['x'][0], stream['x'][2], 'o', ms=ms, alpha=alpha)
plt.ylabel('z (pc)')
plt.xlabel('x (pc)')
plt.xlim(-3000,3000)
plt.ylim(-30,30)
plt.tight_layout()
plt.savefig('../plots/animations/angles/angles_{:03.0f}.png'.format(th/5))
def phases(seed=8264):
""""""
# impact parameters
M = 1e5*u.Msun
B = 100*u.pc
V = 100*u.km/u.s
phi = coord.Angle(180*u.deg)
#theta = coord.Angle(th*u.deg)
Tenc = 1*u.Gyr
T = 1*u.Gyr
dt = 0.1*u.Myr
rs = 0*u.pc
# setup tube
Nstar = 500
wx = 5*u.kpc
wy = 2*u.pc
wz = 2*u.pc
sx = 0*u.km/u.s
np.random.seed(seed)
x = (np.random.rand(Nstar) - 0.5) * wx
y = (np.random.randn(Nstar) - 0.5) * wy
z = (np.random.randn(Nstar) - 0.5) * wz
vx = (np.random.randn(Nstar) - 0.5) * sx
vy = (np.random.randn(Nstar) - 0.5) * sx
vz = (np.random.randn(Nstar) - 0.5) * sx
angles = [5, 18, 90]
times = [0.01]
for th in angles:
theta = coord.Angle(th*u.deg)
T = B**2*V*np.abs(np.sin(theta.rad))/(2*G*M)
times += [T.to(u.Gyr).value]
times += [4]
times = np.array(times) * u.Gyr
cmap_navy = mpl.colors.LinearSegmentedColormap.from_list('cmap_navy', [(0,'#78aadd'), (1,'#00187f')], N=256)
plt.close()
fig, ax = plt.subplots(5, 3, figsize=(10,10), sharex=True)
for et, T in enumerate(times):
for ea, th in enumerate(angles):
theta = coord.Angle(th*u.deg)
p = (G*M*T/(B**2*V*np.abs(np.sin(theta.rad)))).decompose()
print(et, T, p)
x1, x2, x3, v1, v2, v3 = interact.interact(M.si.value, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
stream = {}
stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)
stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)
plt.sca(ax[et][ea])
clog = np.log10(p.value)
cmin = np.log10(5e-2)
cmax = np.log10(20)
if clog<cmin: clog = cmin
if clog>cmax: clog = cmax
cindex = (clog - cmin)/(cmax - cmin)
plt.plot(stream['x'][0], stream['x'][1], 'o', color=cmap_navy(cindex), ms=1.5, alpha=0.6)
#plt.plot(stream['x'][0], stream['x'][1], 'o', color=cmap_navy(min(1.,p.value/7)), ms=1.5, alpha=0.6)
txt = plt.text(0.9, 0.15, '$\psi$={:.2f}'.format(p), ha='right', va='center', transform=plt.gca().transAxes, fontsize='small')
txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))
if et==0:
plt.title('$\\theta$ = {:.0f}$^\circ$'.format(th), fontsize='medium')
if et==np.size(times)-1:
plt.xlabel('x [pc]')
if ea==0:
plt.ylabel('y [pc]')
if ea==np.size(angles)-1:
plt.ylabel('T = {:.2f}'.format(T), labelpad=20, fontsize='small', rotation=270)
plt.gca().yaxis.set_label_position('right')
plt.tight_layout(h_pad=0.1, w_pad=0.15)
plt.savefig('../plots/freespace_phases.png')
plt.savefig('../plots/freespace_phases.pdf')
def change_b(seed=7356, th=90, case=0):
""""""
# impact parameters
M = 5e6*u.Msun
B = 100*u.pc
V = 100*u.km/u.s
phi = coord.Angle(180*u.deg)
theta = coord.Angle(th*u.deg)
Tenc = 1*u.Gyr
T = 1*u.Gyr
dt = 0.05*u.Myr
rs = 0*u.pc
# setup tube
Nstar = 1000
wx = 20*u.kpc
wy = 2*u.pc
wz = 2*u.pc
sx = 0*u.km/u.s
np.random.seed(seed)
x = (np.random.rand(Nstar) - 0.5) * wx
y = (np.random.randn(Nstar) - 0.5) * wy
z = (np.random.randn(Nstar) - 0.5) * wz
vx = (np.random.randn(Nstar) - 0.5) * sx
vy = (np.random.randn(Nstar) - 0.5) * sx
vz = (np.random.randn(Nstar) - 0.5) * sx
f_array = np.array([0.5,1,2])
p_array = np.array([0.1,0.3,0.5,1.,2.])
cmap_borange = mpl.colors.LinearSegmentedColormap.from_list('cmap_borange', [(0,'#ff9e00'), (1,'#e63f25')], N=256)
title_main = ['B$_0$, M$_0$', 'B$_0$, V$_0$', 'M$_0$, V$_0$', 'M$_0$, T$_0$']
title_less = ['$\sqrt{0.5}$ B$_0$, 0.5 M$_0$', '$\sqrt{0.5}$ B$_0$, V$_0$ / 0.5', '0.5 M$_0$, 0.5 V$_0$', '0.5 M$_0$, T$_0$ / 0.5']
title_more = ['$\sqrt{2}$ B$_0$, 2 M$_0$', '$\sqrt{2}$ B$_0$, V$_0$ / 2', '2 M$_0$, 2 V$_0$', '2 M$_0$, T$_0$ / 2']
titles = [title_less, title_main, title_more]
plt.close()
fig, ax = plt.subplots(5,3,figsize=(10,6), sharex=True, sharey='row')
for ep, p in enumerate(p_array):
#p = (G*M*T/(B**2*V*np.abs(np.sin(theta.rad)))).decompose()
B_ = np.sqrt(G*M*T/(p*V*np.abs(np.sin(theta.rad)))).to(u.pc)
print(ep, B_)
clog = np.log10(p)
cmin = np.log10(0.1)
cmax = np.log10(5)
if clog<cmin: clog = cmin
if clog>cmax: clog = cmax
cindex = (clog - cmin)/(cmax - cmin)
color = cmap_borange(cindex)
for ef, f in enumerate(f_array[:]):
fsq = np.sqrt(f)
finv = 1/f
#fsqinv = np.sqrt(finv)
if case==0:
x1, x2, x3, v1, v2, v3 = interact.interact(f*M.si.value, fsq*B_.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, (x*0.5/np.sqrt(p)).si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
elif case==1:
x1, x2, x3, v1, v2, v3 = interact.interact(M.si.value, fsq*B_.si.value, phi.rad, finv*V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
elif case==2:
x1, x2, x3, v1, v2, v3 = interact.interact(f*M.si.value, B_.si.value, phi.rad, f*V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
elif case==3:
x1, x2, x3, v1, v2, v3 = interact.interact(f*M.si.value, B_.si.value, phi.rad, V.si.value, theta.rad, finv*Tenc.si.value, finv*T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
stream = {}
stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)
stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)
plt.sca(ax[ep][ef])
plt.plot(stream['x'][0]/(fsq*B_), stream['x'][1]/(fsq*B_), '.', color=color, ms=3, alpha=0.02)
plt.gca().set_aspect('equal')
if ep==0:
plt.title(titles[ef][case], fontsize='medium')
if ep==np.size(p_array)-1:
plt.xlabel('x / B')
if ef==0:
plt.ylabel('y / B')
if ef==np.size(f_array)-1:
plt.ylabel('$\psi$ = {:.1f}'.format(p), labelpad=20, fontsize='small', rotation=270)
plt.gca().yaxis.set_label_position('right')
if f==1:
txt = plt.text(0.1, 0.15, '$B_0$={:.0f}'.format(B_.to(u.pc)), ha='left', va='center', transform=plt.gca().transAxes, fontsize='small')
txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))
plt.tight_layout(h_pad=0.1, w_pad=0.15)
plt.savefig('../plots/change_bscaled_{}.png'.format(case))
#plt.savefig('../plots/change_b.pdf')
def scaling(seed=98, f=2):
""""""
# impact parameters
M = 1e5*u.Msun
B = 100*u.pc
V = 100*u.km/u.s
phi = coord.Angle(180*u.deg)
theta=coord.Angle(45*u.deg)
Tenc = 1*u.Gyr
T = 10*u.Gyr
dt = 0.1*u.Myr
rs = 0*u.pc
# setup tube
Nstar = 500
wx = 5*u.kpc
wy = 2*u.pc
wz = 0*u.pc
sx = 0*u.km/u.s
np.random.seed(seed)
x = (np.random.rand(Nstar) - 0.5) * wx
y = (np.random.randn(Nstar) - 0.5) * wy
z = (np.random.randn(Nstar) - 0.5) * wz
vx = np.zeros(Nstar)*u.km/u.s
vy = np.zeros(Nstar)*u.km/u.s
vz = np.zeros(Nstar)*u.km/u.s
# limits
print('dense:{:.2g} << 1'.format(rs/B))
print('fast: {:.2g} << 1'.format((G*M/(V**2*B)).decompose()) )
print('thin: {:.2g} << 1'.format((np.sqrt(wy**2 + wz**2)/B).decompose()) )
print('long: {:.2g} >> 1'.format((wx/B).decompose()) )
print('cold: {:.2g} << 1'.format((sx/V).decompose()) )
x1, x2, x3, v1, v2, v3 = interact.interact(M.si.value, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
stream1 = {}
stream1['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)
stream1['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)
finv = 1/f
fsq = np.sqrt(f)
x1, x2, x3, v1, v2, v3 = interact.interact(f*M.si.value, B.si.value, phi.rad, V.si.value, theta.rad, finv*Tenc.si.value, finv*T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
stream2 = {}
stream2['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)
stream2['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)
x1, x2, x3, v1, v2, v3 = interact.interact(f*M.si.value, B.si.value, phi.rad, f*V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
stream3 = {}
stream3['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)
stream3['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)
x1, x2, x3, v1, v2, v3 = interact.interact(M.si.value, B.si.value, phi.rad, f*V.si.value, theta.rad, f*Tenc.si.value, f*T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
stream4 = {}
stream4['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)
stream4['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)
#x1, x2, x3, v1, v2, v3 = interact.interact(f*M.si.value, f*B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)
#stream5 = {}
#stream5['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)
#stream5['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)
dblue = mpl.cm.Blues(0.8)
lblue = mpl.cm.Blues(0.5)
ms = 2
streams = [stream1, stream2, stream3, stream4]
labels = ['M,T,V', '{0:.1f}M,T/{0:.1f},V'.format(f), '{0:.1f}M,T,{0:.1f}V'.format(f),'M,{0:.1f}T,{0:.1f}V'.format(f), '{0:.1f}M,sqrt{0:.1f}B'.format(f)]
plt.close()
fig, ax = plt.subplots(1,2,figsize=(10,5))
for e, stream in enumerate(streams):
color = mpl.cm.Blues(e/5+0.1)
ms = 14 - 2*e
plt.sca(ax[0])
plt.plot(stream['x'][0], stream['x'][1], 'o', color=color, ms=ms)
plt.sca(ax[1])
plt.plot(stream['x'][0], stream['x'][2], 'o', color=color, ms=ms, label=labels[e])
plt.sca(ax[0])
plt.xlabel('x (pc)')
plt.ylabel('y (pc)')
plt.sca(ax[1])
plt.xlabel('x (pc)')
plt.ylabel('z (pc)')
plt.legend(fontsize='small', loc=1)
plt.tight_layout()
plt.savefig('../plots/scaling_{:.1f}.png'.format(f))
def scaling_norm(seed=473):
""""""
# impact parameters
M = 1e5*u.Msun
B = 100*u.pc
V = 100*u.km/u.s
phi = coord.Angle(180*u.deg)
theta=coord.Angle(45*u.deg)
Tenc = 1*u.Gyr
T = 1*u.Gyr
dt = 0.05*u.Myr
rs = 0*u.pc
# setup tube
Nstar = 500
wx = 5*u.kpc
wy = 2*u.pc
wz = 0*u.pc
sx = 0*u.km/u.s
np.random.seed(seed)
    x = (np.random.rand(Nstar) - 0.5) * wx  # api: numpy.random.rand
"""
Create the extinguished grid in segments, so that large grids can be handled
within the available memory
All functions are now transformed into generators. As a result, any function
allows computation of a grid in an arbitrary number of chunks. This offers the
possibility to generate grids that cannot fit in memory.
.. note::
* dependencies have also been updated accordingly.
* likelihood computations need to be updated to allow computations even if
the full grid does not fit in memory
"""
import numpy as np
import copy
from astropy import units
from tqdm import tqdm
from beast.physicsmodel.stars import stellib
from beast.physicsmodel.grid import SpectralGrid, SEDGrid
from beast.physicsmodel.prior_weights_dust import PriorWeightsDust
# from beast.external.eztables import Table
from astropy.table import Table
from beast.tools.helpers import generator
from beast.tools import helpers
from beast.observationmodel.noisemodel import absflux_covmat
__all__ = [
"gen_spectral_grid_from_stellib_given_points",
"make_extinguished_grid",
"add_spectral_properties",
"calc_absflux_cov_matrices",
]
@generator
def gen_spectral_grid_from_stellib_given_points(
osl, pts, bounds=dict(dlogT=0.1, dlogg=0.3), chunksize=0
):
"""
Generator that reinterpolates a given stellar spectral library on to
an Isochrone grid
It will iterate over a list of `pts` points and generate
    `chunksize` models until the whole list of points is processed
Parameters
----------
osl: stellib.stellib
a stellar library
pts: dict like structure of points
dictionary like or named data structure of points to interpolate at
must contain logg, logT, logL, and Z
bounds: dict, optional (default={dlogT:0.1, dlogg:0.3})
sensitivity to extrapolation (see grid.get_stellib_boundaries)
chunksize: int, optional (default=0)
number of models to generate at each cycle.
If default <= 0, all models will be returned at once.
Returns
-------
g: SpectralGrid
Spectral grid (in memory) containing the requested list of stars
and associated spectra
"""
helpers.type_checker("osl", osl, stellib.Stellib)
if chunksize <= 0:
yield osl.gen_spectral_grid_from_given_points(pts, bounds=bounds)
else:
try:
# Yield successive n-sized chunks from l, assuming we can take
# slices of the iterator
for chunk_slice in helpers.chunks(list(range(len(pts))), chunksize):
chunk_pts = pts[chunk_slice]
yield osl.gen_spectral_grid_from_given_points(chunk_pts, bounds=bounds)
except Exception as e:
# chunks may not work on this as pts is most likely a Table
print(e)
for chunk_pts in helpers.chunks(pts, chunksize):
yield osl.gen_spectral_grid_from_given_points(chunk_pts, bounds=bounds)
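# Usage sketch (illustration only; `osl` and `pts` stand for an existing
# stellib.Stellib instance and a table of interpolation points):
#
#     for chunk in gen_spectral_grid_from_stellib_given_points(osl, pts, chunksize=10000):
#         do_something(chunk)  # each chunk is a SpectralGrid that fits in memory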
def _make_dust_fA_valid_points_generator(it, min_Rv, max_Rv):
"""
compute the allowed points based on the R(V) versus f_A plane
duplicates effort for all A(V) values, but it is quick compared to
other steps
.. note::
on 2.74: SMC extinction implies f_A = 0. and Rv = 2.74
Parameters
----------
it: an iterable
an initial sequence of points that will be trimmed to only valid ones
min_Rv: float
lower Rv limit
max_Rv: float
upper Rv limit
Returns
-------
npts: int
the actual number of valid points
pts: generator
a generator that only produce valid points
"""
itn = copy.copy(it)
npts = 0
def is_valid(ak, rk, fk):
return (
fk / max_Rv + (1.0 - fk) / 2.74
<= 1.0 / rk
<= fk * 1.0 / min_Rv + (1.0 - fk) / 2.74
)
# explore the full list once
# not very time consuming
for ak, rk, fk in itn:
if is_valid(ak, rk, fk):
npts += 1
# make the iterator
pts = (
(float(ak), float(rk), float(fk)) for ak, rk, fk in it if is_valid(ak, rk, fk)
)
return npts, pts
def apply_distance_grid(specgrid, distances, redshift=0):
"""
Distances are applied to the spectral grid by copying the grid and
applying a scaling factor.
Parameters
----------
project: str
project name
specgrid: grid.SpectralGrid object
spectral grid to transform
distances: list of float
Distances at which models should be shifted
0 means absolute magnitude.
Expecting pc units
redshift: float
Redshift to which wavelengths should be shifted
Default is 0 (rest frame)
"""
g0 = specgrid
# Current length of the grid
N0 = len(g0.grid)
N = N0 * len(distances)
# Make singleton list if a single distance is given
if not hasattr(distances, "__iter__"):
_distances = [distances]
else:
_distances = distances
# Add distance column if multiple distances are specified
cols = {}
cols["distance"] = np.empty(N, dtype=float)
# Existing columns
keys0 = list(g0.keys())
for key in keys0:
cols[key] = np.empty(N, dtype=float)
n_sed_points = g0.seds.shape[1]
    new_seds = np.empty((N, n_sed_points), dtype=float)
import sys, os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import bayes_mvs as bayesest
import time
sys.path.insert(0, '../../PyEcoLib')
import simulator as Sz
mean_size = 1 # femto liter
doubling_time = 18 #min
tmax = 180 #min
sample_time = 2 #min
div_steps = 10
ncells = 5000
gr = np.log(2)/doubling_time
if not os.path.exists('./data'):
os.makedirs('./data') #data path
if not os.path.exists('./figures'):
os.makedirs('./figures') #Figures path
start = time.time()
sim = Sz.Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps)
sim.divstrat(tmax = tmax, sample_time = 0.1*doubling_time, nameDSM = "./data/dataDSMadder.csv")
print('It took', int(time.time()-start), 'seconds.')
start = time.time()
sim = Sz.Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps,lamb = 2)
sim.divstrat(tmax = tmax, sample_time = 0.1*doubling_time, nameDSM = "./data/dataDSMsizer.csv")
print('It took', int(time.time()-start), 'seconds.')
start = time.time()
sim = Sz.Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps,lamb = 0.5)
sim.divstrat(tmax = tmax, sample_time = 0.1*doubling_time, nameDSM = "./data/dataDSMtimer.csv")
print('It took', int(time.time()-start), 'seconds.')
start = time.time()
sim = Sz.Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps)
sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM1.csv")
print('It took', int(time.time()-start), 'seconds.')
CV2sz = 0.02
v0 = mean_size*np.random.gamma(shape=1/CV2sz,scale=CV2sz,size=ncells)
start = time.time()
sim = Sz.Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps,V0array=v0)
sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM2.csv")
print('It took', int(time.time()-start), 'seconds.')
CV2div = 0.002
CV2gr = 0.02
start = time.time()
sim = Sz.Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps, CV2div = CV2div, CV2gr = CV2gr)
sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM3.csv")
print('It took', int(time.time()-start), 'seconds.')
data1=pd.read_csv("./data/dataCRM1.csv")
timearray1=data1.time.unique()
mnszarray1=[]
cvszarray1=[]
errcv2szarray1=[]
errmnszarray1=[]
df=data1
del df['time']
for m in range(len(df)):
szs=df.loc[m, :].values.tolist()
mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95)
mnszarray1.append(np.mean(szs))
errmnszarray1.append(mean_cntr[1][1]-mean_cntr[0])
cvszarray1.append(np.var(szs)/np.mean(szs)**2)
errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
errcv2szarray1.append(errv)
data1=pd.read_csv("./data/dataCRM2.csv")
timearray2=data1.time.unique()
mnszarray2=[]
cvszarray2=[]
errcv2szarray2=[]
errmnszarray2=[]
df=data1
del df['time']
for m in range(len(df)):
szs=df.loc[m, :].values.tolist()
mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95)
mnszarray2.append(np.mean(szs))
errmnszarray2.append(mean_cntr[1][1]-mean_cntr[0])
cvszarray2.append(np.var(szs)/np.mean(szs)**2)
errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
errcv2szarray2.append(errv)
data1=pd.read_csv("./data/dataCRM3.csv")
timearray3=data1.time.unique()
mnszarray3=[]
cvszarray3=[]
errcv2szarray3=[]
errmnszarray3=[]
df=data1
del df['time']
for m in range(len(df)):
szs=df.loc[m, :].values.tolist()
mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95)
mnszarray3.append(np.mean(szs))
errmnszarray3.append(mean_cntr[1][1]-mean_cntr[0])
cvszarray3.append(np.var(szs)/np.mean(szs)**2)
errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
errcv2szarray3.append(errv)
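# The three blocks above repeat the same per-time-point statistics. The helper
# below is a hedged refactoring sketch (defined here but not used above): it
# returns the mean size, CV^2 and their Bayesian error bars for any CRM file.
def size_stats(csv_name, alpha=0.95):
    data = pd.read_csv(csv_name)
    times = data.time.unique()
    df = data.drop(columns=['time'])
    mn, cv2, errmn, errcv2 = [], [], [], []
    for m in range(len(df)):
        szs = df.loc[m, :].values.tolist()
        mean_cntr, var_cntr, _ = bayesest(szs, alpha=alpha)
        mn.append(np.mean(szs))
        errmn.append(mean_cntr[1][1] - mean_cntr[0])
        cv2.append(np.var(szs) / np.mean(szs)**2)
        errcv2.append((var_cntr[1][1] - var_cntr[0]) / mean_cntr[0]**2
                      + 2 * (mean_cntr[1][1] - mean_cntr[0]) * var_cntr[0] / mean_cntr[0]**3)
    return times, mn, cv2, errmn, errcv2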
start = time.time()
sim = Sz.Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps)
sim.szdynFSP(tmax = tmax, nameFSP = "./data/dataFSP0.csv")
print('It took', int(time.time()-start), 'seconds.')
start = time.time()
CV2sz = 0.02
sim = Sz.Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps)
sim.szdynFSP(tmax = tmax, nameFSP = "./data/dataFSP.csv",CV2sz=CV2sz)
print('It took', int(time.time()-start), 'seconds.')
fig, ax = plt.subplots(2,3, figsize=(16,6),sharex=True)
data=pd.read_csv("./data/dataCRM1.csv")
tt=data.time
del data['time']
mmar=data.columns
for column in data.columns[0:10]:
ax[0,0].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')
data=pd.read_csv("./data/dataCRM2.csv")
tt=data.time
del data['time']
mmar=data.columns
for column in data.columns[0:10]:
ax[0,1].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')
data=pd.read_csv("./data/dataCRM3.csv")
tt=data.time
del data['time']
mmar=data.columns
for column in data.columns[0:10]:
ax[0,2].plot(tt/doubling_time,data[column],c="#B9B9B9")
ax[0,0].plot(np.array(timearray1)/doubling_time,mnszarray1,lw=2)
ax[0,0].fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray1)-np.array(errmnszarray1),np.array(mnszarray1)
+np.array(errmnszarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,0].plot(np.array(timearray1)/doubling_time,cvszarray1,lw=2)
ax[1,0].fill_between(np.array(timearray1)/doubling_time,np.array(cvszarray1)-np.array(errcv2szarray1),np.array(cvszarray1)
+np.array(errcv2szarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0,1].plot(np.array(timearray2)/doubling_time,mnszarray2,lw=2)
ax[0,1].fill_between(np.array(timearray2)/doubling_time,np.array(mnszarray2)-np.array(errmnszarray2),np.array(mnszarray2)
+np.array(errmnszarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,1].plot(np.array(timearray2)/doubling_time,cvszarray2,lw=2)
ax[1,1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray2)-np.array(errcv2szarray2),np.array(cvszarray2)
+np.array(errcv2szarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0,2].plot(np.array(timearray3)/doubling_time,mnszarray3,lw=2)
ax[0,2].fill_between(np.array(timearray3)/doubling_time,np.array(mnszarray3)-np.array(errmnszarray3),np.array(mnszarray3)
+np.array(errmnszarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,2].plot(np.array(timearray3)/doubling_time,cvszarray3,lw=2)
ax[1,2].fill_between(np.array(timearray3)/doubling_time,np.array(cvszarray3)-np.array(errcv2szarray3),np.array(cvszarray3)
+np.array(errcv2szarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
#ax[0].set_ylim([1,1.7])
#ax[1].set_ylim([0,0.15])
ax[0,0].set_title("Stochastic division",fontsize=15)
ax[0,1].set_title("Finite Initial Distribution",fontsize=15)
ax[0,2].set_title("Noisy Splitting",fontsize=15)
data=pd.read_csv("./data/dataFSP.csv")
ax[0,1].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric")
ax[1,1].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g')
data=pd.read_csv("./data/dataFSP0.csv")
ax[0,0].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric")
ax[1,0].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g')
ax[0,0].legend(fontsize=15)
ax[0,1].legend(fontsize=15)
ax[0,0].set_ylabel(r"$\langle s\rangle$ $(\mu m)$",size=15)
ax[1,0].set_ylabel("$C_V^2(s)$",size=15)
ax[1,0].set_xlabel(r"$t/\tau$",size=15)
ax[1,1].set_xlabel(r"$t/\tau$",size=15)
ax[1,2].set_xlabel(r"$t/\tau$",size=15)
for l in [0,1]:
for m in [0,1,2]:
ax[l,m].set_xlim([0,6])
taqui=np.arange(0,7,step=1)
ax[l,m].set_xticks(np.array(taqui))
ax[l,m].grid()
ax[l,m].tick_params(axis='x', labelsize=12)
ax[l,m].tick_params(axis='y', labelsize=12)
for axis in ['bottom','left']:
ax[l,m].spines[axis].set_linewidth(2)
ax[l,m].tick_params(axis='both', width=2,length=6)
for axis in ['top','right']:
ax[l,m].spines[axis].set_linewidth(0)
ax[l,m].tick_params(axis='both', width=0,length=6)
taqui=np.arange(0,0.13,step=0.02)
ax[1,m].set_yticks(np.array(taqui))
taqui=np.arange(0.5,3,step=.5)
ax[0,m].set_yticks(np.array(taqui))
ax[1,m].set_ylim([0,0.13])
ax[0,m].set_ylim([0.5,3])
plt.subplots_adjust(hspace=0.15,wspace=0.2)
#ax[1].plot(time4,np.array(allvarsz4),c='r')
#ax[0].plot(time4,mean_size*np.array(allmeansz4),c='r',label="Numeric")
plt.savefig('./figures/size_statistics_comp1.eps',bbox_inches='tight')
plt.savefig('./figures/size_statistics_comp1.svg',bbox_inches='tight')
plt.savefig('./figures/size_statistics_comp1.png',bbox_inches='tight')
data2=pd.read_csv("./data/dataDSMadder.csv")
data2=data2[data2.time>5*doubling_time]
quantnumber=5
pvadd2=data2
CV2darr1=[]
deltarr1=[]
sbarr1=[]
errcv2darr1=[]
errdeltarr1=[]
errsbarr1=[]
for i in range(quantnumber):
lperv0=np.percentile(pvadd2.S_b,i*100/quantnumber)
hperv0=np.percentile(pvadd2.S_b,(i+1)*100/quantnumber)
quanta1=pvadd2[pvadd2.S_b>lperv0]
quanta2=quanta1[quanta1.S_b<hperv0]
mean_cntr, var_cntr, std_cntr = bayesest((quanta2.S_d-quanta2.S_b)/np.mean(pvadd2.S_d-pvadd2.S_b),alpha=0.95)
meanv0_cntr, varv0_cntr, stdv0_cntr = bayesest(quanta2.S_b/np.mean(pvadd2.S_b),alpha=0.95)
CV2darr1.append(var_cntr[0]/mean_cntr[0]**2)
deltarr1.append(mean_cntr[0])
sbarr1.append(meanv0_cntr[0])
errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
errcv2darr1.append(errv)
errdeltarr1.append(mean_cntr[1][1]-mean_cntr[0])
errsbarr1.append(meanv0_cntr[1][1]-meanv0_cntr[0])
data3=pd.read_csv("./data/dataDSMsizer.csv")
data3=data3[data3.time>5*doubling_time]
quantnumber=5
pvadd2=data3
CV2darr2=[]
deltarr2=[]
sbarr2=[]
errcv2darr2=[]
errdeltarr2=[]
errsbarr2=[]
for i in range(quantnumber):
lperv0=np.percentile(pvadd2.S_b,i*100/quantnumber)
hperv0=np.percentile(pvadd2.S_b,(i+1)*100/quantnumber)
quanta1=pvadd2[pvadd2.S_b>lperv0]
quanta2=quanta1[quanta1.S_b<hperv0]
mean_cntr, var_cntr, std_cntr = bayesest((quanta2.S_d-quanta2.S_b)/np.mean(pvadd2.S_d-pvadd2.S_b),alpha=0.95)
meanv0_cntr, varv0_cntr, stdv0_cntr = bayesest(quanta2.S_b/np.mean(pvadd2.S_b),alpha=0.95)
CV2darr2.append(var_cntr[0]/mean_cntr[0]**2)
deltarr2.append(mean_cntr[0])
sbarr2.append(meanv0_cntr[0])
errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
errcv2darr2.append(errv)
errdeltarr2.append(mean_cntr[1][1]-mean_cntr[0])
errsbarr2.append(meanv0_cntr[1][1]-meanv0_cntr[0])
data4=pd.read_csv("./data/dataDSMtimer.csv")
data4=data4[data4.time>5*doubling_time]
quantnumber=5
pvadd2=data4
CV2darr3=[]
deltarr3=[]
sbarr3=[]
errcv2darr3=[]
errdeltarr3=[]
errsbarr3=[]
for i in range(quantnumber):
lperv0=np.percentile(pvadd2.S_b,i*100/quantnumber)
hperv0=np.percentile(pvadd2.S_b,(i+1)*100/quantnumber)
quanta1=pvadd2[pvadd2.S_b>lperv0]
quanta2=quanta1[quanta1.S_b<hperv0]
mean_cntr, var_cntr, std_cntr = bayesest((quanta2.S_d-quanta2.S_b)/np.mean(pvadd2.S_d-pvadd2.S_b),alpha=0.95)
meanv0_cntr, varv0_cntr, stdv0_cntr = bayesest(quanta2.S_b/np.mean(pvadd2.S_b),alpha=0.95)
CV2darr3.append(var_cntr[0]/mean_cntr[0]**2)
deltarr3.append(mean_cntr[0])
sbarr3.append(meanv0_cntr[0])
errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
errcv2darr3.append(errv)
errdeltarr3.append(mean_cntr[1][1]-mean_cntr[0])
errsbarr3.append(meanv0_cntr[1][1]-meanv0_cntr[0])
print(np.mean(pvadd2.S_b))
print(np.mean(pvadd2.S_d-pvadd2.S_b))
sim = Sz.Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps,lamb=0.5)
sbar=np.linspace(0.5,1.5,100)*mean_size
cv2tim=[]
delttim=[]
for i in sbar:
sd,cv2=sim.SdStat(i)
cv2tim.append(cv2)
delttim.append(sd-i)
sim = Sz.Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps)
sbar=np.linspace(0.5,1.5,100)*mean_size
cv2ad=[]
deltad=[]
for i in sbar:
sd,cv2=sim.SdStat(i)
cv2ad.append(cv2)
deltad.append(sd-i)
sim = Sz.Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps,lamb=2)
sbar=np.linspace(0.5,1.5,100)*mean_size
cv2sz=[]
deltsz=[]
for i in sbar:
sd,cv2=sim.SdStat(i)
cv2sz.append(cv2)
deltsz.append(sd-i)
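# Note: the lamb parameter selects the division strategy in these SdStat sweeps,
# consistent with the simulations above: lamb=0.5 behaves timer-like, lamb=1
# (the default) adder-like, and lamb=2 sizer-like.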
fig, ax = plt.subplots(1,2, figsize=(12,4))
#ax[0].scatter(data2.S_b/np.mean(data2.S_b),(data2.S_d-data2.S_b)/np.mean(data2.S_b),s=2)
#ax[0].scatter(data3.S_b/np.mean(data3.S_b),(data2.S_d-data3.S_b)/np.mean(data3.S_b),s=2)
#ax[0].scatter(data4.S_b/np.mean(data4.S_b),(data4.S_d-data2.S_b)/np.mean(data4.S_b),s=2)
ax[0].errorbar(np.array(sbarr1),np.array(deltarr1),xerr=errsbarr1,yerr=errdeltarr1, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='k')
ax[1].errorbar(np.array(sbarr1),CV2darr1,xerr=errsbarr1,yerr=errcv2darr1, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='k')
ax[0].errorbar(np.array(sbarr2),np.array(deltarr2),xerr=errsbarr2,yerr=errdeltarr2, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='r')
ax[1].errorbar(np.array(sbarr2),CV2darr2,xerr=errsbarr2,yerr=errcv2darr2, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='r')
ax[0].errorbar(np.array(sbarr3),np.array(deltarr3),xerr=errsbarr3,yerr=errdeltarr3, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='g')
ax[1].errorbar(np.array(sbarr3),CV2darr3,xerr=errsbarr3,yerr=errcv2darr3, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='g')
ax[1].set_ylim([0,0.3])
ax[0].set_xlabel("$s_b/\overline{s_b}$",size=20)
ax[1].set_xlabel("$s_b/\overline{s_b}$",size=20)
ax[0].set_ylabel("$\Delta/\overline{s_b}$",size=15)
ax[1].set_ylabel("$C_V^2(\Delta)$",size=15)
#ax[0].set_xlim([0.5,1.5])
for l in [0,1]:
#ax[l].set_xlim([0.2,2])
ax[l].grid()
ax[l].tick_params(axis='x', labelsize=15)
ax[l].tick_params(axis='y', labelsize=15)
for axis in ['bottom','left']:
ax[l].spines[axis].set_linewidth(2)
ax[l].tick_params(axis='both', width=2,length=6)
for axis in ['top','right']:
ax[l].spines[axis].set_linewidth(0)
ax[l].tick_params(axis='both', width=0,length=6)
ax[0].plot(np.array(sbar)/mean_size, np.array(delttim)/mean_size, lw=2,c='g',label="$\lambda=0.5$")
ax[1].plot(np.array(sbar)/mean_size, cv2tim, lw=2,c='g')
ax[0].plot(np.array(sbar)/mean_size, np.array(deltad)/mean_size, lw=2,c='k',label="$\lambda=1$")
ax[1].plot(np.array(sbar)/mean_size, cv2ad, lw=2,c='k')
ax[0].plot(np.array(sbar)/mean_size, np.array(deltsz)/mean_size, lw=2,c='r',label="$\lambda=2$")
ax[1].plot(np.array(sbar)/mean_size, cv2sz, lw=2,c='r')
ax[0].set_ylim(0.75,1.35)
ax[1].set_ylim(0.03,0.17)
ax[0].text(0.55,1.27,"$\lambda = 2$",rotation=-35,fontsize=10)
ax[0].text(0.55,1.01,"$\lambda = 1$",fontsize=10)
ax[0].text(0.55,0.87,"$\lambda = 0.5$",rotation=35,fontsize=10)
ax[1].text(0.5,0.05,"$\lambda = 2$",rotation=15,fontsize=10)
ax[1].text(0.5,0.11,"$\lambda = 1$",fontsize=10)
ax[1].text(0.5,0.155,"$\lambda = 0.5$",rotation=-10,fontsize=10)
#ax[0].set_ylim([0.7,1.5])
plt.savefig('./figures/full_div_strategy.eps',bbox_inches='tight')
plt.savefig('./figures/full_div_strategy.svg',bbox_inches='tight')
plt.savefig('./figures/full_div_strategy.png',bbox_inches='tight')
fig, ax = plt.subplots(2,4, figsize=(16,5))
data=pd.read_csv("./data/dataCRM1.csv")
tt=data.time
del data['time']
for column in data.columns[0:10]:
ax[0,0].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')
data=pd.read_csv("./data/dataCRM2.csv")
tt=data.time
del data['time']
for column in data.columns[0:10]:
ax[0,1].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')
data=pd.read_csv("./data/dataCRM3.csv")
tt=data.time
del data['time']
for column in data.columns[0:10]:
ax[0,2].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')
ax[0,0].plot(np.array(timearray1)/doubling_time,mnszarray1,lw=2)
ax[0,0].fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray1)-np.array(errmnszarray1),np.array(mnszarray1)
+np.array(errmnszarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,0].plot(np.array(timearray1)/doubling_time,cvszarray1,lw=2)
ax[1,0].fill_between(np.array(timearray1)/doubling_time,np.array(cvszarray1)-np.array(errcv2szarray1),np.array(cvszarray1)
+np.array(errcv2szarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0,1].plot(np.array(timearray2)/doubling_time,mnszarray2,lw=2)
ax[0,1].fill_between(np.array(timearray2)/doubling_time,np.array(mnszarray2)-np.array(errmnszarray2),np.array(mnszarray2)
+np.array(errmnszarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,1].plot(np.array(timearray2)/doubling_time,cvszarray2,lw=2)
ax[1,1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray2)-np.array(errcv2szarray2),np.array(cvszarray2)
+np.array(errcv2szarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0,2].plot(np.array(timearray3)/doubling_time,mnszarray3,lw=2)
ax[0,2].fill_between(np.array(timearray3)/doubling_time,np.array(mnszarray3)-np.array(errmnszarray3),np.array(mnszarray3)
+np.array(errmnszarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,2].plot(np.array(timearray3)/doubling_time,cvszarray3,lw=2)
ax[1,2].fill_between(np.array(timearray3)/doubling_time,np.array(cvszarray3)-np.array(errcv2szarray3),np.array(cvszarray3)
+np.array(errcv2szarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
#ax[0].set_ylim([1,1.7])
#ax[1].set_ylim([0,0.15])
ax[0,0].set_title("Stochastic division",fontsize=15)
ax[0,1].set_title("Finite Initial Distribution",fontsize=15)
ax[0,2].set_title("Noisy Splitting",fontsize=15)
data=pd.read_csv("./data/dataFSP.csv")
ax[0,1].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric")
ax[1,1].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g')
data=pd.read_csv("./data/dataFSP0.csv")
ax[0,0].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric")
ax[1,0].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g')
ax[0,0].legend(fontsize=10)
ax[0,1].legend(fontsize=10)
ax[0,2].legend(fontsize=10)
#ax[0,1].legend(fontsize=10)
ax[0,3].errorbar(np.array(sbarr1),np.array(deltarr1),xerr=errsbarr1,yerr=errdeltarr1, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='k')
ax[1,3].errorbar(np.array(sbarr1),CV2darr1,xerr=errsbarr1,yerr=errcv2darr1, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='k')
ax[0,3].errorbar(np.array(sbarr2),np.array(deltarr2),xerr=errsbarr2,yerr=errdeltarr2, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='r')
ax[1,3].errorbar(np.array(sbarr2),CV2darr2,xerr=errsbarr2,yerr=errcv2darr2, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='r')
ax[0,3].errorbar(np.array(sbarr3),np.array(deltarr3),xerr=errsbarr3,yerr=errdeltarr3, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='g')
ax[1,3].errorbar(np.array(sbarr3),CV2darr3,xerr=errsbarr3,yerr=errcv2darr3, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='g')
ax[0,3].plot(np.array(sbar)/mean_size, np.array(delttim)/mean_size, lw=2,c='g',label="$\lambda=0.5$")
ax[1,3].plot(np.array(sbar)/mean_size, cv2tim, lw=2,c='g')
ax[0,3].plot(np.array(sbar)/mean_size, np.array(deltad)/mean_size, lw=2,c='k',label="$\lambda=1$")
ax[1,3].plot(np.array(sbar)/mean_size, cv2ad, lw=2,c='k')
ax[0,3].plot(np.array(sbar)/mean_size, np.array(deltsz)/mean_size, lw=2,c='r',label="$\lambda=2$")
from netCDF4 import Dataset
import numpy as np
from math import sin, cos, pi, radians
#-------------------------------------------------------------
def velocities_strains_stress_divergences(x, y):
A = 2.56#np.random.uniform(2,4)
B = 2.56#np.random.uniform(2,4)
C = 2.56#np.random.uniform(2,4)
D = 2.56#np.random.uniform(2,4)
Lx = 1.0
Ly = 1.0
u = sin((2.0 * pi * x * A) / Lx) * sin((2.0 * pi * y * B) / Ly)
v = sin((2.0 * pi * x * C) / Lx) * sin((2.0 * pi * y * D) / Ly)
dudx = ((2.0 * pi * A) / Lx) * cos((2.0 * pi * x * A) / Lx) * sin((2.0 * pi * y * B) / Ly)
dudy = ((2.0 * pi * B) / Ly) * sin((2.0 * pi * x * A) / Lx) * cos((2.0 * pi * y * B) / Ly)
dvdx = ((2.0 * pi * C) / Lx) * cos((2.0 * pi * x * C) / Lx) * sin((2.0 * pi * y * D) / Ly)
dvdy = ((2.0 * pi * D) / Ly) * sin((2.0 * pi * x * C) / Lx) * cos((2.0 * pi * y * D) / Ly)
d2udx2 = -((2.0 * pi * A) / Lx)*((2.0 * pi * A) / Lx) * sin((2.0 * pi * x * A) / Lx) * sin((2.0 * pi * y * B) / Ly)
d2udy2 = -((2.0 * pi * B) / Ly)*((2.0 * pi * B) / Ly) * sin((2.0 * pi * x * A) / Lx) * sin((2.0 * pi * y * B) / Ly)
d2udxdy = ((2.0 * pi * A) / Lx)*((2.0 * pi * B) / Ly) * cos((2.0 * pi * x * A) / Lx) * cos((2.0 * pi * y * B) / Ly)
d2vdx2 = -((2.0 * pi * C) / Lx)*((2.0 * pi * C) / Lx) * sin((2.0 * pi * x * C) / Lx) * sin((2.0 * pi * y * D) / Ly)
d2vdy2 = -((2.0 * pi * D) / Ly)*((2.0 * pi * D) / Ly) * sin((2.0 * pi * x * C) / Lx) * sin((2.0 * pi * y * D) / Ly)
d2vdxdy = ((2.0 * pi * C) / Lx)*((2.0 * pi * D) / Ly) * cos((2.0 * pi * x * C) / Lx) * cos((2.0 * pi * y * D) / Ly)
e11 = dudx
e22 = dvdy
e12 = 0.5 * (dudy + dvdx)
de11dx = d2udx2
de12dy = 0.5 * (d2udy2 + d2vdxdy)
de12dx = 0.5 * (d2udxdy + d2vdx2)
de22dy = d2vdy2
divu = de11dx + de12dy
divv = de12dx + de22dy
return u, v, e11, e22, e12, divu, divv
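#-------------------------------------------------------------
# Hedged self-check sketch (not part of the original script): the analytic
# divergences returned above can be verified against centred finite
# differences of the strain components.
def check_divergences(x, y, h=1.0e-6):
    _, _, e11_xp, _, e12_xp, _, _ = velocities_strains_stress_divergences(x + h, y)
    _, _, e11_xm, _, e12_xm, _, _ = velocities_strains_stress_divergences(x - h, y)
    _, _, _, e22_yp, e12_yp, _, _ = velocities_strains_stress_divergences(x, y + h)
    _, _, _, e22_ym, e12_ym, _, _ = velocities_strains_stress_divergences(x, y - h)
    _, _, _, _, _, divu, divv = velocities_strains_stress_divergences(x, y)
    divu_fd = (e11_xp - e11_xm) / (2.0 * h) + (e12_yp - e12_ym) / (2.0 * h)
    divv_fd = (e12_xp - e12_xm) / (2.0 * h) + (e22_yp - e22_ym) / (2.0 * h)
    return abs(divu - divu_fd), abs(divv - divv_fd)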
#-------------------------------------------------------------
def create_ic(gridfile, icfile):
# load grid file
grid = Dataset(gridfile, "r")
nCells = len(grid.dimensions["nCells"])
nVertices = len(grid.dimensions["nVertices"])
maxEdges = len(grid.dimensions["maxEdges"])
vertexDegree = len(grid.dimensions["vertexDegree"])
nEdgesOnCell = grid.variables["nEdgesOnCell"][:]
verticesOnCell = grid.variables["verticesOnCell"][:]
verticesOnCell[:] = verticesOnCell[:] - 1
cellsOnVertex = grid.variables["cellsOnVertex"][:]
cellsOnVertex[:] = cellsOnVertex[:] - 1
xCell = grid.variables["xCell"][:]
yCell = grid.variables["yCell"][:]
xVertex = grid.variables["xVertex"][:]
yVertex = grid.variables["yVertex"][:]
grid.close()
xMin = np.amin(xVertex)
xMax = np.amax(xVertex)
yMin = np.amin(yVertex)
yMax = np.amax(yVertex)
# calculate output variables
uVelocity = np.empty(nVertices)
vVelocity = np.empty(nVertices)
stressDivergenceU = np.empty(nVertices)
    stressDivergenceV = np.empty(nVertices)
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 3 14:33:25 2016
@author: hjalmar
"""
from ht_helper import get_input, FrameStepper, CountdownPrinter
import matplotlib.pyplot as plt
from datetime import datetime
from scipy.misc import imresize  # NOTE: scipy.misc.imresize was removed in SciPy >= 1.3
from time import sleep
from glob import glob
import numpy as np
import warnings
import tensorflow as tf
warnings.simplefilter("ignore")
class LabelFrames:
"""
"""
def __init__(self, video_fname, log_fname=None):
"""
"""
        # Suppress the plt.ginput warning.
warnings.warn("deprecated", DeprecationWarning)
plt.ion()
self.start_t = datetime.now()
self.video_fname = video_fname
if log_fname is None:
self.log_fname = ('%s.txt' %
video_fname.split('/')[-1].split('.')[-2])
else:
self.log_fname = log_fname
# Open output log file.
self.f = open(self.log_fname, 'w')
self.figsize = [8.125, 6.125]
self.fst = FrameStepper(self.video_fname)
self.available_nf = self.fst.tot_n
self.frame = self.fst.frame
self.frame_num = self.fst.n
self.frame_time = self.fst.t
self.fig = plt.figure(figsize=self.figsize)
self.ax = self.fig.add_axes([0.01, 0.01, 0.97, 0.97])
self.num_labelled = 0
self.imwdt = self.frame.shape[1]
self.imhgt = self.frame.shape[0]
self.head_data = {'center_x': 0.0,
'center_y': 0.0,
'angle': 0.0,
'angle_visible': 0,
'box_width': 0.0,
'box_height': 0.0,
'forehead_pos_x': 0.0,
'forehead_pos_y': 0.0,
'tuft_pos_left_x': 0.0,
'tuft_pos_left_y': 0.0,
'tuft_pos_right_x': 0.0,
'tuft_pos_right_y': 0.0}
self._head_len_wdt_ratio = 1.0 # head a bit wider than long
self._head_backwrd_shift = 0.6
self._head_scaling = 3 # multiplied with inter_tuft_distance to get
# width of head including tufts
# Write the header to the log file.
self._write_log(write_header=True)
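    # Hedged usage sketch (file name below is illustrative only):
    #
    #     lf = LabelFrames('session1.mp4')
    #     lf.run_spacedbatch(t0=0.2, batch_size=1000)
    #
    # run_spacedbatch steps through the video in roughly tot_n/batch_size frame
    # jumps, calling _annote on each visited frame and logging it when the head
    # position is accepted.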
def run_spacedbatch(self, t0=0.2, batch_size=1000):
"""
"""
self.batch_size = batch_size
n_skip = self.fst.tot_n // batch_size
frame_num = int(t0 / self.fst.dt)
while frame_num <= self.fst.tot_n:
self.fst.read_frame(frame_num)
self.frame = self.fst.frame
self.frame_num = self.fst.n
self.frame_time = self.fst.t
self._annote(1, batch_type='spaced')
if self.head_position_ok:
self._write_log(write_header=False)
self.num_labelled += 1
            frame_num = frame_num + np.random.randint(3, 2*n_skip-1)  # advance ~n_skip frames on average
"""Functions to generate QQ-plots as well as Manhattan-plots from FaST-LMM.
Source: FaST-LMM Python package from Microsoft Corporation
Minor modifications
- adapted Python syntax to 3.7 (xrange to range and print statements)
- set universal usage of matplotlib.pylab to plt
- other minor modifications highlighted by comments in code (find by: "<NAME>")
"""
import matplotlib.pylab as plt
import numpy as np
import scipy as sp
import scipy.stats as st
# Source: line 68 fastlmm fastlmm.util.stats.plotp
def _qqplot_bar(M=1000000, alphalevel = 0.05,distr = 'log10'):
'''
calculate error bars for a QQ-plot
--------------------------------------------------------------------
Input:
------------- ----------------------------------------------------
M number of points to compute error bars
alphalevel significance level for the error bars (default 0.05)
distr space in which the error bars are implemented
Note only log10 is implemented (default 'log10')
--------------------------------------------------------------------
Returns:
------------- ----------------------------------------------------
betaUp upper error bars
betaDown lower error bars
theoreticalPvals theoretical P-values under uniform
--------------------------------------------------------------------
'''
    # assumes 'log10'
    mRange = 10**(np.arange(np.emath.log10(0.5), np.emath.log10(M-0.5)+0.1, 0.1))  # should be exp or 10**?
    numPts = len(mRange)
    betaalphaLevel = np.zeros(numPts)  # down in the plot
    betaOneMinusalphaLevel = np.zeros(numPts)  # up in the plot
    betaInvHalf = np.zeros(numPts)
    for n in range(numPts):
        m = mRange[n]  # numplessThanThresh = m
        betaInvHalf[n] = st.beta.ppf(0.5, m, M-m)
        betaalphaLevel[n] = st.beta.ppf(alphalevel, m, M-m)
        betaOneMinusalphaLevel[n] = st.beta.ppf(1-alphalevel, m, M-m)
    betaDown = betaInvHalf - betaalphaLevel
    betaUp = betaOneMinusalphaLevel - betaInvHalf
    theoreticalPvals = mRange/M
    return betaUp, betaDown, theoreticalPvals
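# Note (added for clarity): the bands above follow from the order statistics of
# uniform P-values; the m-th smallest of M i.i.d. uniform values is
# Beta(m, M-m+1) distributed, and the code evaluates Beta(m, M-m) quantiles on
# a continuous grid of ranks m as an approximation to that band.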
# Source: line 593 fastlmm fastlmm.util.stats.plotp
def addqqplotinfo(qnull,M,xl='-log10(P) observed',yl='-log10(P) expected',xlim=None,ylim=None,alphalevel=0.05,legendlist=None,fixaxes=False):
distr='log10'
plt.plot([0, qnull.max()], [0, qnull.max()], 'k')
plt.ylabel(xl)
plt.xlabel(yl)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if alphalevel is not None:
if distr == 'log10':
betaUp, betaDown, theoreticalPvals = _qqplot_bar(M=M,alphalevel=alphalevel,distr=distr)
lower = -np.emath.log10(theoreticalPvals-betaDown)
upper = -np.emath.log10(theoreticalPvals+betaUp)
plt.fill_between(-np.emath.log10(theoreticalPvals), lower, upper, color="grey", alpha=0.5)
#plt.plot(-np.emath.log10(theoreticalPvals),lower,'g-.')
#plt.plot(-np.emath.log10(theoreticalPvals),upper,'g-.')
if legendlist is not None:
leg = plt.legend(legendlist, loc=4, numpoints=1)
# set the markersize for the legend
for lo in leg.legendHandles:
lo.set_markersize(10)
if fixaxes:
fix_axes()
# Source: line 620 fastlmm fastlmm.util.stats.plotp
def qqplot(pvals, fileout=None, alphalevel=0.05, legend=None, xlim=None, ylim=None, fixaxes=True, addlambda=True,
minpval=1e-20, title=None, h1=None, figsize=[5, 5], grid=True):
'''
performs a P-value QQ-plot in -log10(P-value) space
-----------------------------------------------------------------------
Args:
pvals P-values, for multiple methods this should be a list (each element will be flattened)
fileout if specified, the plot will be saved to the file (optional)
alphalevel significance level for the error bars (default 0.05)
if None: no error bars are plotted
legend legend string. For multiple methods this should be a list
xlim X-axis limits for the QQ-plot (unit: -log10)
ylim Y-axis limits for the QQ-plot (unit: -log10)
fixaxes Makes xlim=0, and ylim=max of the two ylimits, so that plot is square
addlambda Compute and add genomic control to the plot, bool
title plot title, string (default: empty)
h1 figure handle (default None)
figsize size of the figure. (default: [5,5])
grid boolean: use a grid? (default: True)
Returns: fighandle, qnull, qemp
-----------------------------------------------------------------------
'''
distr = 'log10'
if type(pvals) == list:
pvallist = pvals
else:
pvallist = [pvals]
if type(legend) == list:
legendlist = legend
else:
legendlist = [legend]
if h1 is None:
h1 = plt.figure(figsize=figsize)
    plt.grid(grid, alpha=0.5)  # visibility passed positionally; the 'b' keyword is removed in newer Matplotlib
maxval = 0
for i in range(len(pvallist)):
pval = pvallist[i].flatten()
M = pval.shape[0]
pnull = (0.5 + np.arange(M)) / M
# pnull = np.sort(np.random.uniform(size = tests))
pval[pval < minpval] = minpval
pval[pval >= 1] = 1
if distr == 'chi2':
qnull = st.chi2.isf(pnull, 1)
qemp = (st.chi2.isf(np.sort(pval), 1))
xl = 'LOD scores'
yl = '$\chi^2$ quantiles'
if distr == 'log10':
qnull = -np.emath.log10(pnull)
            qemp = -np.emath.log10(np.sort(pval))  # np.sort returns a sorted copy; pval itself is unchanged
xl = '-log10(P) observed'
yl = '-log10(P) expected'
        if not (np.isreal(qemp)).all(): raise Exception("imaginary qemp found")
# <NAME>: changed qnull.max to qnull.max(), otherwise a function is returned with numpy versions,
# no value, which is required here
if qnull.max() > maxval:
maxval = qnull.max()
plt.plot(qnull, qemp, '.', markersize=2)
# plt.plot([0,qemp.max()], [0,qemp.max()],'r')
if addlambda:
lambda_gc = estimate_lambda(pval)
print("lambda=%1.4f" % lambda_gc)
# plt.legend(["gc="+ '%1.3f' % lambda_gc],loc=2)
# if there's only one method, just print the lambda
if len(pvallist) == 1:
legendlist = ["$\lambda_{GC}=$%1.4f" % lambda_gc]
# otherwise add it at the end of the name
else:
legendlist[i] = legendlist[i] + " ($\lambda_{GC}=$%1.4f)" % lambda_gc
addqqplotinfo(qnull, M, xl, yl, xlim, ylim, alphalevel, legendlist, fixaxes)
if title is not None:
plt.title(title)
if fileout is not None:
plt.savefig(fileout)
return h1, qnull, qemp,
# Source: line 705 fastlmm fastlmm.util.stats.plotp
def fix_axes(buffer=0.1):
'''
Makes x and y max the same, and the lower limits 0.
'''
maxlim=max(plt.xlim()[1], plt.ylim()[1])
plt.xlim([0 - buffer, maxlim + buffer])
plt.ylim([0 - buffer, maxlim + buffer])
# Source: line 713 fastlmm fastlmm.util.stats.plotp
def estimate_lambda(pv):
'''
estimate the lambda for a given array of P-values
------------------------------------------------------------------
pv numpy array containing the P-values
------------------------------------------------------------------
L lambda value
------------------------------------------------------------------
'''
LOD2 = np.median(st.chi2.isf(pv, 1))
L = (LOD2/st.chi2(1).median())
return L
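# Minimal usage sketch (not from the FaST-LMM source): for null P-values drawn
# uniformly, the QQ-plot should hug the diagonal and lambda_GC should be ~1.
#
#     pvals = np.random.uniform(size=10000)
#     fig, qnull, qemp = qqplot(pvals, alphalevel=0.05, addlambda=True)
#     print(estimate_lambda(pvals))   # expect a value close to 1.0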
# Source: line 470 fastlmm.util.util
def manhattan_plot(chr_pos_pvalue_array, pvalue_line=None, plot_threshold=1.0, vline_significant=False, marker="o",
chromosome_starts=None, xaxis_unit_bp=True, alpha=0.5):
"""
Function to create a Manhattan plot. See http://en.wikipedia.org/wiki/Manhattan_plot.
Args:
chr_pos_pvalue_array: an n x 3 numpy array. The three columns are the chrom number
(as a number), the position, and pvalue.
:type chr_pos_pvalue_array: numpy array
pvalue_line: (Default: None). If given, draws a line at that PValue.
:type pvalue_line: a 'pheno dictionary' or a string
plot_threshold: plot only SNPs that achieve a P-value smaller than pvalue_threshold
to speed up plotting
vline_significant: boolean. Draw a vertical line at each significant Pvalue?
:rtype: none, but changes the global current figure.
marker: marker for the scatter plot. default: "o"
chromosome_starts: [Nchrom x 3] ndarray: chromosome, cumulative start position, cumulative stop position
cumulative chromosome starts, for plotting. If None (default), this is estimated from data
xaxis_unit_bp: plot cumulative position in basepair units on x axis? If False, only
use rank of SNP positions. (default: True)
alpha: alpha (opaquness) for P-value markers in scatterplot (default 0.5)
Returns:
chromosome_starts [Nchrom x 3] ndarray: chromosome, cumulative start position, cumulative stop position
cumulative chromosome starts used in plotting.
:Example:
"""
import matplotlib
    matplotlib.use('Agg')  # This lets it work even on machines without graphics displays
# create a copy of the data and sort it by chrom and then position
# <NAME>: Cast array to float (if not already numeric beforehand)
try:
array = np.array(chr_pos_pvalue_array).astype(float)
except TypeError:
print('Chromosome identifier need to be numeric for plotting of Manhattan plot.')
if plot_threshold:
array = array[array[:, 2] <= plot_threshold]
else:
plot_threshold = 1.0
    array = array[np.argsort(array[:, 1])]
import math
import os
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
import librosa
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import segmentation_models_pytorch as smp
import torch
import torch.nn.functional as F
import torchvision
import yaml
from omegaconf import DictConfig, OmegaConf
from src.dataset.datamodule import IMG_MEAN, IMG_STD, get_input_size_wo_pad, show_stft
from src.dataset.dataset import WaveformDataset
from src.postprocess.visualize import plot_rec
class LitModel(pl.LightningModule):
def __init__(
self, conf: DictConfig, dataset_len: int = 72899, logger_name="tensorboard"
) -> None:
super().__init__()
self.save_hyperparameters() # type: ignore
# self.hparams = conf # type: ignore[misc]
self.conf = conf
self.dataset_len = dataset_len
self.logger_name = logger_name
print("\t >>do segmentation")
self.num_inchannels = len(self.conf.stft_targets) * 3
self.classes_num = self.num_inchannels
smp_params = OmegaConf.to_container(conf.model.smp_params)
smp_params["classes"] = self.classes_num
smp_arch = smp_params.pop("arch_name")
if smp_arch == "unet":
smp_func = smp.Unet
elif smp_arch == "unetpp":
smp_func = smp.UnetPlusPlus
elif smp_arch == "manet":
smp_func = smp.MAnet
elif smp_arch == "deeplabv3":
smp_func = smp.DeepLabV3
elif smp_arch == "deeplabv3p":
            smp_func = smp.DeepLabV3Plus
        else:
            raise NotImplementedError(f"unknown smp arch_name: {smp_arch}")
        self.model = smp_func(**smp_params)
# self.model = nn.Sequential(smp_func(**smp_params),)
        if self.num_inchannels != 3:
            # patch_first_conv is assumed to be provided elsewhere (e.g. from
            # segmentation_models_pytorch's encoder utilities); it widens the
            # first convolution to accept num_inchannels input channels.
            patch_first_conv(self.model, in_channels=self.num_inchannels)
if self.conf.model.channels_last:
# Need to be done once, after model initialization (or load)
self.model = self.model.to(memory_format=torch.channels_last)
if self.conf.model.loss == "mse":
self.criterion = torch.nn.MSELoss(reduction="none")
else:
raise NotImplementedError
self.loss_ch = (
len(self.conf.stft_targets) * 1
if self.conf.train_energy_only
else len(self.conf.stft_targets) * 3
)
if self.conf.model.metrics == "mse":
self.metrics = pl.metrics.MeanSquaredError()
else:
raise NotImplementedError
if self.conf.model.last_act == "sigmoid":
self.activation = torch.nn.Sigmoid()
elif self.conf.model.last_act == "tanh":
self.activation = torch.nn.Tanh()
elif self.conf.model.last_act == "identity":
self.activation = torch.nn.Identity()
else:
raise NotImplementedError
self.val_sync_dist = self.conf.trainer.gpus > 1
self.is_debug = self.conf.is_debug
self.h_, self.w_ = get_input_size_wo_pad(
n_fft=self.conf.stft_params.n_fft, input_width=self.conf.input_width
)
def on_fit_start(self):
self._set_image_normalization()
def on_test_start(self):
self._set_image_normalization()
def forward(self, x):
x = self.model(x)
return x
def _remove_pad(
self, inputs: torch.Tensor, pred: torch.Tensor, targets: torch.Tensor
) -> Tuple[torch.Tensor, ...]:
return (
inputs[:, :, : self.h_, : self.w_],
pred[:, :, : self.h_, : self.w_],
targets[:, :, : self.h_, : self.w_],
)
def training_step(self, batch, batch_idx):
inputs = batch["image"]
if self.conf.model.channels_last:
# Need to be done for every input
inputs = inputs.to(memory_format=torch.channels_last)
targets = batch["target_image"]
outputs = self.model(inputs)
pred = self.activation(outputs)
# if self.conf.model.last_act == "tanh":
# pred = pred * 2.0
inputs, pred, targets = self._remove_pad(
inputs=inputs, pred=pred, targets=targets
)
if self.conf.gt_as_mask:
loss = self.criterion(
pred, targets - (inputs * self._img_std + self._img_mean)
)[:, : self.loss_ch].mean()
else:
loss = self.criterion(pred, targets - inputs)[:, : self.loss_ch].mean()
if self.logger_name == "tensorboard":
self.log("train_loss", loss)
elif self.logger_name == "neptune":
self.logger.experiment["loss/train"].log(loss)
return loss
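    # Note on the objective above: the model learns the residual between the
    # target spectrogram and the input (de-normalised with the image mean/std
    # when conf.gt_as_mask is set); at validation time the predicted residual
    # is added back onto the input before the sequence metrics are computed.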
def validation_step(self, batch, batch_idx):
inputs = batch["image"]
if self.conf.model.channels_last:
# Need to be done for every input
inputs = inputs.to(memory_format=torch.channels_last)
targets = batch["target_image"]
outputs = self.model(inputs)
pred = self.activation(outputs)
# if self.conf.model.last_act == "tanh":
# pred = pred * 2.0
inputs, pred, targets = self._remove_pad(
inputs=inputs, pred=pred, targets=targets
)
if self.conf.gt_as_mask:
loss = self.criterion(
pred, targets - (inputs * self._img_std + self._img_mean)
)[:, : self.loss_ch].mean()
pred = pred + (inputs * self._img_std + self._img_mean)
else:
loss = self.criterion(pred, targets - inputs)[:, : self.loss_ch].mean()
pred = pred + inputs
# only for checkpoint call back
self.log("val_loss", loss)
sequence_results = self.convert_img_pred_to_sequence(pred=pred, batch=batch)
abs_error = sequence_results.pop("abs_error")
metrics = np.mean(abs_error)
if batch_idx in [0, 2]:
pred = torch.clamp(pred, 0.0, 1.0)
epoch = (
self.trainer.global_step * self.conf.batch_size
) // self.dataset_len
num_ = 3
ba_ind = 0
imgs = {
"inputs": inputs[:num_].cpu().numpy().transpose((0, 2, 3, 1)),
"pred": pred[:num_].detach().cpu().numpy().transpose((0, 2, 3, 1)),
"targets": targets[:num_]
.detach()
.cpu()
.numpy()
.transpose((0, 2, 3, 1)),
}
# === PLOT ===
nrows = 3
ncols = 3
ch_ = 0
fig, axes = plt.subplots(
nrows=nrows, ncols=ncols, figsize=(12, 6), sharey=True, sharex=True,
)
fig.suptitle(
"_".join(
[
str(batch["phone"][ba_ind]),
str(batch["millisSinceGpsEpoch"][ba_ind].cpu().numpy()),
str(batch["phone_time"][ba_ind].cpu().numpy()),
"epoch",
str(epoch),
]
)
)
D_mats = {}
for j, (key, img) in enumerate(imgs.items()):
gt_as_mask = (key in ["pred", "targets"]) and self.conf.gt_as_mask
abs_, cos_, sin_ = WaveformDataset.handle_stft_normalize(
img=img.copy(),
cnum=len(self.conf.stft_targets),
is_encode=False,
img_std=self._img_std.cpu().numpy().transpose((0, 2, 3, 1)),
img_mean=self._img_mean.cpu().numpy().transpose((0, 2, 3, 1)),
gt_as_mask=gt_as_mask,
)
show_stft(
conf=self.conf,
D_abs=abs_[ba_ind][..., ch_],
D_cos=cos_[ba_ind][..., ch_],
D_sin=sin_[ba_ind][..., ch_],
ax=axes,
stft_ind=j,
stft_name=key,
)
D_mats[key] = {
"D_abs": abs_[ba_ind][..., ch_],
"D_theta": np.arctan2(
sin_[ba_ind][..., ch_], cos_[ba_ind][..., ch_]
),
}
if self.logger_name == "tensorboard":
self.logger.experiment.add_figure(
"prediction_fig", fig, global_step=self.trainer.global_step,
)
elif self.logger_name == "neptune":
self.logger.experiment[f"val/pred_{batch_idx}_{ba_ind}"].log(fig)
plt.close()
x_gts = batch[self.conf.stft_targets[0].replace("_diff", "_gt_diff")]
x = batch[self.conf.stft_targets[0]]
plot_rec(
x=x[ba_ind].cpu().numpy(),
x_gt=x_gts[ba_ind].cpu().numpy(),
D_abs=D_mats["pred"]["D_abs"],
D_theta=D_mats["pred"]["D_theta"],
D_abs_gt=D_mats["targets"]["D_abs"],
D_theta_gt=D_mats["targets"]["D_theta"],
length=x_gts[ba_ind].shape[0],
is_db=self.conf.stft_params.is_db,
hop_length=self.conf.stft_params.hop_length,
win_length=self.conf.stft_params.win_length,
logger=self.logger,
logger_name=self.logger_name,
global_step=self.trainer.global_step,
log_name=f"val/pred_{batch_idx}_{ba_ind}_line",
target_name=self.conf.stft_targets[0],
)
return {"loss": loss, "metrics": metrics, "sequence_results": sequence_results}
def validation_epoch_end(self, validation_step_outputs):
keys = list(validation_step_outputs[0].keys())
met_dict = {key: [] for key in keys}
for pred in validation_step_outputs:
for key in keys:
met_dict[key].append(pred[key])
sequence_results = {key: [] for key in met_dict["sequence_results"][0].keys()}
for key in keys:
if key == "sequence_results":
for seq_res in met_dict[key]:
for seq_key, values in seq_res.items():
if not isinstance(values, np.ndarray):
values = np.array(values)
sequence_results[seq_key].append(values)
elif isinstance(met_dict[key][0], torch.Tensor):
met_dict[key] = torch.mean(torch.stack(met_dict[key])).cpu().numpy()
else:
                met_dict[key] = np.mean(np.stack(met_dict[key]))
#!/usr/bin/env python
"""
faps -- Frontend for Automated Adsorption Analysis of Porous Solids.
aka
shpes -- Sorption analysis with a High throughput Python
frontend to Examine binding in Structures
Structure adsorption property analysis for high throughput processing. Run
as a script, faps will automatically run complete analysis on a structure.
Sensible defaults are implemented, but calculations can be easily customised.
Faps also provides classes and methods for adapting the simulation or only
doing select parts.
"""
# Turn on keyword expansion to get revision numbers in version strings
# in .hg/hgrc put
# [extensions]
# keyword =
#
# [keyword]
# faps.py =
#
# [keywordmaps]
# Revision = {rev}
try:
__version_info__ = (1, 5, 0, int("$Revision$".strip("$Revision: ")))
except ValueError:
__version_info__ = (1, 5, 0, 0)
__version__ = "%i.%i.%i.%i" % __version_info__
import bz2
import code
try:
import configparser
except ImportError:
import ConfigParser as configparser
import gzip
import mmap
import os
import pickle
import re
import shlex
import shutil
import subprocess
import sys
import tarfile
import textwrap
import time
from copy import copy
from glob import glob
from itertools import count
from logging import warning, debug, error, info, critical
from math import ceil, log
from os import path
import numpy as np
from numpy import pi, cos, sin, sqrt, arccos, arctan2
from numpy import array, identity, dot, cross
from numpy.linalg import norm
from binding_sites.absl import calculate_binding_sites
from config import Options
from elements import WEIGHT, ATOMIC_NUMBER, UFF, VASP_PSEUDO_PREF
from elements import CCDC_BOND_ORDERS, GULP_BOND_ORDERS, OB_BOND_ORDERS, METALS
from elements import COVALENT_RADII, UFF_FULL, QEQ_PARAMS
from eos import peng_robinson
from job_handler import JobHandler
from logo import LOGO
# Global constants
DEG2RAD = pi / 180.0
BOHR2ANG = 0.52917720859
EV2KCAL = 23.060542301389
NAVOGADRO = 6.02214129E23
INFINITY = float('inf')
KCAL_TO_KJ = 4.1868 # Steam tables from openbabel
FASTMC_DEFAULT_GRID_SPACING = 0.1
# ID values for system state
NOT_RUN = 0
RUNNING = 1
FINISHED = 2
UPDATED = 3
SKIPPED = -1
NOT_SUBMITTED = -2
# Possible folder names; need these so that similar_ones_with_underscores are
# not globbed
FOLDER_SUFFIXES = ['gulp', 'gulp_opt', 'gulp_fit', 'siesta', 'vasp', 'egulp',
'repeat', 'fastmc', 'properties', 'absl', 'gromacs']
class PyNiss(object):
"""
PyNiss -- Negotiation of Intermediate System States
A single property calculation for one structure. Instance with a set of
options, then run the job_dispatcher() to begin the calculation. The
calculation will pickle itself, or can be pickled at any time, by calling
dump_state().
"""
def __init__(self, options):
"""
Instance an empty structure in the calculation; The dispatcher should
be called to fill it up with data, as needed.
"""
self.options = options
self.structure = Structure(options.get('job_name'))
self.state = {'init': (NOT_RUN, False),
'ff_opt': (NOT_RUN, False),
'dft': (NOT_RUN, False),
'esp': (NOT_RUN, False),
'charges': (NOT_RUN, False),
'properties': (NOT_RUN, False),
'absl': {},
'gcmc': {}}
self.job_handler = JobHandler(options)
def dump_state(self):
"""Write the .niss file holding the current system state."""
job_name = self.options.get('job_name')
info("Writing state file, %s.niss." % job_name)
os.chdir(self.options.get('job_dir'))
# Don't save the job handler in case it changes
save_handler = self.job_handler
self.job_handler = None
my_niss = open(job_name + ".niss", "wb")
pickle.dump(self, my_niss)
my_niss.close()
# put the job handler back and continue
self.job_handler = save_handler
def re_init(self, new_options):
"""Re initialize simulation (with updated options)."""
if new_options.getbool('update_opts'):
info("Using new options.")
self.options = new_options
self.structure.name = new_options.get('job_name')
else:
# Just update command line stuff
info("Using old options with new command line arguments.")
self.options.args = new_options.args
self.options.options = new_options.options
self.options.cmdopts = new_options.cmdopts
self.structure.name = new_options.get('job_name')
self.status(initial=True)
def job_dispatcher(self):
"""
        Run parts explicitly specified on the command line or do the next step
in an automated run. Drop to interactive mode, if requested.
"""
        # In case options have changed, re-initialize
self.job_handler = JobHandler(self.options)
if 'status' in self.options.args:
self.status(initial=True)
if self.options.getbool('interactive'):
code_locals = locals()
code_locals.update(globals())
console = code.InteractiveConsole(code_locals)
console.push('import rlcompleter, readline')
console.push('readline.parse_and_bind("tab: complete")')
banner = ("((-----------------------------------------------))\n"
"(( Interactive faps mode ))\n"
"(( ===================== ))\n"
"(( ))\n"
"(( WARNING: mode is designed for devs and ))\n"
"(( experts only! ))\n"
"(( Current simulation is accessed as 'self' and ))\n"
"(( associated methods. Type 'dir()' to see the ))\n"
"(( methods in the local namespace and 'help(x)' ))\n"
"(( for help on any object. ))\n"
"(( Use 'self.dump_state()' to save any changes. ))\n"
"((-----------------------------------------------))")
console.interact(banner=banner)
if self.options.getbool('import'):
info("Importing results from a previous simulation")
self.import_old()
self.dump_state()
if self.state['init'][0] == NOT_RUN:
info("Reading in structure")
# No structure, should get one
self.structure.from_file(
self.options.get('job_name'),
self.options.get('initial_structure_format'),
self.options)
if self.options.getbool('order_atom_types'):
info("Forcing atom re-ordering by types")
self.structure.order_by_types()
self.state['init'] = (UPDATED, False)
self.dump_state()
self.step_force_field()
self.step_dft()
self.step_charges()
if self.options.getbool('qeq_fit'):
            if 'qeq_fit' not in self.state and self.state['charges'][0] == UPDATED:
info("QEq parameter fit requested")
self.run_qeq_gulp(fitting=True)
self.dump_state()
self.step_properties()
self.step_gcmc()
self.step_absl()
self.send_to_database()
self.post_summary()
def status(self, initial=False):
"""Print the current status to the terminal."""
valid_states = {NOT_RUN: 'Not run',
RUNNING: 'Running',
FINISHED: 'Finished',
UPDATED: 'Processed',
SKIPPED: 'Skipped',
NOT_SUBMITTED: 'Not submitted'}
if initial:
info("Previous system state reported from .niss file "
"(running jobs may have already finished):")
else:
info("Current system status:")
for step, state in self.state.items():
if step == 'gcmc':
if not state:
info(" * State of GCMC: Not run")
else:
for point, job in state.items():
if job[0] is RUNNING:
info(" * GCMC %s: Running, jobid: %s" %
(point, job[1]))
else:
info(" * GCMC %s: %s" %
(point, valid_states[job[0]]))
elif step == 'absl':
if not state:
info(" * State of ABSL: Not run")
else:
# ABSL used to be multiple jobs, still treat jobid as list
for point, jobs in state.items():
if jobs[0] is RUNNING:
info(" * ABSL %s: Running, jobids: %s" %
(point, ",".join('%s' % x for x in jobs[1])))
else:
info(" * ABSL %s: %s" %
(point, valid_states[jobs[0]]))
elif state[0] is RUNNING:
info(" * State of %s: Running, jobid: %s" % (step, state[1]))
else:
info(" * State of %s: %s" % (step, valid_states[state[0]]))
def send_to_database(self):
"""If using a database, store the results"""
# we can skip if not using a database
        if 'sql' not in self.options.get('initial_structure_format'):
return
# extract the database and structure names
db_params = self.options.get('job_name').split('.')
# import this here so sqlalchemy is not required generally
from backend.sql import AlchemyBackend
database = AlchemyBackend(db_params[0])
info("Storing results in database")
database.store_results(db_params[1], int(db_params[2]), self.structure)
debug("Database finished")
def post_summary(self):
"""Summarise any results for GCMC, properties..."""
# Also includes excess calculation if void volume calculated
# N = pV/RT
all_csvs = {}
R_GAS = 8.3144621E25 / NAVOGADRO # A^3 bar K-1 molecule
job_name = self.options.get('job_name')
info("Summary of GCMC results")
info("======= ======= ======= ======= =======")
nguests = len(self.structure.guests)
for idx, guest in enumerate(self.structure.guests):
# Determine whether we can calculate the excess for
# any different probes
void_volume = self.structure.sub_property('void_volume')
he_excess, guest_excess = "", ""
if 1.0 in void_volume:
he_excess = 'He-xs-molc/uc,He-xs-mmol/g,He-xs-v/v,He-xs-wt%,'
if hasattr(guest, 'probe_radius'):
if guest.probe_radius != 1.0 and guest.probe_radius in void_volume:
guest_excess = 'xs-molc/uc,xs-mmol/g,xs-v/v,xs-wt%,'
if hasattr(guest, 'c_v') and guest.c_v:
#TODO(tdaff): Make standard in 2.0
# makes sure that c_v is there and not empty
cv_header = "C_v,stdev,"
else:
cv_header = ""
if hasattr(guest, 'fugacities') and guest.fugacities:
fuga_header = "f/bar,"
else:
fuga_header = ""
# Generate headers separately
csv = ["#T/K,p/bar,molc/uc,mmol/g,stdev,",
"v/v,stdev,wt%,stdev,hoa/kcal/mol,stdev,",
guest_excess, he_excess, cv_header, fuga_header,
",".join("p(g%i)" % gidx for gidx in range(nguests)), "\n"]
info(guest.name)
info("---------------------------------------")
info("molc/uc mmol/g vstp/v hoa T_P")
info("======= ======= ======= ======= =======")
for tp_point in sorted(guest.uptake):
# <N>, sd, supercell
uptake = guest.uptake[tp_point]
uptake = [uptake[0]/uptake[2], uptake[1]/uptake[2]]
hoa = guest.hoa[tp_point]
# uptake in mmol/g
muptake = 1000*uptake[0]/self.structure.weight
muptake_stdev = 1000*uptake[1]/self.structure.weight
# volumetric uptake
vuptake = (guest.molar_volume*uptake[0]/
(6.023E-4*self.structure.volume))
vuptake_stdev = (guest.molar_volume*uptake[1]/
(6.023E-4*self.structure.volume))
# weight percent uptake
wtpc = 100*(1 - self.structure.weight/
(self.structure.weight + uptake[0]*guest.weight))
wtpc_stdev = 100*(1 - self.structure.weight/
(self.structure.weight + uptake[1]*guest.weight))
info("%7.2f %7.2f %7.2f %7.2f %s" % (
uptake[0], muptake, vuptake, hoa[0],
("T=%s" % tp_point[0] +
''.join(['P=%s' % x for x in tp_point[1]]))))
csv.append("%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f," % (
tp_point[0], tp_point[1][idx], uptake[0],
muptake, muptake_stdev,
vuptake, vuptake_stdev,
wtpc, wtpc_stdev,
hoa[0], hoa[1]))
if guest_excess:
guest_void = void_volume[guest.probe_radius]
n_bulk = (tp_point[1][idx]*guest_void)/(tp_point[0]*R_GAS)
xs_uptake = uptake[0]-n_bulk
# uptake in mmol/g
muptake = 1000*xs_uptake/self.structure.weight
# volumetric uptake
vuptake = (guest.molar_volume*xs_uptake/
(6.023E-4*self.structure.volume))
# weight percent uptake
wtpc = 100*(1 - self.structure.weight/
(self.structure.weight + xs_uptake*guest.weight))
csv.append("%f,%f,%f,%f," % (
xs_uptake, muptake, vuptake, wtpc,))
if he_excess:
guest_void = void_volume[1.0]
n_bulk = (tp_point[1][idx]*guest_void)/(tp_point[0]*R_GAS)
xs_uptake = uptake[0]-n_bulk
# uptake in mmol/g
muptake = 1000*xs_uptake/self.structure.weight
# volumetric uptake
vuptake = (guest.molar_volume*xs_uptake/
(6.023E-4*self.structure.volume))
# weight percent uptake
wtpc = 100*(1 - self.structure.weight/
(self.structure.weight + xs_uptake*guest.weight))
csv.append("%f,%f,%f,%f," % (
xs_uptake, muptake, vuptake, wtpc,))
if cv_header:
csv.append("%f,%f," % (guest.c_v[tp_point]))
if fuga_header:
try:
csv.append("%f," % (guest.fugacities[tp_point]))
except KeyError:
# Assume it was done without fugacity correction
csv.append("%f," % tp_point[1][idx])
# list all the other guest pressures and start a new line
csv.append(",".join("%f" % x for x in tp_point[1]) + "\n")
csv_filename = '%s-%s.csv' % (job_name, guest.ident)
csv_file = open(csv_filename, 'w')
csv_file.writelines(csv)
csv_file.close()
all_csvs[csv_filename] = "".join(csv)
info("======= ======= ======= ======= =======")
info("Structure properties")
# Internally calculated surface area
surf_area_results = self.structure.surface_area()
if surf_area_results:
info("Summary of faps surface areas")
info("========= ========= ========= =========")
info(" radius/A total/A^2 m^2/cm^3 m^2/g")
info("========= ========= ========= =========")
for probe, area in surf_area_results.items():
vol_area = 1E4*area/self.structure.volume
specific_area = NAVOGADRO*area/(1E20*self.structure.weight)
info("%9.3f %9.2f %9.2f %9.2f" %
(probe, area, vol_area, specific_area))
info("========= ========= ========= =========")
# Messy, but check individual properties that might not be there
# and dump them to the screen
info("weight (u): %f" % self.structure.weight)
if hasattr(self.structure, 'pore_diameter'):
info("pores (A): %f %f %f" % self.structure.pore_diameter)
channel_results = self.structure.sub_property('dimensionality')
if channel_results:
for probe, channels in channel_results.items():
info(("channels %.2f probe: " % probe) +
" ".join("%i" % x for x in channels))
# The table is copied from above as it does some calculating
surf_area_results = self.structure.sub_property('zeo_surface_area')
if surf_area_results:
info("Summary of zeo++ surface areas")
info("========= ========= ========= =========")
info(" radius/A total/A^2 m^2/cm^3 m^2/g")
info("========= ========= ========= =========")
for probe, area in surf_area_results.items():
vol_area = 1E4*area/self.structure.volume
specific_area = NAVOGADRO*area/(1E20*self.structure.weight)
info("%9.3f %9.2f %9.2f %9.2f" %
(probe, area, vol_area, specific_area))
info("========= ========= ========= =========")
info("volume (A^3): %f" % self.structure.volume)
void_volume_results = self.structure.sub_property('void_volume')
        if void_volume_results:
info("Summary of zeo++ void volumes")
info("========= ========= ========= =========")
info(" radius/A total/A^3 fraction cm^3/g")
info("========= ========= ========= =========")
for probe, void in void_volume_results.items():
void_fraction = void/self.structure.volume
specific_area = NAVOGADRO*void/(1E24*self.structure.weight)
info("%9.3f %9.2f %9.5f %9.4f" %
(probe, void, void_fraction, specific_area))
info("========= ========= ========= =========")
pxrd = self.structure.sub_property('pxrd')
if pxrd:
info("Summary of PXRD; see properties for cpi file")
for probe, pattern in pxrd.items():
info("%s Powder XRD:" % probe)
plot = [['|']*21]
# 1000 points makes this 75 columns wide
averaged = [sum(pattern[x:x+20])/20.0
for x in range(0, 1000, 20)]
# make peaks horizontal first
peak = max(averaged)
for point in averaged:
height = int(round(15*point/peak))
plot.append([' ']*(15-height) + ['|']*height + ['-'])
# transpose for printing
plot = zip(*plot)
for line in plot:
info(''.join(line))
# Email at the end so everything is in the .flog
self.email(all_csvs)
def email(self, csvs=None):
"""Send an email, if one has not already been sent."""
job_name = self.options.get('job_name')
email_addresses = self.options.gettuple('email')
if email_addresses:
info("Emailing results to %s" % ", ".join(email_addresses))
else:
# nobody to email to, why bother?
return
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Construct an email, thanks documentation!
sender = '<NAME> <<EMAIL>>'
outer = MIMEMultipart()
outer['Subject'] = 'Results for faps job on %s' % job_name
outer['To'] = ', '.join(email_addresses)
outer['From'] = sender
outer.preamble = 'This is a MIME multipart message\n'
# Just attach all the csv files
if csvs is not None:
for csv in csvs:
msg = MIMEText(csvs[csv], _subtype='csv')
msg.add_header('Content-Disposition', 'attachment',
filename=csv)
outer.attach(msg)
# Include a cif file
msg_cif = MIMEText("".join(self.structure.to_cif()))
msg_cif.add_header('Content-Disposition', 'attachment',
filename="%s.faps.cif" % job_name)
outer.attach(msg_cif)
# And the flog file
try:
flog = open("%s.flog" % job_name)
msg_flog = MIMEText(flog.read())
flog.close()
msg_flog.add_header('Content-Disposition', 'attachment',
filename="%s.flog" % job_name)
outer.attach(msg_flog)
except IOError:
# Error reading the file, don't care
pass
# Send via local SMTP server
s = smtplib.SMTP('localhost')
s.sendmail(sender, email_addresses, outer.as_string())
s.quit()
def step_force_field(self):
"""Check the force field step of the calculation."""
end_after = False
if 'ff_opt' not in self.state:
self.state['ff_opt'] = (NOT_RUN, False)
if self.state['ff_opt'][0] not in [UPDATED, SKIPPED]:
if self.options.getbool('no_force_field_opt'):
info("Skipping force field optimisation")
self.state['ff_opt'] = (SKIPPED, False)
elif self.state['ff_opt'][0] == RUNNING:
job_state = self.job_handler.jobcheck(self.state['ff_opt'][1])
if not job_state:
info("Queue reports force field optimisation has finished")
self.state['ff_opt'] = (FINISHED, False)
else:
# Still running
info("Force field optimisation still in progress")
end_after = True
if self.state['ff_opt'][0] == NOT_RUN or 'ff_opt' in self.options.args:
jobid = self.run_ff_opt()
sys_argv_strip('ff_opt')
end_after = self.postrun(jobid)
self.dump_state()
if self.state['ff_opt'][0] == FINISHED:
self.structure.update_pos(self.options.get('ff_opt_code'),
options=self.options)
self.state['ff_opt'] = (UPDATED, False)
self.dump_state()
# If postrun is submitted then this script is done!
if end_after:
terminate(0)
def step_dft(self):
"""Check the DFT step of the calculation."""
end_after = False
if self.state['dft'][0] not in [UPDATED, SKIPPED]:
if self.options.getbool('no_dft'):
info("Skipping DFT step completely")
info("Job might fail later if you need the ESP")
self.state['dft'] = (SKIPPED, False)
elif self.state['dft'][0] == RUNNING:
job_state = self.job_handler.jobcheck(self.state['dft'][1])
if not job_state:
info("Queue reports DFT step has finished")
self.state['dft'] = (FINISHED, False)
else:
# Still running
info("DFT still in progress")
end_after = True
if self.state['dft'][0] == NOT_RUN or 'dft' in self.options.args:
jobid = self.run_dft()
sys_argv_strip('dft')
end_after = self.postrun(jobid)
self.dump_state()
if self.state['dft'][0] == FINISHED:
self.structure.update_pos(self.options.get('dft_code'))
self.state['dft'] = (UPDATED, False)
self.dump_state()
# If postrun is submitted then this script is done!
if end_after:
terminate(0)
def step_charges(self):
"""Check the charge step of the calculation."""
end_after = False
if self.state['charges'][0] not in [UPDATED, SKIPPED]:
if self.options.getbool('no_charges'):
info("Skipping charge calculation")
self.state['charges'] = (SKIPPED, False)
elif self.state['charges'][0] == RUNNING:
job_state = self.job_handler.jobcheck(self.state['charges'][1])
if not job_state:
info("Queue reports charge calculation has finished")
self.state['charges'] = (FINISHED, False)
else:
info("Charge calculation still running")
end_after = True
if self.state['charges'][0] == NOT_RUN or 'charges' in self.options.args:
jobid = self.run_charges()
sys_argv_strip('charges')
end_after = self.postrun(jobid)
self.dump_state()
if self.state['charges'][0] == FINISHED:
self.structure.update_charges(self.options.get('charge_method'),
self.options)
self.state['charges'] = (UPDATED, False)
self.dump_state()
# If postrun is submitted then this script is done!
if end_after:
terminate(0)
def step_gcmc(self):
"""Check the GCMC step of the calculation."""
end_after = False
jobids = {}
postrun_ids = []
if self.options.getbool('no_gcmc'):
info("Skipping GCMC simulation")
return
elif not self.state['gcmc'] or 'gcmc' in self.options.args:
# The dictionary is empty before any runs
info("Starting gcmc step")
jobids = self.run_fastmc()
sys_argv_strip('gcmc')
self.dump_state()
for tp_point, jobid in jobids.items():
if jobid is True:
self.state['gcmc'][tp_point] = (FINISHED, False)
elif jobid is False:
self.state['gcmc'][tp_point] = (SKIPPED, False)
else:
info("FastMC job in queue. Jobid: %s" % jobid)
self.state['gcmc'][tp_point] = (RUNNING, jobid)
postrun_ids.append(jobid)
# unfinished GCMCs
end_after = True
else:
# when the loop completes write out the state
self.dump_state()
for tp_point in self.state['gcmc']:
tp_state = self.state['gcmc'][tp_point]
if tp_state[0] == RUNNING:
new_state = self.job_handler.jobcheck(tp_state[1])
if not new_state:
info("Queue reports GCMC %s finished" % (tp_point,))
# need to know we have finished to update below
tp_state = (FINISHED, False)
self.state['gcmc'][tp_point] = tp_state
self.dump_state()
else:
info("GCMC %s still running" % (tp_point,))
# unfinished GCMC so exit later
end_after = True
# any states that need to be updated should have been done by now
if tp_state[0] == FINISHED:
startdir = os.getcwd()
# wooki seems slow to copy output files back
# so we give them a few chances to appear
max_attempts = 6
for attempt_count in range(max_attempts):
time.sleep(attempt_count)
try:
self.structure.update_gcmc(tp_point, self.options)
self.state['gcmc'][tp_point] = (UPDATED, False)
self.dump_state()
break
except IOError:
os.chdir(startdir)
else:
error('OUTPUT file never appeared')
if postrun_ids:
self.postrun(postrun_ids)
if end_after:
info("GCMC run has not finished completely")
terminate(0)
def step_properties(self):
"""Run the properties calculations if required."""
if self.state['properties'][0] not in [UPDATED, SKIPPED]:
if self.options.getbool('no_properties'):
info("Skipping all properties calculations")
self.state['properties'] = (SKIPPED, False)
if self.state['properties'][0] == NOT_RUN or 'properties' in self.options.args:
self.calculate_properties()
self.state['properties'] = (UPDATED, False)
self.dump_state()
def step_absl(self):
"""Check the binding site step of the calculation."""
end_after = False
jobids = {}
postrun_ids = []
# check for old simulations with no absl state
# TODO(tdaff): remove eventually
if 'absl' not in self.state:
self.state['absl'] = {}
if self.options.getbool('no_absl'):
info("Skipping ABSL calculation")
return
elif self.options.getbool('no_gcmc'):
info("no_gcmc requested, can't do ABSL, skipping")
return
elif not self.options.getbool('mc_probability_plot'):
info("No probability plot; Skipping ABSL calculation")
return
elif not self.options.getbool('find_maxima'):
info("No TABASCO maxima; Skipping ABSL calculation")
return
elif not self.state['absl'] or 'absl' in self.options.args:
# The dictionary is empty before any runs
info("Starting absl step")
jobids = self.run_absl()
sys_argv_strip('absl')
self.dump_state()
for tp_point, jobid in jobids.items():
if set(jobid) == set([True]):
self.state['absl'][tp_point] = (FINISHED, False)
elif set(jobid) == set([False]):
self.state['absl'][tp_point] = (SKIPPED, False)
else:
info("ABSL job in queue. Jobid: %s" % jobid)
self.state['absl'][tp_point] = (RUNNING, jobid)
postrun_ids.extend(jobid)
# unfinished ABSL calculations
end_after = True
else:
# when the loop completes write out the state
self.dump_state()
for tp_point in self.state['absl']:
tp_state = self.state['absl'][tp_point]
if tp_state[0] == RUNNING:
new_state = set([self.job_handler.jobcheck(job)
for job in tp_state[1]])
if new_state == set([False]):
info("Queue reports ABSL %s finished" % (tp_point,))
# need to know we have finished to update below
tp_state = (FINISHED, False)
self.state['absl'][tp_point] = tp_state
self.dump_state()
else:
info("ABSL %s still running" % (tp_point,))
# unfinished ABSL so exit later
end_after = True
# any states that need to be updated should have been done by now
if tp_state[0] == FINISHED:
startdir = os.getcwd()
# wooki seems slow to copy output files back
# so we give them a few chances to appear
max_attempts = 6
for attempt_count in range(max_attempts):
time.sleep(attempt_count)
try:
self.structure.update_absl(tp_point, self.options)
self.state['absl'][tp_point] = (UPDATED, False)
self.dump_state()
break
except IOError:
os.chdir(startdir)
else:
#TODO(tdaff): does this matter here?
error('ABSL output never appeared')
if postrun_ids:
self.postrun(postrun_ids)
if end_after:
info("ABSL run has not finished completely")
terminate(0)
def import_old(self):
"""Try and import any data from previous stopped simulation."""
job_name = self.options.get('job_name')
job_dir = self.options.get('job_dir')
try:
self.structure.from_file(
job_name,
self.options.get('initial_structure_format'),
self.options)
warning("Ensure that order_atom_types is on for pre-1.4 data")
if self.options.getbool('order_atom_types'):
info("Forcing atom re-ordering by types")
self.structure.order_by_types()
self.state['init'] = (UPDATED, False)
except IOError:
info("No initial structure found to import")
try:
self.structure.update_pos(self.options.get('ff_opt_code'))
self.state['ff_opt'] = (UPDATED, False)
except IOError:
info("No force field optimised structure found to import")
try:
self.structure.update_pos(self.options.get('dft_code'))
self.state['dft'] = (UPDATED, False)
except IOError:
info("No optimized structure found to import")
try:
self.structure.update_charges(self.options.get('charge_method'),
self.options)
self.state['charges'] = (UPDATED, False)
except IOError:
info("No charges found to import")
# Need to generate supercell here on import so that it is set, and
# is based on the cell from dft, if changed
self.structure.gen_supercell(self.options)
guests = [Guest(x) for x in self.options.gettuple('guests')]
if not same_guests(self.structure.guests, guests):
info("Replacing old guests with %s" % " ".join(guest.ident for
guest in guests))
self.structure.guests = guests
else:
# use existing guests that may have data
debug("Retaining previous guests")
guests = self.structure.guests
temps = self.options.gettuple('mc_temperature', float)
presses = self.options.gettuple('mc_pressure', float)
indivs = self.options.gettuple('mc_state_points', float)
for tp_point in state_points(temps, presses, indivs, len(guests)):
try:
self.structure.update_gcmc(tp_point, self.options)
self.state['gcmc'][tp_point] = (UPDATED, False)
except (IOError, OSError):
info("GCMC point %s not found" % str(tp_point))
# Reset directory at end
os.chdir(job_dir)
def postrun(self, jobid):
"""Determine if we need the job handler to post submit itself."""
# update the job tracker
if jobid is not False and jobid is not True:
if self.options.getbool('run_all'):
debug('Submitting postrun script')
os.chdir(self.options.get('job_dir'))
self.job_handler.postrun(jobid)
return True
else:
debug('Postrun script not submitted')
return False
else:
return False
def run_ff_opt(self):
"""Prepare the system and run the selected force field optimisation."""
ff_opt_code = self.options.get('ff_opt_code')
info("Checking connectivity/types")
if self.structure.check_connectivity():
if self.options.getbool('infer_types_from_bonds'):
self.structure.gen_types_from_bonds()
else:
warning("No types; try with infer_types_from_bonds")
else:
info("Bonds and types, provided")
info("Running a %s calculation" % ff_opt_code)
if ff_opt_code == 'gromacs':
jobid = self.run_optimise_gromacs()
elif ff_opt_code == 'gulp':
jobid = self.run_optimise_gulp()
else:
critical("Unknown force field method: %s" % ff_opt_code)
terminate(91)
if jobid is True:
# job run and finished
self.state['ff_opt'] = (FINISHED, False)
else:
info("Running %s job in queue. Jobid: %s" % (ff_opt_code, jobid))
self.state['ff_opt'] = (RUNNING, jobid)
return jobid
def run_dft(self):
"""Select correct method for running the dft/optim."""
dft_code = self.options.get('dft_code')
info("Running a %s calculation" % dft_code)
if dft_code == 'vasp':
jobid = self.run_vasp()
elif dft_code == 'siesta':
jobid = self.run_siesta()
else:
critical("Unknown dft method: %s" % dft_code)
terminate(92)
# update the job tracker
#if jobid is False:
# self.state['dft'] = (NOT_SUBMITTED, False)
# submission skipped
if jobid is True:
# job run and finished
self.state['dft'] = (FINISHED, False)
else:
info("Running %s job in queue. Jobid: %s" % (dft_code, jobid))
self.state['dft'] = (RUNNING, jobid)
return jobid
def run_charges(self):
"""Select correct charge processing methods."""
chg_method = self.options.get('charge_method')
info("Calculating charges with %s" % chg_method)
if chg_method == 'repeat':
# Get ESP
self.esp_to_cube()
jobid = self.run_repeat()
elif chg_method == 'gulp':
jobid = self.run_qeq_gulp()
elif chg_method == 'egulp':
jobid = self.run_qeq_egulp()
else:
critical("Unknown charge calculation method: %s" % chg_method)
terminate(93)
# update the job tracker
if jobid is True:
# job run and finished
self.state['charges'] = (FINISHED, False)
else:
info("Running %s job in queue. Jobid: %s" % (chg_method, jobid))
self.state['charges'] = (RUNNING, jobid)
return jobid
## Methods for specific codes start here
def run_optimise_gromacs(self):
"""Run GROMACS to do a UFF optimisation."""
job_name = self.options.get('job_name')
optim_code = self.options.get('ff_opt_code')
g_verbose = self.options.getbool('gromacs_verbose')
# Run in a subdirectory
optim_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, optim_code))
mkdirs(optim_dir)
os.chdir(optim_dir)
debug("Running in %s" % optim_dir)
metal_geom = self.options.get('gromacs_metal_geometry')
gro_file, top_file, itp_file = self.structure.to_gromacs(metal_geom)
# We use default names so we don't have to specify
# anything extra on the command line
filetemp = open('conf.gro', 'w')
filetemp.writelines(gro_file)
filetemp.close()
filetemp = open('topol.top', 'w')
filetemp.writelines(top_file)
filetemp.close()
filetemp = open('topol.itp', 'w')
filetemp.writelines(itp_file)
filetemp.close()
filetemp = open('grompp.mdp', 'w')
filetemp.writelines(mk_gromacs_mdp(self.structure.cell, mode='bfgs',
verbose=g_verbose))
filetemp.close()
filetemp = open('pcoupl.mdp', 'w')
filetemp.writelines(mk_gromacs_mdp(self.structure.cell, mode='pcoupl',
verbose=g_verbose))
filetemp.close()
# prepare for simulation!
# Codes we need; comment out the trjconv if being quiet
grompp = self.options.get('grompp_exe')
mdrun = self.options.get('mdrun_exe')
if g_verbose:
trjconv = "echo 0 | %s" % self.options.get('trjconv_exe')
else:
trjconv = "#echo 0 | %s" % self.options.get('trjconv_exe')
# everything runs in a script -- too many steps otherwise
# only make the g96 file at the end so we can tell if it breaks
gromacs_faps = open('gromacs_faps', 'w')
gromacs_faps.writelines([
"#!/bin/bash\n\n",
"export OMP_NUM_THREADS=1\n\n",
"# preprocess first bfgs\n",
"%s -maxwarn 2 &>> g.log\n\n" % grompp,
"# bfgs step\n",
"%s -nt 1 &>> g.log\n\n" % mdrun,
"%s -o traject1.gro -f traj.trr &>> g.log\n" % trjconv,
"# overwrite with pcoupl step\n",
"%s -maxwarn 2 -t traj.trr -f pcoupl.mdp &>> g.log\n\n" % grompp,
"# pcoupl step\n",
"%s -nt 1 &>> g.log\n\n" % mdrun,
"%s -o traject2.gro -f traj.trr &>> g.log\n" % trjconv,
"# overwrite with final bfgs\n",
"%s -maxwarn 2 -t traj.trr &>> g.log\n\n" % grompp,
"# generate final structure\n",
"%s -nt 1 -c confout.g96 &>> g.log\n" % mdrun,
"%s -o traject3.gro -f traj.trr &>> g.log\n" % trjconv])
gromacs_faps.close()
os.chmod('gromacs_faps', 0o755)
# Leave the run to the shell
info("Generated gromcas inputs and run script")
if self.options.getbool('no_submit'):
info("GROMACS input files generated; skipping job submission")
jobid = False
else:
jobid = self.job_handler.submit(optim_code, self.options)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def run_optimise_gulp(self):
"""Run GULP to do a UFF optimisation."""
job_name = self.options.get('job_name')
optim_code = 'gulp'
terse = self.options.getbool('gulp_terse')
# put an opt in path to distinguish from the charge calculation
optim_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s_opt' % (job_name, optim_code))
mkdirs(optim_dir)
os.chdir(optim_dir)
debug("Running in %s" % optim_dir)
filetemp = open('%s.gin' % job_name, 'w')
filetemp.writelines(self.structure.to_gulp(optimise=True, terse=terse))
filetemp.close()
if 'GULP_LIB' not in os.environ:
warning("gulp library directory not set; optimisation might fail")
if self.options.getbool('no_submit'):
info("GULP input files generated; skipping job submission")
jobid = False
else:
jobid = self.job_handler.submit(optim_code, self.options,
input_file='%s.gin' % job_name)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def run_vasp(self):
"""Make inputs and run vasp job."""
job_name = self.options.get('job_name')
nproc = self.options.getint('vasp_ncpu')
# Keep things tidy in a subdirectory
dft_code = self.options.get('dft_code')
vasp_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, dft_code))
mkdirs(vasp_dir)
os.chdir(vasp_dir)
debug("Running in %s" % vasp_dir)
info("Running on %i nodes" % nproc)
filetemp = open("POSCAR", "w")
filetemp.writelines(self.structure.to_vasp(self.options))
filetemp.close()
esp_grid = self.esp_grid
#TODO(jlo): self.structure.types gives you each type
# e.g ['C', 'C', 'O'... ]
# self.options.get('...') to get charge or something set a default
# in default.ini
# calculate nelect
filetemp = open("INCAR", "w")
if self.esp_reduced:
# Only pass an explicit grid if it differs from the VASP default
filetemp.writelines(mk_incar(self.options, esp_grid=esp_grid))
else:
filetemp.writelines(mk_incar(self.options))
filetemp.close()
filetemp = open("KPOINTS", "w")
filetemp.writelines(mk_kpoints(self.options.gettuple('kpoints', int)))
filetemp.close()
potcar_types = unique(self.structure.types)
filetemp = open("POTCAR", "w")
potcar_dir = self.options.get('potcar_dir')
previous_type = ""
for at_type in self.structure.types:
if at_type == previous_type:
continue
# Try and get the preferred POTCARS
debug("Using %s pseudopotential for %s" %
(VASP_PSEUDO_PREF.get(at_type, at_type), at_type))
potcar_src = path.join(potcar_dir,
VASP_PSEUDO_PREF.get(at_type, at_type),
"POTCAR")
shutil.copyfileobj(open(potcar_src), filetemp)
previous_type = at_type
filetemp.close()
if self.options.getbool('no_submit'):
info("Vasp input files generated; skipping job submission")
# act as if job completed
jobid = False
else:
self.job_handler.env(dft_code, options=self.options)
jobid = self.job_handler.submit(dft_code, self.options)
# Tidy up at the end and pass on job id
os.chdir(self.options.get('job_dir'))
return jobid
def run_siesta(self):
"""Make siesta input and run job."""
job_name = self.options.get('job_name')
nproc = self.options.getint('siesta_ncpu')
# Keep things tidy in a subdirectory
dft_code = self.options.get('dft_code')
siesta_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, dft_code))
mkdirs(siesta_dir)
os.chdir(siesta_dir)
debug("Running in %s" % siesta_dir)
info("Running on %i nodes" % nproc)
filetemp = open('%s.fdf' % job_name, 'w')
filetemp.writelines(self.structure.to_siesta(self.options))
filetemp.close()
psf_types = unique(self.structure.types)
psf_dir = self.options.get('psf_dir')
for at_type in psf_types:
psf_atm = '%s.psf' % at_type
psf_src = path.join(psf_dir, psf_atm)
psf_dest = path.join(siesta_dir, psf_atm)
try:
if not path.exists(psf_atm):
os.symlink(psf_src, psf_dest)
# symlinks not available pre 3.2 on windows
except AttributeError:
shutil.copy(psf_src, siesta_dir)
filetemp.close()
if self.options.getbool('no_submit'):
info("Siesta input files generated; skipping job submission")
jobid = False
else:
# sharcnet does weird things for siesta
self.job_handler.env(dft_code, options=self.options)
jobid = self.job_handler.submit(dft_code, self.options,
input_file='%s.fdf' % job_name)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def run_qeq_gulp(self, fitting=False):
"""Run GULP to calculate charge equilibration charges."""
job_name = self.options.get('job_name')
qeq_code = 'gulp'
if fitting:
qeq_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s_fit' % (job_name, qeq_code))
else:
qeq_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, qeq_code))
mkdirs(qeq_dir)
os.chdir(qeq_dir)
debug("Running in %s" % qeq_dir)
qeq_dict = parse_qeq_params(self.options.gettuple('qeq_parameters'))
filetemp = open('%s.gin' % job_name, 'w')
filetemp.writelines(self.structure.to_gulp(qeq_fit=fitting, qeq_dict=qeq_dict))
filetemp.close()
if self.options.getbool('no_submit'):
info("GULP input files generated; skipping job submission")
jobid = False
elif fitting:
jobid = self.job_handler.submit(qeq_code, self.options,
input_file='%s.gin' % job_name)
info("Running GULP fitting job in queue. Jobid: %s" % jobid)
self.state['qeq_fit'] = (RUNNING, jobid)
else:
jobid = self.job_handler.submit(qeq_code, self.options,
input_file='%s.gin' % job_name)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def run_qeq_egulp(self):
"""Run EGULP to calculate charge equilibration charges."""
job_name = self.options.get('job_name')
qeq_code = 'egulp'
qeq_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, qeq_code))
typed_atoms = self.options.getbool('egulp_typed_atoms')
mkdirs(qeq_dir)
os.chdir(qeq_dir)
debug("Running in %s" % qeq_dir)
filetemp = open('%s.geo' % job_name, 'w')
filetemp.writelines(self.structure.to_egulp(typed_atoms))
filetemp.close()
# EGULP defaults to GULP parameters if not specified
egulp_parameters = self.options.gettuple('qeq_parameters')
if 'mepo' in egulp_parameters:
from parameters import mepo_qeq
info("Using MEPO-QEq base parameters")
egulp_parameters = [x for x in egulp_parameters if x != 'mepo']
for element, parameters in mepo_qeq.items():
# Put MEPO parameters at the beginning so they can be
# overridden
plist = [element, parameters[0], parameters[1]]
egulp_parameters = plist + egulp_parameters
if not egulp_parameters:
# parameters are mandatory in new egulp
egulp_parameters = ('H', QEQ_PARAMS['H'][0], QEQ_PARAMS['H'][1])
else:
info("Custom EGULP parameters selected")
filetemp = open('%s.param' % job_name, 'w')
filetemp.writelines(mk_egulp_params(egulp_parameters))
filetemp.close()
filetemp = open('%s.ini' % job_name, 'w')
filetemp.writelines(mk_egulp_ini(self.options))
filetemp.close()
egulp_args = ['%s.geo' % job_name,
'%s.param' % job_name,
'%s.ini' % job_name]
if self.options.getbool('no_submit'):
info("EGULP input files generated; skipping job submission")
jobid = False
else:
jobid = self.job_handler.submit(qeq_code, self.options,
input_args=egulp_args)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def esp_to_cube(self):
"""Make the cube for repeat input."""
job_name = self.options.get('job_name')
# No case where the esp source will be different from the dft code
esp_src = self.options.get('dft_code')
repeat_dir = path.join(self.options.get('job_dir'),
'faps_%s_repeat' % job_name)
mkdirs(repeat_dir)
src_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, esp_src))
os.chdir(src_dir)
if esp_src == 'vasp':
esp_to_cube_args = shlex.split(self.options.get('vasp_to_cube'))
info("Converting vasp esp to cube, this might take a minute...")
try:
fix_vasp_wrapped_types('LOCPOT')
except IOError:
error("Couldn't find the LOCPOT file; did VASP fail?")
submit = subprocess.Popen(esp_to_cube_args)
submit.wait()
# Cube should have job_name, but can get truncated;
# therefore we try to look for it first
cube_file = glob('*.cube')
if len(cube_file) == 1:
cube_file = cube_file[0]
elif len(cube_file) > 1:
cube_file = cube_file[0]
warning("More or than one .cube found; using %s" % cube_file)
else:
error("No cube files found; check vasp_to_cube output")
# Move it to the repeat directory and give a proper name
move_and_overwrite(cube_file,
path.join(repeat_dir, job_name + '.cube'))
unneeded_files = self.options.gettuple('vasp_delete_files')
remove_files(unneeded_files)
keep_files = self.options.gettuple('vasp_compress_files')
compress_files(keep_files)
elif esp_src == 'siesta':
esp_to_cube_args = shlex.split(self.options.get('siesta_to_cube'))
esp_grid = self.esp_grid
info("Generating ESP grid of %ix%ix%i" % esp_grid)
siesta_to_cube_input = [
"%s\n" % job_name,
"%f %f %f\n" % (0.0, 0.0, 0.0),
"%i %i %i\n" % esp_grid]
info("Converting siesta esp to cube, this might take a minute...")
submit = subprocess.Popen(esp_to_cube_args, stdin=subprocess.PIPE)
submit.communicate(input=''.join(siesta_to_cube_input))
move_and_overwrite(job_name + '.cube', repeat_dir)
unneeded_files = self.options.gettuple('siesta_delete_files')
remove_files(unneeded_files)
keep_files = self.options.gettuple('siesta_compress_files')
compress_files(keep_files)
os.chdir(self.options.get('job_dir'))
def run_repeat(self):
"""Submit the repeat calc to the queue."""
job_name = self.options.get('job_name')
charge_code = self.options.get('charge_method')
repeat_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, charge_code))
mkdirs(repeat_dir)
os.chdir(repeat_dir)
if self.options.getbool('symmetry'):
mk_repeat(cube_name=job_name + '.cube', symmetry=True)
mk_connectivity_ff(self.structure.symmetry_tree)
else:
mk_repeat(cube_name=job_name + '.cube', symmetry=False)
if self.options.getbool('no_submit'):
info("REPEAT input files generated; skipping job submission")
jobid = False
else:
jobid = self.job_handler.submit(charge_code, self.options)
os.chdir(self.options.get('job_dir'))
return jobid
def run_fastmc(self):
"""Submit a fastmc job to the queue."""
job_name = self.options.get('job_name')
mc_code = self.options.get('mc_code')
# Set the guests before generating the files
# Load here as options may change in each run
# and before changing directory, or it will not find guests.lib
guests = [Guest(x) for x in self.options.gettuple('guests')]
if not same_guests(self.structure.guests, guests):
info("Replacing old guests with %s" % " ".join(guest.ident for
guest in guests))
self.structure.guests = guests
else:
# use existing guests that may have data
debug("Retaining previous guests")
guests = self.structure.guests
gcmc_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, mc_code))
mkdirs(gcmc_dir)
os.chdir(gcmc_dir)
config, field = self.structure.to_config_field(self.options, fastmc=True)
filetemp = open("CONFIG", "w")
filetemp.writelines(config)
filetemp.close()
filetemp = open("FIELD", "w")
filetemp.writelines(field)
filetemp.close()
temps = self.options.gettuple('mc_temperature', float)
presses = self.options.gettuple('mc_pressure', float)
indivs = self.options.gettuple('mc_state_points', float)
jobids = {}
for tp_point in state_points(temps, presses, indivs, len(guests)):
temp = tp_point[0]
press = tp_point[1]
info("Running GCMC: T=%.1f " % temp +
" ".join(["P=%.2f" % x for x in press]))
tp_path = format_tp_path(tp_point)
mkdirs(tp_path)
os.chdir(tp_path)
try_symlink(path.join('..', 'CONFIG'), 'CONFIG')
try_symlink(path.join('..', 'FIELD'), 'FIELD')
filetemp = open("CONTROL", "w")
# Calculate fugacities for the input if required
if self.options.get('equation_of_state').lower() == 'peng-robinson':
info("Using Peng-Robinson EOS gas fugacities")
ideal = {}
for guest, pressure in zip(guests, press):
if not hasattr(guest, 'species'):
try:
guest.species = Guest(guest.ident).species
except AttributeError:
error("Unable to use equation of state with guest"
"%s. Failure imminent." % guest.name)
if not hasattr(guest, 'fugacities'):
guest.fugacities = {}
ideal[guest.species] = pressure
# Apply the correction
fugacities = peng_robinson(ideal, temp)
fuga = []
for guest, pressure in zip(guests, press):
info("Fugacity correction for %s: %f bar -> %f bar" %
(guest.ident, pressure, fugacities[guest.species]))
fuga.append(fugacities[guest.species])
guest.fugacities[tp_point] = fugacities[guest.species]
# Expects single guest not in a list
if len(guests) == 1:
fuga = fuga[0]
else:
info("Using ideal gas fugacities")
for guest, pressure in zip(guests, press):
guest.fugacities[tp_point] = pressure
fuga = press
# make control with fugacities
filetemp.writelines(mk_gcmc_control(temp, fuga, self.options,
guests, self.structure.gcmc_supercell))
filetemp.close()
if self.options.getbool('no_submit'):
info("FastMC input files generated; "
"skipping job submission")
jobids[(temp, press)] = False
else:
jobid = self.job_handler.submit(mc_code, self.options)
jobids[(temp, press)] = jobid
os.chdir('..')
os.chdir(self.options.get('job_dir'))
return jobids
def run_absl(self):
"""Submit absl jobs to the queue."""
job_name = self.options.get('job_name')
guests = self.structure.guests
# run in a dedicated absl directory
absl_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, 'absl'))
mkdirs(absl_dir)
os.chdir(absl_dir)
temps = self.options.gettuple('mc_temperature', float)
presses = self.options.gettuple('mc_pressure', float)
indivs = self.options.gettuple('mc_state_points', float)
jobids = {}
for tp_point in state_points(temps, presses, indivs, len(guests)):
temp = tp_point[0]
press = tp_point[1]
info("Running ABSL: T=%.1f " % temp +
" ".join(["P=%.2f" % x for x in press]))
tp_path = format_tp_path(tp_point)
mkdirs(tp_path)
os.chdir(tp_path)
# make the dummy guest configuration
dummy_guest = self.structure.guests[0]
dummy_include = {dummy_guest.ident: [[[x, 0.0, 0.0] for x in
range(dummy_guest.natoms)]]}
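# One placeholder copy of the first guest, with atoms spaced along
# x, is included so the dummy CONFIG/FIELD contain guest entries.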
with open("CONFIG", "w") as config:
with open("FIELD", "w") as field:
dlp_files = self.structure.to_config_field(
self.options, include_guests=dummy_include, dummy=True)
config.writelines(dlp_files[0])
field.writelines(dlp_files[1])
with open("CONTROL", "w") as control:
control.writelines(mk_dl_poly_control(self.options, dummy=True))
# Keep track of directories so that we can run jobs at once
individual_directories = ['.']
# calculate binding sites here and submit
for guest in self.structure.guests:
binding_sites = calculate_binding_sites(guest, tp_point,
self.structure.cell)
if hasattr(guest, 'binding_sites'):
guest.binding_sites[tp_point] = binding_sites
else:
guest.binding_sites = {tp_point: binding_sites}
for bs_idx, binding_site in enumerate(binding_sites):
bs_directory = "%s_bs_%04d" % (guest.ident, bs_idx)
mkdirs(bs_directory)
os.chdir(bs_directory)
include_guests = {guest.ident: [guest.aligned_to(*binding_site)]}
dlp_files = self.structure.to_config_field(
self.options, include_guests=include_guests)
with open("CONFIG", "w") as config:
config.writelines(dlp_files[0])
if bs_idx > 0:
# symlink on FIELD to save space
zero_directory = "%s_bs_%04d" % (guest.ident, 0)
try_symlink(path.join('..', zero_directory, 'FIELD'),
'FIELD')
try_symlink(path.join('..', zero_directory, 'CONTROL'),
'CONTROL')
else:
# Always put the FIELD and CONTROL in zero to symlink to
with open("FIELD", "w") as field:
field.writelines(dlp_files[1])
with open("CONTROL", "w") as control:
control.writelines(mk_dl_poly_control(self.options))
individual_directories.append(bs_directory)
os.chdir('..')
# Make the script to run all the jobs now, using the individual
# directories
dl_poly_exe = self.options.get('dl_poly_exe')
# Try to delete REVIVE files while running the DL_POLY jobs.
# A few files are needed for later processing (OUTPUT, REVCON,
# CONFIG, STATIS); FIELD and CONTROL will hopefully be symlinks,
# so they can't be deleted, but REVIVE is never needed.
absl_delete_files = self.options.get('absl_delete_files')
if 'REVIVE' in absl_delete_files or '*_bs_*' in absl_delete_files:
rm_line = 'rm REVIVE\n'
else:
rm_line = ''
absl_script = ["#!/bin/bash\n\n", "export FORT_BUFFERED=true\n\n",
"export OMP_NUM_THREADS=1\n\n"]
for directory in individual_directories:
absl_script.extend(["pushd %s > /dev/null\n" % directory,
"%s\n" % dl_poly_exe,
rm_line,
"popd > /dev/null\n"])
absl_faps = open('absl_faps', 'w')
absl_faps.writelines(absl_script)
absl_faps.close()
os.chmod('absl_faps', 0o755)
# Submit this script
if self.options.getbool('no_submit'):
info("ABSL input files generated; skipping job submission")
jobids[(temp, press)] = [False]
else:
jobid = self.job_handler.submit('absl', self.options)
jobids[(temp, press)] = [jobid]
os.chdir('..')
os.chdir(self.options.get('job_dir'))
return jobids
def calculate_properties(self):
"""Calculate general structural properties."""
job_name = self.options.get('job_name')
job_dir = self.options.get('job_dir')
props_dir = path.join(job_dir, 'faps_%s_properties' % job_name)
mkdirs(props_dir)
os.chdir(props_dir)
# Neighbour list is only used by surface area, uncomment if needed
# for anything else
#self.structure.gen_neighbour_list()
# Since this runs before fastmc, and can run without it, check if the
# guests are initialised here
guests = [Guest(x) for x in self.options.gettuple('guests')]
if not same_guests(self.structure.guests, guests):
info("Replacing old guests with %s" % " ".join(guest.ident for
guest in guests))
self.structure.guests = guests
##
# Surface area calculations
##
surf_probes = self.options.gettuple('surface_area_probe', dtype=float)
for probe in surf_probes:
if self.structure.surface_area(probe) is None:
self.structure.surface_area(probe, value=self.calc_surface_area(probe))
# Neighbour list makes .niss too big; remove them
for atom in self.structure.atoms:
atom.neighbours = None
del atom.neighbours
# Zeoplusplus gives fast access to many properties
if self.options.getbool('zeo++'):
try:
self.calculate_zeo_properties()
except (OSError, IOError):
error("Error running zeo++; skipping")
# PLATON can calculate the PXRD pattern
if self.options.getbool('platon_pxrd'):
try:
self.calculate_pxrd()
except (OSError, IOError):
error("Error running platon; skipping")
os.chdir(job_dir)
def calculate_zeo_properties(self):
"""Run the zeo++ and update properties with no error trapping."""
job_name = self.options.get('job_name')
zeofiles = self.structure.to_zeoplusplus()
filetemp = open("%s.cssr" % job_name, 'w')
filetemp.writelines(zeofiles[0])
filetemp.close()
filetemp = open("%s.rad" % job_name, 'w')
filetemp.writelines(zeofiles[1])
filetemp.close()
filetemp = open("%s.mass" % job_name, 'w')
filetemp.writelines(zeofiles[2])
filetemp.close()
probes = set([1.0]) # Always have a helium probe
for guest in self.structure.guests:
if hasattr(guest, 'probe_radius'):
probes.add(guest.probe_radius)
zeo_exe = shlex.split(self.options.get('zeo++_exe'))
zeo_exe += ['-mass', '%s.mass' % job_name, '-r', '%s.rad' % job_name]
cssr_file = ['%s.cssr' % job_name]
# included sphere, free sphere, included sphere along free path
zeo_command = zeo_exe + ['-res'] + cssr_file
info("Running zeo++ pore diameters")
debug("Running command: '" + " ".join(zeo_command) + "'")
zeo_process = subprocess.Popen(zeo_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
zeo_process.wait()
zeo_stderr = " ".join(x.strip() for x in zeo_process.stderr.readlines())
debug(zeo_stderr)
if "Voronoi volume check failed" in zeo_stderr:
warning("Structure is likely bad; zeo++ is unable to complete")
warning(zeo_stderr)
self.structure.bad_structure = True
res_file = open('%s.res' % job_name).read().split()
self.structure.pore_diameter = tuple(float(x) for x in res_file[1:])
atom_samples = '%i' % 2000
volume_samples = '%i' % (20*self.structure.cell.volume)
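# Monte Carlo sampling densities for zeo++: 2000 points per atom
# for the surface area and 20 points per A^3 of cell for volumes.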
for probe in probes:
zeo_command = zeo_exe + [
'-chan', '%f' % probe,
'-sa', '%f' % probe, '%f' % probe, atom_samples,
'-vol', '%f' % probe, '%f' % probe, volume_samples] + cssr_file
debug("Running command: '" + " ".join(zeo_command) + "'")
zeo_process = subprocess.Popen(zeo_command, stdout=subprocess.PIPE)
zeo_process.wait()
# channel dimensionality
channels = [int(x) for x in open('%s.chan' % job_name).read().split()[5:]]
self.structure.sub_property('dimensionality', probe, channels)
# surface area
for line in open('%s.sa' % job_name):
if 'A^2' in line:
self.structure.sub_property('zeo_surface_area', probe,
value=float(line.split()[-1]))
# accessible volume
for line in open('%s.vol' % job_name):
if 'A^3' in line:
self.structure.sub_property('void_volume', probe,
value=float(line.split()[-1]))
def calculate_pxrd(self):
"""Run platon PXRD and update properties with no error trapping."""
job_name = self.options.get('job_name')
out_cif = self.structure.to_cif()
filetemp = open("%s.faps.cif" % job_name, 'w')
filetemp.writelines(out_cif)
filetemp.close()
platon_exe = self.options.get('platon_exe')
platon_cmd = [platon_exe, '-Q', '-o', '%s.faps.cif' % job_name]
info("Running PLATON PXRD")
debug("Running command: '" + " ".join(platon_cmd) + "'")
platon_process = subprocess.Popen(platon_cmd, stdout=subprocess.PIPE)
platon_process.wait()
cpi_file = open('%s.faps.cpi' % job_name).readlines()
probe = cpi_file[4].strip() # source metal, e.g. Cu, Mo
try:
xrd = [int(x) for x in cpi_file[10:]]
self.structure.sub_property('pxrd', probe=probe, value=xrd)
except ValueError:
warning("PXRD gave weird result, check structure")
# These files are large and probably not needed afterwards
remove_files(['%s.faps.lis' % job_name, '%s.faps.eld' % job_name,
'%s.faps.ps' % job_name])
@property
def esp_grid(self):
"""Estimate the esp grid based on resolution and memory."""
# If repeat is using double precision; use 4 for single
repeat_prec = 8
# User defined resolution, try to use this
resolution = self.options.getfloat('esp_resolution')
repeat_ncpu = self.options.getint('repeat_ncpu')
if repeat_ncpu == 1:
vmem = self.options.getfloat('serial_memory')
else:
vmem = self.options.getfloat('threaded_memory')
# Nice even grids might scale better in parallel repeat
esp_grid = tuple([int(4*np.ceil(x/(4*resolution)))
for x in self.structure.cell.params[:3]])
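# Grid points are rounded up to a multiple of four along each
# cell vector at the requested resolution.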
memory_guess = prod(esp_grid)*self.structure.natoms*repeat_prec/1e9
self._esp_reduced = False
if memory_guess > vmem:
warning("ESP at this resolution might need up to %.1f GB of "
"memory but calculation will only request %.1f" %
(memory_guess, vmem))
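# Memory scales as 1/resolution^3, so stretching the resolution by
# (memory_guess/vmem)^(1/3) brings the estimate back down to vmem.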
resolution = resolution/pow(vmem/memory_guess, 1.0/3)
esp_grid = tuple([int(4*np.ceil(x/(4*resolution)))
for x in self.structure.cell.params[:3]])
warning("Reduced grid to %.2f A resolution to fit" % resolution)
self._esp_reduced = True
elif resolution != 0.1:
# VASP defaults to grids of around 0.1, so check if user has
# requested a reduced grid
info("User requested esp resolution %f" % resolution)
self._esp_reduced = True
self._esp_grid = esp_grid
return esp_grid
@property
def esp_reduced(self):
"""Has the esp been reduced to fit the memory requirements?"""
if not hasattr(self, '_esp_reduced'):
# generate the esp and check memory requirements
self.esp_grid
return self._esp_reduced
def calc_surface_area(self, rprobe=0.0):
"""Accessible surface area by uniform or Monte Carlo sampling."""
self.structure.gen_neighbour_list()
xyz = []
resolution = self.options.getfloat('surface_area_resolution')
uniform = self.options.getbool('surface_area_uniform_sample')
info("Calculating surface area: %.3f probe, %s points, %.3f res" %
(rprobe, ("random","uniform")[uniform], resolution))
total_area = 0.0
hydrophilic_area = 0.0
# gromacs default of 0.2 seems very constrained
hydrophilic_threshold = 0.3
cell = self.structure.cell.cell
inv_cell = np.linalg.inv(cell.T)
# Pre-calculate and organise the in-cell atoms
atoms = [(atom.ipos(cell, inv_cell).tolist(),
atom.ifpos(inv_cell),
atom.vdw_radius+rprobe,
atom.neighbours,
atom) for atom in self.structure.atoms]
# sigma is the vdw_radius plus distance to center of probe, which
# gives accessible surface area;
all_samples = []
for a1_pos, a1_fpos, a1_sigma, neighbours, atom in atoms:
surface_area = 4*pi*(a1_sigma**2)
nsamples = int(surface_area/resolution)
if not nsamples in all_samples:
debug("Atom type with %i samples" % nsamples)
all_samples.append(nsamples)
ncount = 0
if uniform:
# uniform spiral sample of surface
z_vals = np.linspace(1, -1, nsamples, endpoint=True)
r_vals = sqrt(1-z_vals**2)
t_vals = np.linspace(0, pi*(3-(5**0.5))*nsamples,
nsamples, endpoint=False)
points = array([r_vals*cos(t_vals),
r_vals*sin(t_vals),
z_vals]).transpose()*a1_sigma + a1_pos
else:
# random MC sampling
phi = 2*np.random.random(nsamples)*pi
costheta = np.random.random(nsamples)*2 - 1
theta = arccos(costheta)
points = array([sin(theta)*cos(phi),
sin(theta)*sin(phi),
cos(theta)]).transpose()*a1_sigma + a1_pos
# All points are brought into the cell
points = [dot(inv_cell, x) for x in points]
fpoints = np.mod(points, 1.0)
points = [list(dot(x, cell)) for x in fpoints]
for point, fpoint in zip(points, fpoints):
# Check for overlap
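# neighbours holds (distance, index) pairs; the early acceptance
# below relies on them being sorted with the closest atom first.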
for a2_dist, a2_idx in neighbours:
a2_pos = atoms[a2_idx][0]
a2_fpos = atoms[a2_idx][1]
a2_sigma = atoms[a2_idx][2]
if a2_dist > a1_sigma + a2_sigma:
# No more atoms within the radius, point valid
ncount += 1
xyz.append((atom.type, point, atom.charge))
break
elif vecdist3(point, a2_pos) < a2_sigma:
# Point collision
break
elif min_dist(point, fpoint, a2_pos, a2_fpos, cell) < a2_sigma:
# Periodic collision
break
else:
# Loop over all atoms finished; point valid
ncount += 1
xyz.append((atom.type, point, atom.charge))
# Scale the sphere area by the fraction of sample points that were accessible
if abs(atom.charge) > hydrophilic_threshold:
hydrophilic_area += (surface_area*ncount)/nsamples
total_area += (surface_area*ncount)/nsamples
if self.options.getbool('surface_area_save'):
job_name = self.options.get('job_name')
xyz_out = open('%s-surf-%.2f.xyz' % (job_name, rprobe), 'w')
xyz_out.write('%i\nResolution: %f Area: %f\n' %
(len(xyz), resolution, total_area))
for ppt in xyz:
xyz_out.write(('%-6s' % ppt[0]) +
('%10.6f %10.6f %10.6f' % tuple(ppt[1])) +
('%10.6f\n' % ppt[2]))
try:
hydrophilic_fraction = hydrophilic_area/total_area
except ZeroDivisionError:
hydrophilic_fraction = 0.0
info("Hydrophilic area (A^2) and fraction (probe: %f): %f, %f" %
(rprobe, hydrophilic_area, hydrophilic_fraction))
return total_area
class Structure(object):
"""
The current state of the structure; update as the calculations proceed.
Structure provides methods to produce input files for and take output from
various computational chemistry packages but needs to be told what to do.
Internal energy units are kcal/mol.
Methods are grouped:
* Initial structure parsers
* Output file parsers to update structure
* Input file generation
* Internal manipulation methods
"""
# TODO: dft energy?
def __init__(self, name):
"""Just instance an empty structure initially."""
self.name = name
self.cell = Cell()
self.atoms = []
self.esp = None
self.dft_energy = 0.0
self.uff_energy = 0.0
self.guests = []
self.properties = {}
self.space_group = None
self.net_charge = None
def from_file(self, basename, filetype, defaults):
"""Select the correct file parser."""
if filetype in ['sql', 'sqlite']:
# Makeshift selection method to select a mof
# jobname is dbname.type.structure_id
from backend.sql import AlchemyBackend
# [db_name, sym or free, identity]
db_params = self.name.split('.')
reader = AlchemyBackend(db_params[0])
cif_string = reader.start_cif(db_params[1], int(db_params[2]))
self.from_cif(string=cif_string)
elif filetype.lower() in ['pdb']:
self.from_pdb(basename + '.' + filetype)
elif filetype.lower() in ['pqr']:
# Look for a pqr or just a pdb with charges
listdir = os.listdir('.')
if (basename + '.pqr') in listdir:
self.from_pdb(basename + '.pqr', charges=True)
else:
self.from_pdb(basename + '.pdb', charges=True)
elif filetype.lower() in ['vasp', 'poscar', 'contcar']:
listdir = os.listdir('.')
test_files = [
basename + '.contcar', basename + '.CONTCAR', 'CONTCAR',
basename + '.poscar', basename + '.POSCAR', 'POSCAR']
for filename in test_files:
if filename in listdir:
self.from_vasp(filename)
break
elif filetype.lower() in ['cif']:
self.from_cif(basename + '.' + filetype)
elif filetype.lower() in ['xyz']:
cell = defaults.gettuple('default_cell', float)
self.from_xyz(basename + '.' + filetype, cell=cell)
else:
error("Unknown filetype %s" % filetype)
def update_pos(self, opt_code, options=None):
"""Select the method for updating atomic positions."""
opt_path = path.join('faps_%s_%s' % (self.name, opt_code))
info("Updating positions from %s" % opt_code)
if opt_code == 'vasp':
self.from_vasp(path.join(opt_path, 'CONTCAR'), update=True)
elif opt_code == 'siesta':
self.from_siesta(path.join(opt_path, '%s.STRUCT_OUT' % self.name))
elif opt_code == 'gromacs':
self.from_gromacs(path.join(opt_path, 'confout.g96'))
unneeded_files = options.gettuple('gromacs_delete_files')
remove_files(unneeded_files, opt_path)
keep_files = options.gettuple('gromacs_compress_files')
compress_files(keep_files, opt_path)
elif opt_code == 'gulp':
opt_path = "%s_opt" % opt_path
self.optimisation_output = validate_gulp_output(
path.join(opt_path, 'faps-%s.out' % self.name))
self.from_gulp_output(path.join(opt_path, '%s.grs' % self.name))
else:
error("Unknown positions to import %s" % opt_code)
def update_charges(self, charge_method, options=None):
"""Select the method for updating charges."""
charge_path = path.join('faps_%s_%s' % (self.name, charge_method))
if charge_method == 'repeat':
info("Updating charges from repeat")
self.charges_from_repeat(
path.join(charge_path, 'faps-%s.out' % self.name),
options.getbool('symmetry'))
# Cleanup of REPEAT files
unneeded_files = options.gettuple('repeat_delete_files')
remove_files(unneeded_files, charge_path)
keep_files = options.gettuple('repeat_compress_files')
compress_files(keep_files, charge_path)
elif charge_method == 'gulp':
info("Updating charges from GULP QEq")
self.charges_from_gulp(
path.join(charge_path, 'faps-%s.out' % self.name))
elif charge_method == 'egulp':
info("Updating charges from EGULP QEq")
self.charges_from_egulp(path.join(charge_path, 'charges.dat'))
else:
error("Unknown charge method to import %s" % charge_method)
def update_gcmc(self, tp_point, options):
"""Select the source for GCMC results and import."""
gcmc_code = options.get('mc_code')
gcmc_path = path.join('faps_%s_%s' % (self.name, gcmc_code))
# Runs in subdirectories
tp_path = path.join(gcmc_path, format_tp_path(tp_point))
if gcmc_code == 'fastmc':
info("Importing results from FastGCMC")
self.fastmc_postproc(tp_path, tp_point, options)
else:
error("Unknown gcmc method to import %s" % gcmc_code)
def update_absl(self, tp_point, options):
"""Select the source for ABSL results and import."""
absl_path = path.join('faps_%s_%s' % (self.name, 'absl'))
# Runs in subdirectories
tp_path = path.join(absl_path, format_tp_path(tp_point))
info("Importing results from ABSL")
self.absl_postproc(tp_path, tp_point, options)
def from_pdb(self, filename, charges=False):
"""Read an initial structure from a pdb file."""
info("Reading positions from pdb file: %s" % filename)
filetemp = open(filename)
pdb_file = filetemp.readlines()
filetemp.close()
# Build a local list before setting attribute
newatoms = []
for line in pdb_file:
lline = line.lower()
if lline.startswith('cryst1'):
self.cell.from_pdb(line)
self.space_group = line[55:66].strip()
elif lline.startswith('atom') or lline.startswith('hetatm'):
newatom = Atom()
newatom.from_pdb(line, charges=charges)
newatoms.append(newatom)
self.atoms = newatoms
def from_cif(self, filename=None, string=None):
"""Genereate structure from a .cif file."""
if filename is not None:
info("Reading positions from cif file: %s" % filename)
filetemp = open(filename)
cif_file = filetemp.readlines()
filetemp.close()
elif string is not None:
info("Positions from cif string")
cif_file = string.splitlines()
else:
error("No source for cif file")
cif_file = strip_blanks(cif_file)
params = [None, None, None, None, None, None]
atoms = []
cif_bonds = {}
symmetry = []
loops = []
idx = 0
while idx < len(cif_file):
# line text needs to be manageable; cif guidelines can be
# permissive
# Can no longer just check for _underscores in lines
# as UFF types can have them and mess up parsing
line = cif_file[idx].lower().strip()
if '_cell_length_a' in line:
params[0] = ufloat(line.split()[1])
elif '_cell_length_b' in line:
params[1] = ufloat(line.split()[1])
elif '_cell_length_c' in line:
params[2] = ufloat(line.split()[1])
elif '_cell_angle_alpha' in line:
params[3] = ufloat(line.split()[1])
elif '_cell_angle_beta' in line:
params[4] = ufloat(line.split()[1])
elif '_cell_angle_gamma' in line:
params[5] = ufloat(line.split()[1])
elif '_symmetry_space_group_name_h-m' in line:
self.space_group = line.split()[1]
elif '_chemical_properties_physical' in line:
physical = list(shlex.shlex(line, posix=False))[1].strip("'").lower()
if physical.startswith('net charge is'):
self.net_charge = float(physical.split()[3])
elif 'loop_' in line:
# loops for _atom_site, _symmetry and _geom
heads = []
body = []
while line.startswith('_') or 'loop_' in line:
# must keep the loop_ line as this can still contain headers
heads.extend(line.split())
idx += 1
# don't lower these to keep atomic symbols
line = cif_file[idx].strip()
while idx < len(cif_file) and not line.startswith('_') and not 'loop_' in line:
# shlex keeps 'quoted items' as one
# Some cifs seem to have primed atom symbols
# posix=False should help
# using .shlex instead of .split works with '#' comments too
split_line = shlex.shlex(line, posix=False)
split_line.whitespace_split = True
split_line = list(split_line)
body.extend([x.strip("'").strip('"') for x in split_line])
idx += 1
try:
line = cif_file[idx]
except IndexError:
line = ''
if 'loop_' in heads:
heads.remove('loop_')
loops.append((heads, body))
continue
idx += 1
# cell first
if np.all(params):
self.cell.params = params
else:
error("No cell or incomplete cell found in cif file")
# parse loop contents
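# each loop body is a flat list of values; chunking it into rows
# of len(heads) and zipping with the headers gives one dict per row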
for heads, body in loops:
if '_atom_site_fract_x' in heads:
while body:
atoms.append(dict(zip(heads, body)))
body = body[len(heads):]
if '_symmetry_equiv_pos_as_xyz' in heads:
while body:
sym_dict = dict(zip(heads, body))
symmetry.append(
Symmetry(sym_dict['_symmetry_equiv_pos_as_xyz']))
body = body[len(heads):]
if '_ccdc_geom_bond_type' in heads:
while body:
bond_dict = dict(zip(heads, body))
# bond is sorted so there are no duplicates
# and tuple so it can be hashed
bond = (bond_dict['_geom_bond_atom_site_label_1'],
bond_dict['_geom_bond_atom_site_label_2'])
bond = tuple(sorted(bond))
# bond distance and type default to None if not specified
distance = bond_dict.get('_geom_bond_distance')
if distance is not None:
distance = float(distance)
bond_type = bond_dict.get('_ccdc_geom_bond_type')
cif_bonds[bond] = (distance, bond_type)
body = body[len(heads):]
if not symmetry:
debug('No symmetry found; assuming identity only')
symmetry = [Symmetry('x,y,z')]
newatoms = []
for site_idx, atom in enumerate(atoms):
for sym_op in symmetry:
newatom = Atom(parent=self)
newatom.from_cif(atom, self.cell.cell, sym_op, site_idx)
newatoms.append(newatom)
self.atoms = newatoms
if len(symmetry) > 1:
# can skip if just identity operation as it's slow for big systems
# Some of pete's symmetrised mofs need a higher tolerance
duplicate_tolerance = 0.2 # Angstroms
self.remove_duplicates(duplicate_tolerance)
bonds = {}
# TODO(tdaff): this works for the one tested MOF; 0.1 was not enough
# only check for bonds that are too long, not too short.
bond_tolerence = 0.25
# Assign bonds by index
for bond, bond_data in cif_bonds.items():
for first_index, first_atom in enumerate(self.atoms):
if first_atom.site == bond[0]:
for second_index, second_atom in enumerate(self.atoms):
if second_atom is first_atom:
continue
elif second_atom.site == bond[1]:
# TODO(tdaff): symmetry implementation for cif bonding
distance = min_distance(first_atom, second_atom)
bond_dist = bond_data[0]
if bond_dist is None:
bond_dist = first_atom.covalent_radius + second_atom.covalent_radius
if distance < (bond_dist + bond_tolerence):
# use the sorted index as bonds between the
# same type are doubly specified
bond_id = tuple(sorted((first_index, second_index)))
bonds[bond_id] = CCDC_BOND_ORDERS[bond_data[1]]
if first_atom.is_metal or second_atom.is_metal:
first_atom.is_fixed = True
second_atom.is_fixed = True
self.bonds = bonds
self.symmetry = symmetry
def from_vasp(self, filename='CONTCAR', update=False):
"""Read a structure from a vasp [POS,CONT]CAR file."""
info("Reading positions from vasp file: %s" % filename)
fix_vasp_wrapped_types(filename)
filetemp = open(filename)
contcar = filetemp.readlines()
filetemp.close()
atom_list = []
atom_types = []
scale = float(contcar[1])
self.cell.from_lines(contcar[2:5], scale)
if contcar[5].split()[0].isalpha():
# vasp 5 with atom names
atom_types = contcar[5].split()
del contcar[5]
poscar_counts = [int(x) for x in contcar[5].split()]
natoms = sum(poscar_counts)
if contcar[6].strip()[0].lower() in "s":
# 's'elective dynamics line; we don't care
del contcar[6]
# mcell converts frac -> cart if necessary and scales
if contcar[6].strip()[0].lower() in "ck":
mcell = identity(3) * scale
else:
mcell = self.cell.cell
# parsing positions
if update:
for atom, at_line in zip(self.atoms, contcar[7:7+natoms]):
atom.from_vasp(at_line, cell=mcell)
elif not atom_types:
critical("Will not extract structure from older vasp files")
else:
line_idx = 6
for at_type, at_count in zip(atom_types, poscar_counts):
for _atom_idx in range(at_count):
line_idx += 1
this_atom = Atom()
this_atom.from_vasp(contcar[line_idx], at_type, mcell)
atom_list.append(this_atom)
self.atoms = atom_list
def from_siesta(self, filename):
"""Update the structure from the siesta output."""
info("Updating positions from file: %s" % filename)
filetemp = open(filename)
struct_out = filetemp.readlines()
filetemp.close()
self.cell.from_lines(struct_out[:3])
for atom, line in zip(self.atoms, struct_out[4:]):
atom.from_siesta(line, self.cell.cell)
def from_gromacs(self, filename):
"""Update the structure from a gromacs optimisation G96 format file."""
info("Updating positions from file: %s" % filename)
g96 = open(filename).readlines()
# Atom positions, just regular
for line, atom in zip(g96[4:], self.atoms):
atom.pos = [10.0*float(x) for x in line[25:].split()] # nm to A
del atom.fractional
# New cell too, possibly, check ordering.
box = [10*float(x) for x in g96[-2].split()]
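# The g96 box line is taken to be in gromacs order: v1(x) v2(y)
# v3(z) v1(y) v1(z) v2(x) v2(z) v3(x) v3(y), which the mapping
# below rearranges into row-major cell vectors.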
# Only reports three values for cubic cell
if len(box) == 3:
new_cell = [box[0], 0.0, 0.0,
0.0, box[1], 0.0,
0.0, 0.0, box[2]]
else:
new_cell = [box[0], box[3], box[4],
box[5], box[1], box[6],
box[7], box[8], box[2]]
self.cell.cell = new_cell
# looking for energy too
md_log = open(path.join(path.dirname(filename), 'md.log'))
for line in md_log:
if line.startswith('Potential Energy'):
self.uff_energy = float(line.split()[-1])
info("UFF energy: %f kJ/mol" % self.uff_energy)
elif line.startswith('Maximum force'):
maximum_force = float(line.split()[3])
info("Maximum Force: %f kJ/mol/nm" % maximum_force)
if maximum_force > 10:
error("Calculation is not converged!! Check output!")
# Make sure everything is good from here
if self.check_close_contacts(covalent=1.0):
warning("Structure may have atom overlap, check gromacs output!")
self.bad_structure = True
if self.bond_length_check():
warning("Structure may have strained bonds, check gromacs output!")
self.bad_structure = True
def from_gulp_output(self, filename):
"""Update the structure from the gulp optimisation output."""
info("Updating positions from file: %s" % filename)
grs_out = open(filename)
for line in grs_out:
if line.strip() == 'cell':
params = tuple(float(x) for x in grs_out.next().split()[:6])
self.cell.params = params
elif line.strip() == 'fractional':
cell = self.cell.cell
for atom, atom_line in zip(self.atoms, grs_out):
atom.pos = dot([gfloat(x) for x in atom_line.split()[2:5]], cell)
# FIXME(tdaff) fractionals need to change automatically
# when the position gets updated (or the cell!)
del atom.fractional
# Make sure everything is good from here
if self.check_close_contacts(covalent=1.0):
warning("Structure might have atom overlap, check gulp output!")
self.bad_structure = True
if self.bond_length_check():
warning("Structure might have strained bonds, check gulp output!")
self.bad_structure = True
def from_xyz(self, filename, update=False, cell=None):
"""Read a structure from an file."""
info("Reading positions from xyz file: %s" % filename)
filetemp = open(filename)
xyz_file = filetemp.readlines()
filetemp.close()
# Setting the cell
if len(cell) == 6:
self.cell.params = cell
elif len(cell) == 9:
self.cell.cell = array(cell).reshape((3, 3))
elif cell is not None:
error("Invalid cell specification %s" % str(cell))
# Build a local list before setting attribute
newatoms = []
natoms = int(xyz_file[0])
self.properties['header'] = xyz_file[1].strip()
for line in xyz_file[2:2+natoms]:
newatom = Atom()
newatom.from_xyz(line)
newatoms.append(newatom)
if update:
if natoms != self.natoms:
critical("Incorrect number of atoms to update")
terminate(96)
for atom, newatom in zip(self.atoms, newatoms):
if atom.type != newatom.type:
error("Atom order may have changed")
atom.pos = newatom.pos
else:
self.atoms = newatoms
def charges_from_repeat(self, filename, symmetry=False):
"""Parse charges and update structure."""
info("Getting charges from file: %s" % filename)
charges = []
filetemp = open(filename)
for line in filetemp:
# Stripping line means it can read Fortran or C++ REPEAT output
if line.strip().startswith("Charge"):
line = line.split()
# index, type, charge
charges.append((int(line[1]), int(line[4]), float(line[6])))
if "Error" in line:
if float(line.split()[-1]) > 0.6:
warning("Error in repeat charges is very high - check cube!")
filetemp.close()
if symmetry:
tree = self.symmetry_tree
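# symmetry_tree maps each unique site to the indices of its
# symmetry equivalents; REPEAT gives one charge per unique site,
# which is copied onto every equivalent atom below.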
if len(charges) != len(tree):
error("Incorrect number of charge sets; check REPEAT output")
terminate(97)
for symm, charge in zip(sorted(tree.items()), charges):
for at_idx in symm[1]:
self.atoms[at_idx].charge = charge[2]
else:
if len(charges) != len(self.atoms):
error("Incorrect number of charges; check REPEAT output")
terminate(90)
for atom, charge in zip(self.atoms, charges):
atom.charge = charge[2]
def charges_from_gulp(self, filename):
"""Parse QEq charges from GULP output."""
info("Getting charges from file: %s" % filename)
filetemp = open(filename)
gout = filetemp.readlines()
filetemp.close()
start_line = None
failures = 0
for line_num, line in enumerate(gout):
if ' Final charges from QEq :' in line:
# Take the last set of charges
start_line = line_num + 7
elif 'Failed to converge' in line:
# This will occur twice in the output for complete failure
failures += 1
if start_line is None:
error("Final charges not found in gulp output")
terminate(184)
elif failures > 1:
warning("Gulp charges may not be converged")
for atom, chg_line in zip(self.atoms, gout[start_line:]):
atom.charge = float(chg_line.split()[2])
def charges_from_egulp(self, filename):
"""Parse QEq charges from EGULP output."""
info("Getting charges from file: %s" % filename)
filetemp = open(filename)
gout = filetemp.readlines()
filetemp.close()
# charges.dat file only has list of charges in it
for atom, chg_line in zip(self.atoms, gout):
atom.charge = float(chg_line.split()[2])
if atom.charge != atom.charge:
# These are 'nan'; should not continue or fastmc will mess up
error("Egulp charges did not converge, check structure")
terminate(107)
elif abs(atom.charge) == INFINITY:
error("Egulp gave infinite charges, check structure")
terminate(108)
elif abs(atom.charge) > 10:
warning("Very high charge from egulp: %s %f" %
(atom.site, atom.charge))
def to_vasp(self, options):
"""Return a vasp5 poscar as a list of lines."""
optim_h = options.getbool('optim_h')
optim_all = options.getbool('optim_all')
poscar = ["%s\n" % self.name[:80],
" 1.0\n"]
# Vasp writes 16 dp but we get rounding errors (e.g. with cubic cells), so use 14
poscar.extend(self.cell.to_vector_strings(fmt="%23.14f"))
# Bunch up types as much as possible.
types, counts = count_ordered_types(self.atoms)
# Non consecutive similar types are not the same species anymore
sspecies = "".join(" %s" % x for x in types) + "\n"
scounts = "".join(" %i" % x for x in counts) + "\n"
if len(sspecies) > 254 or len(scounts) > 254:
warning("Faps has detected that you have too many atoms for vasp"
"to use in an orderd list and has attempted to re-order"
"them. This might break your calculation and it might be"
"better to run with oreder_atom_types turned on")
self.order_by_types()
types, counts = count_ordered_types(self.atoms)
# Regenerate these strings for new ordering
sspecies = "".join(" %s" % x for x in types) + "\n"
scounts = "".join(" %i" % x for x in counts) + "\n"
poscar.append(sspecies)
poscar.append(scounts)
# The T or F flags are always written, so turn on selective dynamics;
# this also allows fixed positions with a variable cell
poscar.extend(["Selective dynamics\n", "Cartesian\n"])
if optim_all:
info("Optimizing all atom positions")
fix_h = 'T'
fix_all = 'T'
elif optim_h:
info("Optimizing hydrogen positions")
fix_h = 'T'
fix_all = 'F'
else:
info("All atom positions fixed")
fix_h = 'F'
fix_all = 'F'
# assume atoms are ordered
# Atom lines differ from vasp to ensure spaces between numbers
# with <-10 values
for atom in self.atoms:
if atom.type == "H":
poscar.append("%20.15f %19.15f %19.15f" % tuple(atom.pos) +
"%4s%4s%4s\n" % (fix_h, fix_h, fix_h))
else:
poscar.append("%20.15f %19.15f %19.15f" % tuple(atom.pos) +
"%4s%4s%4s\n" % (fix_all, fix_all, fix_all))
return poscar
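# Illustrative POSCAR fragment (not from a real run): with optim_h enabled a
# hydrogen line ends in " T T T" (free to relax) while every other atom ends
# in " F F F" (frozen), which is how VASP reads the selective dynamics flags.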
def to_siesta(self, options):
"""Return a siesta input file as a list of lines."""
job_name = options.get('job_name')
siesta_accuracy = options.get("siesta_accuracy").lower()
if siesta_accuracy in ['high']:
info("Using 'high' accuracy siesta settings")
basis = ('DZP', 100, 200)
elif siesta_accuracy in ['low']:
info("Using 'low' accuracy siesta settings")
basis = ('SZ', 200, 100)
else:
info("Using default siesta accuracy settings")
basis = ('DZ', 150, 150)
u_atoms = unique(self.atoms, key=lambda x: x.type)
u_types = unique(self.types)
fdf = ([
"SystemName %s\n" % job_name,
"SystemLabel %s\n" % job_name,
"\n",
"NumberOfAtoms %i\n" % len(self.atoms),
"NumberOfSpecies %i\n" % len(u_atoms),
"\n",
"SaveElectrostaticPotential .true.\n",
"WriteMullikenPop 1\n"
"WriteXML F\n",
"\n",
"# Accuracy bits\n",
"\n",
"PAO.BasisSize %s\n" % basis[0],
"PAO.EnergyShift %i meV\n" % basis[1],
"MeshCutoff %i Ry\n" % basis[2],
"\n",
"MaxSCFIterations 100\n",
"XC.Functional GGA\n",
"XC.Authors PBE\n",
"SolutionMethod diagon\n",
"ElectronicTemperature 25 K\n",
"DM.NumberPulay 5\n",
"DM.MixingWeight 0.05\n",
"\n",
"%block ChemicalSpeciesLabel\n"] + [
"%6i %6i %6s\n" % ((idx + 1), atom.atomic_number, atom.type)
for idx, atom in enumerate(u_atoms)] + [
"%endblock ChemicalSpeciesLabel\n",
"\n",
"LatticeConstant 1 Ang\n"
"%block LatticeVectors\n"] +
self.cell.to_vector_strings() + [
"%endblock LatticeVectors\n",
"\n",
"AtomicCoordinatesFormat Ang\n",
"\n",
"%block AtomicCoordinatesAndAtomicSpecies\n"] + [
"%12.8f %12.8f %12.8f" % tuple(atom.pos) + "%6i\n" %
(u_types.index(atom.type) + 1) for atom in self.atoms] + [
"%endblock AtomicCoordinatesAndAtomicSpecies\n"])
if "Br" in u_types:
fdf.extend(["\n%block PS.lmax\n",
" Br 2\n",
"%endblock PS.lmax\n"])
if "In" in u_types:
# semicore states need to be accounted for
# TODO(tdaff): selective polarisation on the basis functions
fdf.extend(["\n%block PAO.Basis\n",
"In 3\n",
" n=5 0 2\n",
" 0.0 0.0\n",
" n=5 1 2 P\n",
" 0.0 0.0\n",
" n=4 2 2\n",
" 0.0 0.0\n",
"%endblock PAO.Basis\n"])
optim_h = options.getbool('optim_h')
optim_all = options.getbool('optim_all')
optim_cell = options.getbool('optim_cell')
constraint = []
if optim_all:
info("Optimizing all atom positions")
fdf.append("\nMD.TypeOfRun CG\n")
fdf.append("MD.NumCGSteps %i\n" % 800)
elif optim_h and "H" in self.types:
info("Optimizing hydrogen positions")
fdf.append("\nMD.TypeOfRun CG\n")
fdf.append("MD.NumCGSteps %i\n" % 300)
constraint = ["%i" % (idx+1)
for idx, species in enumerate(self.types)
if species not in ["H"]]
elif optim_cell:
fdf.append("\nMD.TypeOfRun CG\n")
fdf.append("MD.NumCGSteps %i\n" % 300)
constraint = ["%i" % (idx+1) for idx in range(self.natoms)]
if optim_cell:
info("Cell vectors will be optimized")
fdf.append("MD.VariableCell .true.\n")
if constraint:
constraint = textwrap.fill(" ".join(constraint),
initial_indent='position ',
subsequent_indent='position ')
fdf.extend(["\n%block GeometryConstraints\n",
constraint, "\n",
"%endblock GeometryConstraints\n"])
return fdf
def to_gulp(self, qeq_fit=False, optimise=False, terse=False, qeq_dict={}):
"""Return a GULP file to use for the QEq charges."""
if qeq_fit:
keywords = "fitting bulk_noopt qeq\n"
elif optimise:
return self.to_gulp_optimise(terse=terse)
else:
keywords = "single conp qeq\n"
gin_file = [
"# \n# Keywords:\n# \n",
keywords,
"# \n# Options:\n# \n",
"# Initial file written by faps\n"
"name %s\n" % self.name,
"vectors\n"] + self.cell.to_vector_strings() + [
"cartesian\n"]
if qeq_fit:
for atom in self.atoms:
gin_file.extend(["%-5s core " % atom.type,
"%14.7f %14.7f %14.7f " % tuple(atom.pos),
"%14.7f\n" % atom.charge])
gin_file.append("\n# \n# Fitting variables:\n# \nobservables\n")
for idx, atom in enumerate(self.atoms):
gin_file.append("monopoleq\n%5s%11.6f\n" % (idx+1, atom.charge))
gin_file.append("end\nqelectronegativity\n")
for at_type in unique(self.types):
gin_file.extend(
["%-5s" % at_type,
"%9.5f %9.5f %9.5f 1 1 0\n" % QEQ_PARAMS[at_type]])
else:
for atom in self.atoms:
gin_file.extend(["%-5s core " % atom.type,
"%14.7f %14.7f %14.7f\n" % tuple(atom.pos)])
if qeq_dict:
unique_types = unique(self.types)
gin_file.append('\nqelectronegativity\n')
for atom_type, params in qeq_dict.items():
if atom_type in unique_types:
gin_file.append('%-4s %f %f\n' % (atom_type, params[0], params[1]))
gin_file.append("\ndump every %s.grs\nprint 1\n" % self.name)
return gin_file
def to_gulp_optimise(self, terse=False):
"""Return a GULP file to optimise with UFF."""
# all atoms of the same forcefield type must have the same label
# gulp fails if atoms of the same type have different labels!
# this only crops up as an error in the 4.0 versions
# 'decimal_only' stops fractions that cannot be parsed when reading
# updated positions
if terse:
# Don't output the bonds
keywords = "opti noautobond decimal_only conj\n"
else:
keywords = "opti noautobond bond decimal_only conj\n"
gin_file = [
"# \n# Keywords:\n# \n",
keywords,
"# \n# Options:\n# \n",
"# UFF optimisation by gulp\n"
"name %s\n" % self.name,
# using an rfo minimiser with a preconditioned hessian from the
# BFGS minimiser seems to be the most efficient
"switch rfo gnorm 0.3\n",
"vectors\n"] + self.cell.to_vector_strings() + [
" 1 1 1\n 1 1 1\n 1 1 1\n", # constant pressure relaxation
"cartesian\n"]
all_ff_types = {}
for at_idx, atom in enumerate(self.atoms):
ff_type = atom.uff_type
if not ff_type in all_ff_types:
all_ff_types[ff_type] = atom.site
# Sanity check in case types are not given in the input
if ff_type not in UFF_FULL:
error("Atom %s has unknown type %s" % (atom, ff_type))
if atom.is_fixed:
fixed_flags = "0 0 0"
else:
fixed_flags = "1 1 1"
gin_file.extend(["%-5s core " % all_ff_types[ff_type],
"%14.7f %14.7f %14.7f " % tuple(atom.pos),
"%f " % atom.charge,
"%f " % 1.0, # occupancy
fixed_flags, "\n"])
#identify all the individual uff species for the library
gin_file.append("\nspecies\n")
for ff_type, species in all_ff_types.items():
gin_file.append("%-6s core %-6s\n" % (species, ff_type))
gin_file.append("\n")
for bond in sorted(self.bonds):
bond_type = GULP_BOND_ORDERS[self.bonds[bond]]
gin_file.append("connect %6i %6i %s\n" % (bond[0] + 1, bond[1] + 1, bond_type))
gin_file.append("\nlibrary uff\n")
gin_file.append("\nstepmx opt 0.05\n")
# Restart file is for final structure
gin_file.append("\ndump every %s.grs\n" % self.name)
# optimization movie useful for debugging mostly
if terse:
# These tell gulp to be quiet, but we also stop the massive arc
# file being generated
gin_file.append("\nterse inout structure\n"
"terse inout potentials\n"
"terse inout derivatives\n"
"#output movie arc %s\n" % self.name)
else:
gin_file.append("\noutput movie arc %s\n" % self.name)
return gin_file
def to_egulp(self, typed_atoms=False):
"""Generate input files for Eugene's QEq code."""
# bind cell locally for speed and convenience
cell = self.cell.cell
inv_cell = self.cell.inverse
# format exactly as Eugene original script generates it
geometry_file = ['%s\n' % self.name]
geometry_file.extend(self.cell.to_vector_strings(fmt=' %15.12f'))
geometry_file.append('%i\n' % self.natoms)
atomic_numbers = self.atomic_numbers
if typed_atoms:
# Include custom typing, new types must be found by hand.
# 800 N=O -- removed
# 801 S=O
# 802 S-O-H
for atom_idx, atom in enumerate(self.atoms):
if atom.uff_type == 'S_3+6':
for bond in self.bonds:
if atom_idx in bond:
other_idx = other_bond_index(bond, atom_idx)
if self.atoms[other_idx].uff_type == 'O_2':
atomic_numbers[other_idx] = 801
elif self.atoms[other_idx].uff_type == 'O_3':
atomic_numbers[other_idx] = 802
for another_bond in self.bonds:
if other_idx in another_bond:
another_idx = other_bond_index(another_bond, other_idx)
if self.atoms[another_idx].uff_type == 'H_':
atomic_numbers[another_idx] = 1001
# TODO(tdaff): delete code in future version; removed NO2 typing
# elif atom.uff_type == 'N_R':
# this_bonds = []
# for bond in self.bonds:
# if atom_idx in bond:
# other_idx = other_bond_index(bond, atom_idx)
# if self.atoms[other_idx].uff_type == 'O_R':
# atomic_numbers[other_idx] = 800
# atomic numbers should have been modified with exotic types by now
geometry_file.extend([
('%6d ' % atomic_number) +
('%12.7f %12.7f %12.7f' % tuple(atom.ipos(cell, inv_cell))) +
('%12.7f\n' % atom.charge)
for atom, atomic_number in zip(self.atoms, atomic_numbers)
])
return geometry_file
def to_config_field(self, options, fastmc=False, include_guests=None,
dummy=False):
"""
Return CONFIG and FIELD files in DL_POLY style
Parameters
----------
fastmc : boolean
When set to True, the FIELD file will contain the positions for MC
guests and will give incorrect results with DL_POLY.
include_guests : dict or None
A dictionary with guests to be included in the config file with
the framework. The guest type is the key and their positions are
nested lists.
dummy : boolean
If dummy is set, the interaction parameters and charges for the
guest are set to zero
Returns
-------
config : list
The config file as a list of newline terminated strings
field : list
The field file as a list of newline terminated strings
"""
# CONFIG
self.gen_supercell(options)
supercell = self.gcmc_supercell
levcfg = 0 # always
imcon = self.cell.imcon
natoms = len(self.atoms) * prod(supercell)
# do included guests first so natoms can be corrected
offset = 1
included_guests_part = []
# count if any guests are included
# use get( ... , 0) to get zero for not included
guest_nummols = {}
if include_guests is not None:
# having only one atom in DL_POLY errors with
# complaints about too few degrees of freedom
# if there is only one atom included, add a ghost
guest_items = include_guests.items()
dof_fix = (len(guest_items) == 1 # one guest type
and len(guest_items[0][1]) == 1 # one guest of type
and len(guest_items[0][1][0]) == 1 # one atom
and not fastmc)
debug("dot_fix %s" % dof_fix)
for guest in self.guests:
if guest.ident in include_guests:
guest_nummols[guest.ident] = len(include_guests[guest.ident])
for positions in include_guests[guest.ident]:
for atom, position in zip(guest.atoms, positions):
included_guests_part.extend(
["%-6s%10i\n" % (atom.type, offset),
"%20.12f%20.12f%20.12f\n" % tuple(position)])
if dof_fix:
# Add a phantom atom; hopefully Non type
# is not used elsewhere
offset += 1
included_guests_part.extend(
["%-6s%10i\n" % ("Non", offset),
"%20.12f%20.12f%20.12f\n" % tuple(position)])
# increment offset as it is used for the framework
offset += 1
# make natoms correct
natoms += offset - 1
else:
# don't need to apply the degrees of freedom fix
dof_fix = False
config = ["%s\n" % self.name[:80],
"%10i%10i%10i\n" % (levcfg, imcon, natoms)]
config.extend(self.cell.to_vector_strings(scale=supercell))
# included guests go first to match field
config.extend(included_guests_part)
# put them in
for idx, atom in enumerate(self.supercell(supercell)):
# idx+offset for 1 based indexes in CONFIG
config.extend(["%-6s%10i\n" % (atom.type, idx + offset),
"%20.12f%20.12f%20.12f\n" % tuple(atom.pos)])
# FIELD
ntypes = len(self.guests) + 1
field = ["%s\n" % self.name[:80],
"UNITS kcal\n",
"molecular types %i\n" % ntypes]
# Guests
for guest in self.guests:
natoms = guest.natoms
if dof_fix and guest.ident in include_guests:
natoms += 1
field.extend(["&guest %s: %s\n" % (guest.name, guest.source),
"NUMMOLS %i\n" % guest_nummols.get(guest.ident, 0),
"ATOMS %i\n" % natoms])
for atom in guest.atoms:
# Can't just turn off electrostatics as we need to compare to
# the empty framework so zero guest charges
if dummy:
charge = 0
else:
charge = atom.charge
field.append("%-6s %12.6f %12.6f" %
tuple([atom.type, atom.mass, charge]))
if fastmc:
field.append("%12.6f %12.6f %12.6f\n" % tuple(atom.pos))
else:
# atom positions confuse dl_poly which takes nrept or ifrz
field.append(" 1 0\n")
if dof_fix and guest.ident in include_guests:
field.append("%-6s %12.6f %12.6f 1 0\n" %
tuple(["Non", 0.0, 0.0]))
field.append("rigid 1\n")
field.append("%i " % guest.natoms)
field.append(" ".join("%i" % (x + 1) for x in range(guest.natoms)))
field.append("\nfinish\n")
# Framework
field.extend(["Framework\n",
"NUMMOLS %i\n" % prod(supercell),
"ATOMS %i\n" % len(self.atoms)])
if options.getbool('mc_zero_charges'):
for atom in self.atoms:
field.append("%-6s %12.6f %20.14f %6i %6i\n" %
(atom.type, atom.mass, 0.0, 1, 1))
else:
for atom in self.atoms:
field.append("%-6s %12.6f %20.14f %6i %6i\n" %
(atom.type, atom.mass, atom.charge, 1, 1))
field.append("finish\n")
# VDW potentials
atom_set = [atom.type for atom in self.atoms]
for guest in self.guests:
atom_set.extend(atom.type for atom in guest.atoms)
atom_set = unique(atom_set)
field.append("VDW %i\n" % ((len(atom_set) * (len(atom_set) + 1)) / 2))
# modify local ff to deal with guests
force_field = copy(UFF)
for guest in self.guests:
force_field.update(guest.potentials)
# zero everything if we just want framework
if dummy:
force_field = dict((element, (0.0, 0.0)) for element in force_field)
for idxl in range(len(atom_set)):
for idxr in range(idxl, len(atom_set)):
left = atom_set[idxl]
right = atom_set[idxr]
try:
sigma, epsilon = lorentz_berthelot(force_field[left],
force_field[right])
except KeyError:
# catch this if not in the UFF -> zero
warning("No potential defined for %s %s; defaulting to 0" %
(left, right))
sigma, epsilon = 0.0, 0.0
field.append("%-6s %-6s lj %f %f\n" %
(left, right, epsilon, sigma))
# EOF
field.append("close\n")
return config, field
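# For reference, lorentz_berthelot() above is assumed to apply the standard
# mixing rules; a minimal sketch of that behaviour (not the faps source) is:
# def lorentz_berthelot(params_a, params_b):
#     """Arithmetic mean of sigma, geometric mean of epsilon."""
#     sigma = 0.5 * (params_a[0] + params_b[0])
#     epsilon = (params_a[1] * params_b[1]) ** 0.5
#     return sigma, epsilon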
def to_gromacs(self, metal_geometry='input'):
"""Generate GROMACS structure and topology.
metal_geometry will affect topology terms with metal atoms. 'input'
will use the structure to generate the topology, and 'fix' will also
apply a stiffer potential.
Return gro, top and itp files as lists of lines.
"""
# Use this to multiply the potential terms and make them rigid
metal_stiffness_factor = 1
if metal_geometry.lower().startswith('fix'):
keep_metal_geometry = True
metal_stiffness_factor = 10
info("Metals fixed at input geometries")
elif metal_geometry.lower().startswith('in'):
keep_metal_geometry = True
info("Using input geometry for metals")
else:
keep_metal_geometry = False
info("Using UFF parameters for metals")
##
# .gro file
##
# TITLE line
# number of atoms
gro_file = ["%s\n" % self.name, " %i\n" % self.natoms]
# Don't use the MOF name so that we can refer to this easily later
residue = (1, "RESI")
for idx, atom in enumerate(self.atoms):
# In the specification the atom position is given as
# %8.3f in nm, but we probably would like more accuracy
# but make sure there are 5 figures to left of decimal point
pos_nm = tuple([x/10.0 for x in atom.pos])
gro_file.append(("%5i%5s" % residue) +
("%5s%5i" % (atom.uff_type, idx + 1)) +
("%15.10f%15.10f%15.10f\n" % pos_nm))
# cell is also in nm
# v1(x) v2(y) v3(z) v1(y) v1(z) v2(x) v2(z) v3(x) v3(y)
box = self.cell.cell/10.0
# gromacs needs these conditions. Might need to rotate cell sometimes?
if not (box[0][1] == box[0][2] == box[1][2] == 0):
error("Gromacs can't handle this cell orientation")
gro_file.append("%f %f %f %f %f %f %f %f %f\n" % (
box[0][0], box[1][1], box[2][2],
box[0][1], box[0][2],
box[1][0], box[1][2],
box[2][0], box[2][1]))
##
# .top file
##
comb_rule = 2 # 1 = LORENTZ_BERTHELOT; 2 = GEOMETRIC
# geometric is recommended in UFF
# if you use Lorentz Berthelot then sigma and epsilon become C6 and C12
top_file = [
"; Topology file for %s\n" % self.name,
"[ defaults ]\n",
"; nbfunc comb-rule gen-pairs fudgeLJ fudgeQQ\n",
"1 %i yes 1.0 1.0\n\n"
% comb_rule]
# define the forcefield for UFF
unique_types = set()
top_file.append("[ atomtypes ]\n")
top_file.append("; name1 name2 mass charge "
" ptype sigma epsilon\n")
for atom in self.atoms:
if atom.uff_type not in unique_types:
uff_type = atom.uff_type
# sigma = x1 * 2^(-1/6)
# CONVERT TO nm !!
sigma = 0.1*UFF_FULL[uff_type][2]*(2**(-1.0/6.0))
# epsilon = D1 in kcal
epsilon = UFF_FULL[uff_type][3] * KCAL_TO_KJ
top_file.append("%-6s %-6s %9.4f %9.4f A %12.7f "
"%12.7f\n" % (uff_type, uff_type, atom.mass,
0.0, sigma, epsilon))
unique_types.add(uff_type)
# for included file, use default name scheme "topol..."
top_file.extend(["\n#include <topol.itp>\n\n",
"[ system ]\n",
"UFF optimisation of %s\n\n" % self.name,
"[ molecules ]\n",
"%s 1\n" % residue[1]])
##
# .itp file
##
# exclude 3 neighbours
itp_file = ["[ moleculetype ]\n",
"; molname nrexcl\n"
"%s 3\n" % residue[1],
"[ atoms ]\n",
"; nr type resnr residue atom "
"cgnr charge mass \n"]
##
# atoms
##
for idx, atom in enumerate(self.atoms):
uff_type = atom.uff_type
# charge group is different for each atom
# as gromacs has max of 32 in a group
itp_file.append("%-6i %-6s %i %-6s %-6s %d %9.4f %9.4f\n" %
(idx+1, uff_type, residue[0], residue[1], uff_type,
idx+1, atom.charge, atom.mass))
##
# bonds
##
itp_file.append("\n[ bonds ]\n")
itp_file.append("; ai aj funct b0 kb\n")
unique_bonds = {}
bonding_table = dict([(x, {}) for x in range(self.natoms)])
#FIXME(tdaff) bonds will have length in v2
for bond, bondorder in self.bonds.items():
idx_a = bond[0]
idx_b = bond[1]
atom_a = self.atoms[idx_a]
atom_b = self.atoms[idx_b]
uff_a = atom_a.uff_type
uff_b = atom_b.uff_type
typed_bond = tuple(sorted([uff_a, uff_b]) + [bondorder])
bonding_table[idx_a][idx_b] = typed_bond
bonding_table[idx_b][idx_a] = typed_bond
# have we already calculated the parameters
if not typed_bond in unique_bonds:
ri = UFF_FULL[uff_a][0]
rj = UFF_FULL[uff_b][0]
chiI = UFF_FULL[atom_a.uff_type][8]
chiJ = UFF_FULL[atom_b.uff_type][8]
rbo = -0.1332*(ri+rj)*log(bondorder)
ren = ri*rj*(((sqrt(chiI) - sqrt(chiJ))**2))/(chiI*ri + chiJ*rj)
r0 = (ri + rj + rbo - ren)
# force constant
# parameters Z1
kb = (UFF_FULL[uff_a][5]*UFF_FULL[uff_b][5])/(r0**3)
kb *= 0.5*KCAL_TO_KJ*664.12
unique_bonds[typed_bond] = (r0, kb) # in nm
if (atom_a.is_metal or atom_b.is_metal) and keep_metal_geometry:
params = (min_distance(atom_a, atom_b, self.cell.cell),
unique_bonds[typed_bond][1]*metal_stiffness_factor)
else:
params = unique_bonds[typed_bond]
bond_func = 1 # gromacs harmonic
# add 1 to bond as 1 indexed
# All these are converted to gromacs units, ugh...
itp_file.append('%-5i %-5i %1i %11.4f %11.4f ; %-5s %-5s %.2f\n' %
(bond[0]+1, bond[1]+1, bond_func, 0.1*params[0],
200*params[1], typed_bond[0], typed_bond[1],
bondorder))
##
# angles
##
itp_file.append("\n[ angles ]\n")
for idx_a in sorted(bonding_table):
bonded_atoms = bonding_table[idx_a]
for l_idx in bonded_atoms:
for r_idx in bonded_atoms:
if r_idx >= l_idx:
# don't include angles twice
continue
# FIXME(tdaff) special cases for 5 or more coord
# tag central uff and two largest neighbours
central_atom = self.atoms[idx_a]
l_atom = self.atoms[l_idx]
r_atom = self.atoms[r_idx]
# FIXME(tdaff) over/under coordinated atoms?
theta0 = UFF_FULL[central_atom.uff_type][1]
# FIXME(tdaff) switch if coordination is not correct
cosT0 = cos(theta0*DEG2RAD)
sinT0 = sin(theta0*DEG2RAD)
c2 = 1.0 / (4.0 * sinT0 * sinT0)
c1 = -4.0 * c2 * cosT0
c0 = c2*(2.0*cosT0*cosT0 + 1.0)
zi = UFF_FULL[l_atom.uff_type][5]
zk = UFF_FULL[r_atom.uff_type][5]
bond_ab = tuple(sorted((l_idx, idx_a)))
bond_ab = tuple(sorted([l_atom.uff_type, central_atom.uff_type]) + [self.bonds[bond_ab]])
bond_bc = tuple(sorted((r_idx, idx_a)))
bond_bc = tuple(sorted([r_atom.uff_type, central_atom.uff_type]) + [self.bonds[bond_bc]])
rab = unique_bonds[bond_ab][0]
rbc = unique_bonds[bond_bc][0]
rac = sqrt(rab*rab + rbc*rbc - 2.0 * rab*rbc*cosT0)
ka = (664.12 * KCAL_TO_KJ) * (zi * zk / (rac**5.0))
ka *= (3.0*rab*rbc*(1.0 - cosT0*cosT0) - rac*rac*cosT0)
# FIXME(tdaff) change uff_coordination to coordination
if central_atom.uff_coordination == 1:
# linear bonds (e.g. C_1 triple bonds) had 0 force
# constant otherwise
thetamin = 180.0
kappa = ka
elif abs(c2) > 0.001:
thetamin = pi - arccos(c1/(4.0*c2))
thetamin /= DEG2RAD
kappa = ka * (16.0*c2*c2 - c1*c1) / (4.0*c2)
elif central_atom.coordination == 2:
thetamin = 120.0
kappa = 4.0*ka/3.0
elif central_atom.coordination in (4, 6):
thetamin = 90.0
kappa = 2.0*ka
elif central_atom.coordination == 7:
alpha = 2.0*pi/5.0
c7 = sin(alpha)*(cos(alpha) - cos(2*alpha))
thetamin = 72.0
kappa = 2.0 * c7*c7 * ka * c1
else:
thetamin = pi - arccos(c1/(4.0*c2))
thetamin /= DEG2RAD
kappa = ka * (16.0*c2*c2 - c1*c1) / (4.0*c2)
if keep_metal_geometry and (central_atom.is_metal or
l_atom.is_metal or
r_atom.is_metal):
# harmonic potential means it will not
# flip around
potential = "harmonic"
kappa *= metal_stiffness_factor
thetamin = angle_between(l_atom, central_atom, r_atom,
cell=self.cell)
elif central_atom.coordination == 1:
# linear bonds seemed too flexible
potential = "harmonic"
else:
potential = "G96"
# Use the harmonic approximation for linear angles as the
# stiffness is undefined for the G96 form
if potential == "G96" and thetamin != 180.0:
kappa /= sin(thetamin*DEG2RAD)**2
potential_function = 2
else: # harmonic
potential_function = 1
angle_fmt = ("%-6i %-6i %-6i %i %9.4f %9.4f ;"
" %-6s %-6s %-6s\n")
itp_file.append(angle_fmt % (
l_idx + 1, idx_a + 1, r_idx + 1, potential_function,
thetamin, kappa, l_atom.uff_type,
central_atom.uff_type, r_atom.uff_type))
##
# dihedrals
##
itp_file.append("\n[ dihedrals ]\n; proper torsion terms\n")
# Like OB FindTorsions
# Generate all torsions first so we can find equivalents
torsions = []
for idx_b in sorted(bonding_table):
for idx_c in bonding_table[idx_b]:
if idx_b >= idx_c:
continue
for idx_a in bonding_table[idx_b]:
if idx_a == idx_c:
continue
for idx_d in bonding_table[idx_c]:
if idx_d == idx_b or idx_d == idx_a:
continue
else:
torsions.append((idx_a, idx_b, idx_c, idx_d))
overcoordinated = set()
# now loop to generate force field
for torsion in torsions:
idx_a, idx_b, idx_c, idx_d = torsion
atom_a = self.atoms[idx_a]
atom_b = self.atoms[idx_b]
atom_c = self.atoms[idx_c]
atom_d = self.atoms[idx_d]
bond_bc = bonding_table[idx_b][idx_c]
torsiontype = bond_bc[2] # bond order
# correct for over-coordinated things like vanadium
if atom_b.is_metal and atom_b.coordination > atom_b.uff_coordination:
coord_b = atom_b.coordination
overcoordinated.add(atom_b)
else:
coord_b = atom_b.uff_coordination
if atom_c.is_metal and atom_c.coordination > atom_c.uff_coordination:
coord_c = atom_c.coordination
overcoordinated.add(atom_c)
else:
coord_c = atom_c.uff_coordination
coord_bc = (coord_b, coord_c)
V = 0
n = 0
if coord_bc == (3, 3):
# two sp3 centers
phi0 = 60.0
n = 3
vi = UFF_FULL[atom_b.uff_type][6]
vj = UFF_FULL[atom_c.uff_type][6]
# exception for a pair of group 6 sp3 atoms
if atom_b.atomic_number == 8:
vi = 2.0
n = 2
phi0 = 90.0
elif atom_b.atomic_number in (16, 34, 52, 84):
vi = 6.8
n = 2
phi0 = 90.0
if atom_c.atomic_number == 8:
vj = 2.0
n = 2
phi0 = 90.0
elif atom_c.atomic_number in (16, 34, 52, 84):
vj = 6.8
n = 2
phi0 = 90.0
V = 0.5 * KCAL_TO_KJ * (vi * vj)**0.5
elif coord_bc == (2, 2):
# two sp2 centers
ui = UFF_FULL[atom_b.uff_type][7]
uj = UFF_FULL[atom_c.uff_type][7]
phi0 = 180.0
n = 2
V = (ui*uj)**0.5 * (1.0 + 4.18*log(torsiontype))
V *= 0.5 * KCAL_TO_KJ * 5.0
elif coord_bc in [(2, 3), (3, 2)]:
# one sp3, one sp2
phi0 = 0.0
n = 6
V = 0.5 * KCAL_TO_KJ * 1.0
# exception for group 6 sp3
if coord_c == 3:
if atom_c.atomic_number in (8, 16, 34, 52):
n = 2
phi0 = 90.0
if coord_b == 3:
if atom_b.atomic_number in (8, 16, 34, 52):
n = 2
phi0 = 90.0
if abs(V) < 2e-6: # don't bother calculating this torsion
continue
# Dividing by equivalent torsions is a GG addition
equivalent_torsions = 0
for equivalent in torsions:
if equivalent[1:3] == (idx_b, idx_c):
equivalent_torsions += 1
V /= equivalent_torsions
nphi0 = n*phi0
if abs(sin(nphi0*DEG2RAD)) > 1.0e-3:
error("WARNING!!! nphi0 = %r" % nphi0)
if (atom_b.is_metal or atom_c.is_metal) and keep_metal_geometry:
V *= metal_stiffness_factor
phi_s = dihedral(atom_a, atom_b, atom_c, atom_d, cell=self.cell)
else:
phi_s = nphi0 - 180.0 # phi_s in degrees
tor_fmt = ("%-6i %-6i %-6i %-6i %i %9.4f %9.4f %i ;"
" %-6s %-6s %-6s %-6s\n")
tor_type = 1 # proper dihedrals in GROMACS
itp_file.append(tor_fmt % (
idx_a + 1, idx_b + 1, idx_c + 1, idx_d + 1,
tor_type, phi_s, V, n,
atom_a.uff_type, atom_b.uff_type,
atom_c.uff_type, atom_d.uff_type))
# Don't warn about overcoordination unless user cares.
if overcoordinated:
debug("%i overcoordinated atoms found: %s" % (len(overcoordinated),
" ".join(set(x.uff_type for x in overcoordinated))))
##
# inversions / improper dihedrals
##
itp_file.append("\n[ dihedrals ]\n; inversions (improper dihedrals)\n")
for idx_b in sorted(bonding_table):
atom_b = self.atoms[idx_b]
# Only have parameters for limited elements
# and only want those with 3 neighbours
if not atom_b.atomic_number in (6, 7, 8, 15, 33, 51, 83):
continue
elif len(bonding_table[idx_b]) != 3:
continue
idx_a, idx_c, idx_d = bonding_table[idx_b]
atom_a = self.atoms[idx_a]
atom_c = self.atoms[idx_c]
atom_d = self.atoms[idx_d]
if atom_b.uff_type in ('N_3', 'N_2', 'N_R', 'O_2', 'O_R'):
c0 = 1.0
c1 = -1.0
c2 = 0.0
koop = 6.0*KCAL_TO_KJ
elif atom_b.uff_type in ('P_3+3', 'As3+3', 'Sb3+3', 'Bi3+3'):
if atom_b.uff_type == 'P_3+3':
phi = 84.4339 * DEG2RAD
elif atom_b.uff_type == 'As3+3':
phi = 86.9735 * DEG2RAD
elif atom_b.uff_type == 'Sb3+3':
phi = 87.7047 * DEG2RAD
else:
phi = 90.0 * DEG2RAD
c1 = -4.0 * cos(phi)
c2 = 1.0
c0 = -1.0*c1*cos(phi) + c2*cos(2.0*phi)
koop = 22.0 * KCAL_TO_KJ
elif atom_b.uff_type in ('C_2', 'C_R'):
c0 = 1.0
c1 = -1.0
c2 = 0.0
koop = 6.0*KCAL_TO_KJ
if 'O_2' in (atom_a.uff_type, atom_c.uff_type, atom_d.uff_type):
koop = 50.0 * KCAL_TO_KJ
else:
continue
# three permutations:
koop /= 3
# that was easy...
if abs(c2) < 1.0e-5:
csi0 = 0.0
kcsi = koop
else:
#TODO(tdaff): check if this is multiply or divide
csi0 = arccos(-c1/(4.0*c2))/DEG2RAD # csi_0 in degrees
kcsi = (16.0*c2*c2-c1*c1)/(4.0*c2*c2)
kcsi *= koop # kcsi in kJ/mol/rad^2
# put it thrice, middle atom first
# b, a, c, d
# b, d, c, a
# b, a, d, c
inv_fmt = ("%-6i %-6i %-6i %-6i %i %9.4f %9.4f ;"
" %-6s %-6s %-6s %-6s\n")
inv_type = 2 # improper dihedrals in GROMACS
itp_file.append(inv_fmt % (
idx_b + 1, idx_a + 1, idx_c + 1, idx_d + 1,
inv_type, csi0, kcsi,
atom_b.uff_type, atom_a.uff_type,
atom_c.uff_type, atom_d.uff_type))
itp_file.append(inv_fmt % (
idx_b + 1, idx_d + 1, idx_c + 1, idx_a + 1,
inv_type, csi0, kcsi,
atom_b.uff_type, atom_d.uff_type,
atom_c.uff_type, atom_a.uff_type))
itp_file.append(inv_fmt % (
idx_b + 1, idx_a + 1, idx_d + 1, idx_c + 1,
inv_type, csi0, kcsi,
atom_b.uff_type, atom_a.uff_type,
atom_d.uff_type, atom_c.uff_type))
# done!
return gro_file, top_file, itp_file
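# Usage sketch (output file names are illustrative, except topol.itp which
# matches the "#include <topol.itp>" written into the .top file above):
# gro, top, itp = structure.to_gromacs(metal_geometry='fix')
# open('conf.gro', 'w').writelines(gro)
# open('topol.top', 'w').writelines(top)
# open('topol.itp', 'w').writelines(itp)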
def to_cssr(self, cartesian=False, no_atom_id=False):
"""
Return a Cerius2 cssr file with coordinates and cell as a list of
strings. Set no_atom_id to produce labels to work with Zeo++.
"""
space_group = (1, "P 1")
opt = 1
cell = self.cell
# spacers between all format strings ensure that there will always
# be whitespace between components.
cssr = [" "*38, "%8.3f %7.3f %7.3f\n" % cell.params[:3],
" "*21, "%8.3f %7.3f %7.3f" % cell.params[3:], " "*4,
"SPGR = %3i %-11s" % space_group, "OPT = %i\n" % opt,
"%4i %3i %s\n" % (self.natoms, cartesian, self.name),
"Structure file generated by faps\n"]
for at_idx, atom in enumerate(self.atoms):
# The default of TypeID does not work for Zeo++ as it
# treats each label as a different type
if no_atom_id:
atom_name = "%s" % (atom.type)
else:
atom_name = "%s%i" % (atom.type, at_idx+1)
# Zeo++ needs fractional coordinates
if cartesian:
string_pos = "%9.5f %9.5f %9.5f " % tuple(atom.pos)
else:
string_pos = "%9.5f %9.5f %9.5f " % tuple(atom.ifpos(cell.inverse))
cssr.append("%4i %-4s %s" % (at_idx+1, atom_name, string_pos) +
" 0"*8 + " %7.3f\n" % atom.charge)
return cssr
def to_zeoplusplus(self):
"""
Return a tuple containing a cssr file, radii file and mass file.
"""
radii = ["%-7s %-f\n" % (atom, UFF[atom][0]/2.0)
for atom in unique(self.types)]
masses = ["%-7s %-f\n" % (atom, WEIGHT[atom])
for atom in unique(self.types)]
return self.to_cssr(no_atom_id=True), radii, masses
def to_cif(self):
"""Return a CIF file with bonding and atom types."""
name = self.name
atoms = self.atoms
cell = self.cell
if hasattr(self, 'bonds'):
bonds = self.bonds
else:
bonds = {}
inv_cell = cell.inverse
type_count = {}
atom_part = []
for idx, atom in enumerate(atoms):
if atom is None:
# blanks are left in here
continue
if hasattr(atom, 'uff_type') and atom.uff_type is not None:
uff_type = atom.uff_type
else:
uff_type = '?'
if atom.element in type_count:
type_count[atom.element] += 1
else:
type_count[atom.element] = 1
atom.site = "%s%i" % (atom.element, type_count[atom.element])
atom_part.append("%-5s %-5s %-5s " % (atom.site, atom.element, uff_type))
atom_part.append("%f %f %f " % tuple(atom.ifpos(inv_cell)))
atom_part.append("%f\n" % atom.charge)
# Materials studio sorts bonds in the same order as the atoms
# but perceives bonds wrong if bond orders are mixed.
# i.e. it misreads cifs that it writes itself.
bond_properties = []
for bond, order in bonds.items():
bond_type = CCDC_BOND_ORDERS[order]
atom0, atom1 = atoms[bond[0]], atoms[bond[1]]
distance, shift = cif_bond_dist(atom0, atom1, cell)
# assume P1 for now (only one symmetry operation) so "1_???"
if shift == [0, 0, 0]:
site_symm = "."
bond_properties.append((order, bond,
(atom0.site, atom1.site, distance,
site_symm, bond_type)))
else:
site_symm = "1_%i%i%i" % (5+shift[0], 5+shift[1], 5+shift[2])
bond_properties.append((order, bond,
(atom0.site, atom1.site, distance,
site_symm, bond_type)))
# MS also includes the reverse bond over the boundary
site_symm = "1_%i%i%i" % (5-shift[0], 5-shift[1], 5-shift[2])
rbond = (bond[1], bond[0])
bond_properties.append((order, rbond,
(atom1.site, atom0.site, distance,
site_symm, bond_type)))
# Can just sort bond_properties as it will be bond_order, site1, site2
bond_part = ["%-5s %-5s %.3f %-5s %2s\n" % x[2]
for x in sorted(bond_properties)]
cif_file = [
"data_%s\n" % name.replace(' ', '_'),
"%-33s %s\n" % ("_audit_creation_date", time.strftime('%Y-%m-%dT%H:%M:%S%z')),
"%-33s %s\n" % ("_audit_creation_method", "'faps %s'" % __version__),
"%-33s %s\n" % ("_symmetry_space_group_name_H-M", "P1"),
"%-33s %s\n" % ("_symmetry_Int_Tables_number", "1"),
"%-33s %s\n" % ("_space_group_crystal_system", cell.crystal_system),
"loop_\n", "_symmetry_equiv_pos_as_xyz\n", " x,y,z\n",
"%-33s %-.10s\n" % ("_cell_length_a", cell.a),
"%-33s %-.10s\n" % ("_cell_length_b", cell.b),
"%-33s %-.10s\n" % ("_cell_length_c", cell.c),
"%-33s %-.10s\n" % ("_cell_angle_alpha", cell.alpha),
"%-33s %-.10s\n" % ("_cell_angle_beta", cell.beta),
"%-33s %-.10s\n" % ("_cell_angle_gamma", cell.gamma),
"%-33s %s\n" % ("_cell_volume", cell.volume),
# start of atom loops
"\nloop_\n",
"_atom_site_label\n",
"_atom_site_type_symbol\n",
"_atom_site_description\n",
"_atom_site_fract_x\n",
"_atom_site_fract_y\n",
"_atom_site_fract_z\n",
"_atom_type_partial_charge\n"] + atom_part
# Don't put this if there are no bonds!
if bond_part:
cif_file.extend([
# bonding loop
"\nloop_\n",
"_geom_bond_atom_site_label_1\n",
"_geom_bond_atom_site_label_2\n",
"_geom_bond_distance\n",
"_geom_bond_site_symmetry_2\n",
"_ccdc_geom_bond_type\n"] + bond_part)
return cif_file
def fastmc_postproc(self, filepath, tp_point, options):
"""Update structure properties from gcmc OUTPUT."""
startdir = os.getcwd()
os.chdir(filepath)
# Since Pete changed the output format, strip indentation and blank lines
filetemp = open('OUTPUT')
output = strip_blanks(filetemp.readlines())
filetemp.close()
# Keep track of supercell so we can get unit cell values
supercell_mult = prod(self.gcmc_supercell)
# Still positional as we need multiple values simultaneously
# and very old versions changed wording of heat of adsorption
# and enthalpy of guest
# TODO(tdaff, r2.0): deprecate reading older fastmc files
# and put Cv in the guest definition
for line in output[::-1]:
if "+/-" in line:
# This is version 1.3 of fastmc
debug("NEW OUTPUT")
line_offset = 5
read_cv = True
# In future this should be assumed to exist
for guest in self.guests:
if not hasattr(guest, 'c_v'):
guest.c_v = {}
break
else:
# +/- not found, assume old style output
debug("OLD OUTPUT")
line_offset = 0
read_cv = False
for idx, line in enumerate(output):
# Assume that block will always start like this
if 'final stats' in line:
idx += line_offset
guest_id = int(line.split()[4]) - 1
self.guests[guest_id].uptake[tp_point] = (
float(output[idx + 1].split()[-1]),
float(output[idx + 2].split()[-1]),
supercell_mult)
# This will sometimes be NaN
self.guests[guest_id].hoa[tp_point] = (
float(output[idx + 3].split()[-1]),
float(output[idx + 4].split()[-1]))
if read_cv:
self.guests[guest_id].c_v[tp_point] = (
float(output[idx + 5].split()[-1]),
float(output[idx + 6].split()[-1]))
elif 'total accepted steps' in line:
counted_steps = int(line.split()[-1])
if counted_steps < 10000:
warning("Number of accepted GCMC steps is very low; "
"only %i counted!" % counted_steps)
fold = options.getbool('fold')
find_maxima = options.getbool('find_maxima')
prob_plot = options.getbool('mc_probability_plot')
folded = False
if prob_plot and (fold or find_maxima):
sigma = options.getfloat('absl_sigma')
radius = options.getfloat('absl_radius')
cutoff = options.getfloat('absl_cutoff')
write = options.getbool('absl_write_smooth_cube')
folded = self.fold_and_maxima(fold, find_maxima, tp_point, sigma,
radius, cutoff, write)
if folded and not options.getbool('fastmc_keep_unfolded_cubes'):
debug("Removing unfolded cube files")
cubes = glob("prob_guest??_prob_??.cube")
remove_files(cubes)
unneeded_files = options.gettuple('fastmc_delete_files')
remove_files(unneeded_files)
keep_files = options.gettuple('fastmc_compress_files')
compress_files(keep_files)
os.chdir(startdir)
def absl_postproc(self, filepath, tp_point, options):
"""Update structure properties from DL_POLY outputs."""
startdir = os.getcwd()
os.chdir(filepath)
# For pretty output
temp = tp_point[0]
press = tp_point[1]
tp_string = " T=%.1f " % temp + " ".join(["P=%.2f" % x for x in press])
statis = compressed_open('STATIS').readlines()
empty_esp = float(statis[3].split()[4])
for guest in self.guests:
info("Postprocessing: %s at %s" % (guest.ident, tp_string))
binding_energies = []
for bs_idx, binding_site in enumerate(guest.binding_sites[tp_point]):
bs_directory = "%s_bs_%04d" % (guest.ident, bs_idx)
output = compressed_open(path.join(bs_directory,
'OUTPUT')).readlines()
if 'error - quaternion integrator failed' in output[-1]:
# bad guest placement
e_vdw = float('nan')
e_esp = float('nan')
# put position of original as final position, nan anyway
try:
shutil.move(path.join(bs_directory, 'CONFIG'),
path.join(bs_directory, 'REVCON'))
except IOError:
# CONFIG already moved, just doing update?
pass
# This is not a 'binding site'
magnitude = 0.0
else:
# energies
statis = compressed_open(path.join(bs_directory,
'STATIS')).readlines()
# Need to do a backwards search for the start of the last
# block since block size varies with number of atoms
# Timestep line will start with more spaces than data line
lidx = 0 # start block of final STATIS data
for ridx, line in enumerate(statis[::-1]):
if line.startswith(' ') and not 'NaN' in line:
lidx = ridx
break
else:
warning('No energies in STATIS')
e_vdw = float(statis[-lidx].split()[3])
e_esp = float(statis[-lidx].split()[4]) - empty_esp
# can just use the peak value
magnitude = binding_site[0][2]
# get the final positions
revcon = compressed_open(path.join(bs_directory,
'REVCON')).readlines()
# If using MD, skip is 4 due to velocities and forces!
revcon = revcon[6::2]
# Fix molecule if it crosses boundaries
# have to make dummy objects with fractional attribute for
# the minimum_image function
cell = self.cell
# For now, put (atom, dummy) in here
positions = []
for atom, ratom in zip(guest.atoms, revcon):
try:
dummy_pos = [float(x) for x in ratom.split()]
except ValueError:
# DL_POLY prints large numbers wrong, like
# 0.3970159038+105; these are too big anyway, so nan them
dummy_pos = [float('nan'), float('nan'), float('nan')]
# Sometimes atoms get flung out of the cell, but have
# high binding energy anyway, ignore them
if any((abs(x) > 2*cell.minimum_width) for x in dummy_pos):
e_vdw = float('nan')
e_esp = float('nan')
dummy = Atom(parent=self)
# create with the fractional, so position matches
dummy.fractional = [x % 1.0 for x in
dot(cell.inverse, dummy_pos)]
# Put dummy nearest to first atom, if it exists
if positions:
dummy.pos = minimum_image(positions[0][1], dummy,
cell.cell)
positions.append((atom, dummy))
else:
# ensure anchor is within the unit cell
dummy.pos = dot(dummy.fractional, cell.cell)
positions.append((atom, dummy))
# Extract the information we need from the tuples
positions = [(atom.type, dummy.pos)
for atom, dummy in positions]
info("Binding site %i: %f kcal/mol, %f occupancy" %
(bs_idx, (e_vdw+e_esp), magnitude))
binding_energies.append([magnitude, e_vdw, e_esp, positions])
with open('%s_absl.xyz' % guest.ident, 'w') as absl_out:
frame_number = 0
for idx, bind in enumerate(binding_energies):
energy = bind[1] + bind[2]
if energy > 0 or energy != energy:
continue
pc_elec = 100*bind[2]/energy
this_point = [
"%i\n" % len(bind[3]), # number of atoms
# idx, energy, %esp, e_vdw, e_esp, magnitude
" BS: %i, Frame: %i, Ebind= %f, esp= %.2f%%, Evdw= %f, "
"Eesp= %.2f, occ= %f\n" %
(idx, frame_number, energy, pc_elec, bind[1], bind[2],
bind[0])]
for atom in bind[3]:
this_point.append("%-5s " % atom[0])
this_point.append("%12.6f %12.6f %12.6f\n" % tuple(atom[1]))
absl_out.writelines(this_point)
frame_number += 1
if hasattr(guest, 'binding_energies'):
guest.binding_energies[tp_point] = binding_energies
else:
guest.binding_energies = {tp_point: binding_energies}
unneeded_files = options.gettuple('absl_delete_files')
remove_files(unneeded_files)
keep_files = options.gettuple('absl_compress_files')
compress_files(keep_files)
os.chdir(startdir)
def fold_and_maxima(self, fold=True, find_maxima=True, tp_point=None,
sigma=2.0, radius=0.31, cutoff=0.0, write=False):
"""Determine the positions of maxima and produce an xyz xyz file."""
from cube import Cube
folded = False
if fold:
fold = self.gcmc_supercell
else:
fold = None
for guest_idx, guest in enumerate(self.guests):
guest_locations = {}
for site_idx, sites in enumerate(guest.probability):
guest_cube = Cube("prob_guest%02i_prob_%02i.cube" %
(guest_idx+1, site_idx+1), fold=fold)
if fold is not None:
debug("Folded cube file: %s" % guest_cube.folded_name)
guest_cube.write_cube()
folded = True
if find_maxima:
guest_locations[sites] = guest_cube.maxima(
sigma, radius, cutoff, write)
if guest_locations:
if tp_point:
# We can keep them for later too, must create dict
# since it might not exist in old calculations
if not hasattr(guest, 'guest_locations'):
guest.guest_locations = {tp_point: guest_locations}
else:
guest.guest_locations[tp_point] = guest_locations
maxima = []
for sites in guest_locations:
atom_name = name_from_types(sites, guest)
for atom, magnitude in guest_locations[sites]:
maxima.append((magnitude, "%-6s" % atom_name +
"%10.6f %10.6f %10.6f " % tuple(atom) +
"%10.6f\n" % magnitude))
locations = open("%s-%s.xyz" % (self.name, guest.ident), 'w')
locations.write(" %i\nBinding sites at %r\n" %
(len(maxima), tp_point))
locations.writelines([x[1] for x in sorted(maxima, reverse=True)])
locations.close()
return folded
def remove_duplicates(self, tolerance=0.02):
"""Find overlapping atoms and remove them."""
uniq_atoms = []
found_atoms = []
for atom in self.atoms:
for uniq_atom in uniq_atoms:
if atom.type != uniq_atom.type:
continue
elif min_distance(atom, uniq_atom) < tolerance:
break
# the else clause executes when the atom was not found above
else:
uniq_atoms.append(atom)
debug("Found %i unique atoms in %i" % (len(uniq_atoms), self.natoms))
self.atoms = uniq_atoms
def check_connectivity(self):
"""
Carry out pre-optimisation checks on the structure to determine
if bonding information is included and if atom types are needed.
No bonding is inferred. Return True if connectivity is bad.
:return: bool
"""
if not self.bonds:
warning("No bonding information found. "
"Typing and optimisation will fail.")
return True
for atom in self.atoms:
if not atom.uff_type or atom.uff_type == '?':
debug("Untyped atom found.")
return True
return False
def check_close_contacts(self, absolute=1.0, covalent=None):
"""
Check for atoms that are too close. Specify either an absolute distance
in Angstrom or a scale factor for the sum of covalent radii. If a
covalent factor is specified it will take priority over an absolute
distance. Return True if close contacts found, else return False.
"""
close_contact_found = False
for atom_idx, atom in enumerate(self.atoms):
for other_idx, other in enumerate(self.atoms):
if other_idx >= atom_idx:
# short circuit half the calculations
# Can we do combinations with idx in 2.7
break
if covalent is not None:
tolerance = covalent * (atom.covalent_radius +
other.covalent_radius)
else:
tolerance = absolute
if min_distance(atom, other) < tolerance:
bond_ids = tuple(sorted([atom_idx, other_idx]))
if bond_ids not in self.bonds:
warning("Close atoms: %s(%i) and %s(%i)" %
(atom.site, atom_idx, other.site, other_idx))
close_contact_found = True
return close_contact_found
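# Worked example (radii are approximate covalent radii, for illustration only):
# with covalent=1.0 a C-H pair is flagged when closer than roughly
# 0.76 + 0.31 = 1.07 A, unless that pair already appears in self.bonds.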
def bond_length_check(self, too_long=1.25, too_short=0.7):
"""
Check if all bonds fall within a sensible range of scale factors
of the sum of the covalent radii. Return True if bad bonds are found,
otherwise False.
"""
bad_bonds = False
for bond in self.bonds:
atom = self.atoms[bond[0]]
other = self.atoms[bond[1]]
distance = min_distance(atom, other)
bond_dist = (atom.covalent_radius + other.covalent_radius)
if distance > bond_dist * too_long:
warning("Long bond found: %s(%i) and %s(%i) = %.2f A" %
(atom.site, bond[0], other.site, bond[1], distance))
bad_bonds = True
elif distance < bond_dist * too_short:
warning("Short bond found: %s(%i) and %s(%i) = %.2f A" %
(atom.site, bond[0], other.site, bond[1], distance))
bad_bonds = True
return bad_bonds
def gen_supercell(self, options):
"""Cacluate the smallest satisfactory supercell and set attribute."""
config_supercell = options.gettuple('mc_supercell', int)
config_cutoff = options.getfloat('mc_cutoff')
if config_cutoff < 12:
warning("Simulation is using a very small cutoff! I hope you "
"know, what you are doing!")
minimum_supercell = self.cell.minimum_supercell(config_cutoff)
supercell = tuple(max(i, j)
for i, j in zip(config_supercell, minimum_supercell))
if self.gcmc_supercell != supercell:
info("%s supercell requested in config" % str(config_supercell))
info("%s minimum supercell for a %.1f cutoff" %
(str(minimum_supercell), config_cutoff))
info("Constructing %s supercell for gcmc." % str(supercell))
self.gcmc_supercell = supercell
def supercell(self, scale):
"""
Iterate over all the atoms of the supercell, where scale is either an
integer for uniform scaling or a triplet of scale factors per direction.
"""
# Beware supercells larger than 2147483647 are not supported in
# python 2
if isinstance(scale, int):
scale = (scale, scale, scale)
for x_super in range(scale[0]):
for y_super in range(scale[1]):
for z_super in range(scale[2]):
offset = dot((x_super, y_super, z_super), self.cell.cell)
for atom in self.atoms:
newatom = copy(atom)
newatom.translate(offset)
yield newatom
def order_by_types(self):
"""
Sort the atoms alphabetically and group them as in old versions.
Update bonds to reflect new ordering.
"""
# Append indexes to each and sort on type (legacy sorting)
new_atoms = sorted(enumerate(self.atoms),
key=lambda x: (x[1].type, x[1].site))
self.atoms = [x[1] for x in new_atoms]
if hasattr(self, 'bonds'):
# dict of old_index: new_index
translation_table = dict((j, i) for i, j in
enumerate(x[0] for x in new_atoms))
new_bonds = {}
# Just translate atom indexes, bond data is the same
for bond in self.bonds:
new_bond = tuple(sorted([translation_table[bond[0]],
translation_table[bond[1]]]))
new_bonds[new_bond] = self.bonds[bond]
self.bonds = new_bonds
def gen_types_from_bonds(self):
"""
Pass the bonding information into openbabel to get the atomic types.
Modifies the atoms in place to set their uff_type attribute.
"""
info("Generating UFF atom types from bonding information")
if not self.bonds:
error('No bonds, cannot generate types!')
return
# import these locally so we can run faps without them
import openbabel as ob
import pybel
# Construct the molecule from atoms and bonds
obmol = ob.OBMol()
obmol.BeginModify()
for atom in self.atoms:
new_atom = obmol.NewAtom()
new_atom.SetAtomicNum(atom.atomic_number)
for bond, bond_order in self.bonds.items():
# Remember openbabel indexes from 1
obmol.AddBond(bond[0]+1, bond[1]+1, OB_BOND_ORDERS[bond_order])
obmol.EndModify()
pybel_mol = pybel.Molecule(obmol)
# need to tell the typing system to ignore all atoms in the setup
# or it will silently crash with memory issues
constraint = ob.OBFFConstraints()
for at_idx in range(pybel_mol.OBMol.NumAtoms()):
constraint.AddIgnore(at_idx)
uff = ob.OBForceField_FindForceField('uff')
uff.Setup(pybel_mol.OBMol, constraint)
uff.GetAtomTypes(pybel_mol.OBMol)
# Dative nitrogen bonds break aromaticity determination from resonant
# structures, so make anything with an aromatic bond be aromatic
for at_idx, atom, ob_atom in zip(count(), self.atoms, pybel_mol):
uff_type = ob_atom.OBAtom.GetData("FFAtomType").GetValue()
if atom.type in ['C', 'N', 'O', 'S']:
for bond, bond_order in self.bonds.items():
if at_idx in bond and bond_order == 1.5:
uff_type = uff_type[0] + '_R'
break
atom.uff_type = uff_type
def gen_neighbour_list(self, force=False):
"""All atom pair distances."""
# This can be expensive so skip if already calculated
if not force:
for atom in self.atoms:
if not hasattr(atom, 'neighbours'):
break
else:
# finished loop over all atoms
debug("Neighbour list already calculated")
return
debug("Calculating neighbour list.")
cell = self.cell.cell
inv_cell = self.cell.inverse
cpositions = [atom.ipos(cell, inv_cell) for atom in self.atoms]
fpositions = [atom.ifpos(inv_cell) for atom in self.atoms]
cell = cell.tolist()
# loop over all pairs to find minimum periodic distances
for atom, a_cpos, a_fpos in zip(self.atoms, cpositions, fpositions):
neighbours = []
for o_idx, o_cpos, o_fpos in zip(count(), cpositions, fpositions):
sep = min_dist(a_cpos, a_fpos, o_cpos, o_fpos, cell)
neighbours.append((sep, o_idx))
# First one is self == 0
# save in increasing distance order
atom.neighbours = sorted(neighbours)[1:]
## Neighbourlist printed in VASP style
#for idx, atom in enumerate(self.atoms):
# print("%4i" % (idx+1) +
# "%7.3f%7.3f%7.3f" % tuple(atom.ifpos(inv_cell)) +
# "-" +
# "".join("%4i%5.2f" % (y+1, x) for x, y in atom.neighbours if x<2.5))
def surface_area(self, probe=None, value=None, delete=False):
"""
Helper:
Return all {probe:area} if no arguments given
Return the area or None for a given probe
Set area if value given
Delete value if delete is True
Areas in A^2
"""
surface_areas = self.properties.get('surface_area', {})
if value is not None:
surface_areas[probe] = value
self.properties['surface_area'] = surface_areas
elif delete:
# Set it to None to avoid KeyErrors
surface_areas[probe] = None
del surface_areas[probe]
self.properties['surface_area'] = surface_areas
elif probe is not None:
return surface_areas.get(probe, None)
else:
return surface_areas
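# Helper usage sketch (the probe radius and area are illustrative values):
# structure.surface_area(probe=1.82, value=1500.0)  # store an area in A^2
# structure.surface_area(probe=1.82)                # -> 1500.0
# structure.surface_area()                          # -> {1.82: 1500.0}
# structure.surface_area(probe=1.82, delete=True)   # discard the stored value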
def sub_property(self, name, probe=None, value=None, delete=False):
"""
Helper:
Return all {probe:value} if no arguments given
Return the value or None for a given probe
Set the value for a probe if a value is given
Delete value if delete is True
Units are based on Angstrom
"""
property_data = self.properties.get(name, {})
if value is not None:
property_data[probe] = value
self.properties[name] = property_data
elif delete:
# Set it to None to avoid KeyErrors
property_data[probe] = None
del property_data[probe]
self.properties[name] = property_data
elif probe is not None:
return property_data.get(probe, None)
else:
return property_data
def void_volume(self):
"""Estimate the void volume based on VdW radii."""
initial_resolution = 0.2
params = self.cell.params
cell = self.cell.cell
inv_cell = np.linalg.inv(cell.T)
grid_size = [int(ceil(x/initial_resolution)) for x in params[:3]]
print(grid_size)
grid_resolution = [params[0]/grid_size[0],
params[1]/grid_size[1],
params[2]/grid_size[2]]
print(grid_resolution)
grid = np.zeros(grid_size, dtype=bool)
atoms = [(atom.ipos(cell, inv_cell).tolist(),
atom.ifpos(inv_cell),
atom.vdw_radius) for atom in self.atoms]
for x_idx in range(grid_size[0]):
print(x_idx)
for y_idx in range(grid_size[1]):
print(y_idx)
print(grid.sum())
for z_idx in range(grid_size[2]):
grid_pos = [x_idx*grid_resolution[0],
y_idx*grid_resolution[1],
z_idx*grid_resolution[2]]
grid_fpos = [float(x_idx)/grid_size[0],
float(y_idx)/grid_size[1],
float(z_idx)/grid_size[2]]
for pos, fpos, radius in atoms:
dist = min_dist(pos, fpos, grid_pos, grid_fpos, cell)
if dist < radius:
grid[x_idx, y_idx, z_idx] = 1
break
print(grid.sum())
@property
def types(self):
"""Ordered list of atom types."""
return [atom.type for atom in self.atoms]
@property
def atomic_numbers(self):
"""Ordered list of atomic numbers."""
return [atom.atomic_number for atom in self.atoms]
@property
def weight(self):
"""Unit cell weight."""
return sum([atom.mass for atom in self.atoms])
@property
def volume(self):
"""Unit cell volume."""
return self.cell.volume
@property
def natoms(self):
"""Number of atoms in the unit cell."""
return len(self.atoms)
@property
def symmetry_tree(self):
"""Tree of atoms that are symmetrically equivalent."""
tree = {}
for atom_id, atom in enumerate(self.atoms):
if atom.site in tree:
tree[atom.site].append(atom_id)
else:
tree[atom.site] = [atom_id]
if len(tree) == 1 and None in tree:
return dict((i, [i]) for i in range(self.natoms))
else:
return tree
def get_gcmc_supercell(self):
"""Supercell used for gcmc."""
return self.properties.get('supercell', (1, 1, 1))
def set_gcmc_supercell(self, value):
"""Set the supercell property for the structure."""
self.properties['supercell'] = value
gcmc_supercell = property(get_gcmc_supercell, set_gcmc_supercell)
# TODO(tdaff): properties: density, surface area, dft_energy, absorbance
class Cell(object):
"""
Crystallographic cell representations and interconversion methods.
Setter methods can be defined for different file types, however
.cell and .params will be self-consistent if set directly.
"""
def __init__(self):
"""Default to a 1A cubic box."""
self._cell = array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
self._params = (1.0, 1.0, 1.0, 90.0, 90.0, 90.0)
self._inverse = None
def from_pdb(self, line):
"""Extract cell from CRYST1 line in a pdb."""
# Must use fixed widths as -ve numbers do not leave gaps to .split()
self.params = (float(line[6:15]),
float(line[15:24]),
float(line[24:33]),
float(line[33:40]),
float(line[40:47]),
float(line[47:54]))
def from_lines(self, lines, scale=1.0):
"""Extract cell from a 3-line POSCAR cell representation."""
self.cell = array([[float(x) * scale for x in lines[0].split()],
[float(x) * scale for x in lines[1].split()],
[float(x) * scale for x in lines[2].split()]])
def to_vector_strings(self, scale=1, bohr=False, fmt="%20.12f"):
"""Generic [Super]cell vectors in Angstrom as a list of strings."""
out_format = 3 * fmt + "\n"
# If the supercell is more than 2147483647 in any direction this
# will fail in python 2, but 'long' removed for py3k forward
# compatibility
if isinstance(scale, int):
scale = [scale, scale, scale]
# else assume an iterable 3-vector
if bohr:
scale = [x / BOHR2ANG for x in scale]
return [out_format % tuple(scale[0] * self.cell[0]),
out_format % tuple(scale[1] * self.cell[1]),
out_format % tuple(scale[2] * self.cell[2])]
def minimum_supercell(self, cutoff):
"""Calculate the smallest supercell with a half-cell width cutoff."""
a_cross_b = cross(self.cell[0], self.cell[1])
b_cross_c = cross(self.cell[1], self.cell[2])
c_cross_a = cross(self.cell[2], self.cell[0])
volume = dot(self.cell[0], b_cross_c)
widths = [volume / norm(b_cross_c),
volume / norm(c_cross_a),
volume / norm(a_cross_b)]
return tuple(int(ceil(2*cutoff/x)) for x in widths)
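# Worked example: a 25 A cubic cell has perpendicular widths of 25 A in each
# direction, so a 12.5 A cutoff gives ceil(2*12.5/25) = (1, 1, 1) while a
# 12.6 A cutoff needs (2, 2, 2) to keep twice the cutoff within the supercell.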
@property
def minimum_width(self):
"""The shortest perpendicular distance within the cell."""
a_cross_b = cross(self.cell[0], self.cell[1])
b_cross_c = cross(self.cell[1], self.cell[2])
c_cross_a = cross(self.cell[2], self.cell[0])
volume = dot(self.cell[0], b_cross_c)
return volume / min(norm(b_cross_c), norm(c_cross_a), norm(a_cross_b))
@property
def imcon(self):
"""Guess cell shape and return DL_POLY imcon key."""
keys = {'none': 0,
'cubic': 1,
'orthorhombic': 2,
'parallelepiped': 3}
if np.all(self.cell == 0):
return keys['none']
elif np.all(self.params[3:] == 90):
if self.params[0] == self.params[1] == self.params[2]:
return keys['cubic']
else:
return keys['orthorhombic']
else:
return keys['parallelepiped']
@property
def crystal_system(self):
"""Return the IUCr designation for the crystal system."""
#FIXME(tdaff): must be aligned with x to work
if self.alpha == self.beta == self.gamma == 90:
if self.a == self.b == self.c:
return 'cubic'
elif self.a == self.b or self.a == self.c or self.b == self.c:
return 'tetragonal'
else:
return 'orthorhombic'
elif self.alpha == self.beta == 90:
if self.a == self.b and self.gamma == 120:
return 'hexagonal'
else:
return 'monoclinic'
elif self.alpha == self.gamma == 90:
if self.a == self.c and self.beta == 120:
return 'hexagonal'
else:
return 'monoclinic'
elif self.beta == self.gamma == 90:
if self.b == self.c and self.alpha == 120:
return 'hexagonal'
else:
return 'monoclinic'
elif self.a == self.b == self.c and self.alpha == self.beta == self.gamma:
return 'trigonal'
else:
return 'triclinic'
@property
def volume(self):
"""Calculate cell volume a.bxc."""
b_cross_c = cross(self.cell[1], self.cell[2])
return dot(self.cell[0], b_cross_c)
def get_cell(self):
"""Get the 3x3 vector cell representation."""
return self._cell
def set_cell(self, value):
"""Set cell and params from the cell representation."""
# Class internally expects an array
self._cell = array(value).reshape((3,3))
self.__mkparam()
self._inverse = np.linalg.inv(self.cell.T)
# Property so that params are updated when cell is set
cell = property(get_cell, set_cell)
def get_params(self):
"""Get the six parameter cell representation as a tuple."""
return tuple(self._params)
def set_params(self, value):
"""Set cell and params from the cell parameters."""
self._params = value
self.__mkcell()
self._inverse = np.linalg.inv(self.cell.T)
# Property so that cell is updated when params are set
params = property(get_params, set_params)
@property
def inverse(self):
"""Inverted cell matrix for converting to fractional coordinates."""
try:
if self._inverse is None:
self._inverse = np.linalg.inv(self.cell.T)
except AttributeError:
self._inverse = np.linalg.inv(self.cell.T)
return self._inverse
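# Usage sketch (assuming a Cell instance named cell): fractional coordinates
# follow from the inverse, e.g. frac_xyz = cell.inverse.dot(cart_xyz).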
@property
def a(self):
"""Magnitude of cell a vector."""
return self.params[0]
@property
def b(self):
"""Magnitude of cell b vector."""
return self.params[1]
@property
def c(self):
"""Magnitude of cell c vector."""
return self.params[2]
@property
def alpha(self):
"""Cell angle alpha."""
return self.params[3]
@property
def beta(self):
"""Cell angle beta."""
return self.params[4]
@property
def gamma(self):
"""Cell angle gamma."""
return self.params[5]
# Implementation details -- directly access the private _{cell|param}
# attributes; please don't break.
def __mkcell(self):
"""Update the cell representation to match the parameters."""
a_mag, b_mag, c_mag = self.params[:3]
alpha, beta, gamma = [x * DEG2RAD for x in self.params[3:]]
a_vec = array([a_mag, 0.0, 0.0])
b_vec = array([b_mag * cos(gamma), b_mag * sin(gamma), 0.0])
c_x = c_mag * cos(beta)
c_y = c_mag * (cos(alpha) - cos(gamma) * cos(beta)) / sin(gamma)
c_vec = array([c_x, c_y, (c_mag**2 - c_x**2 - c_y**2)**0.5])
# --- api: numpy.array ---
import numpy as np
from scipy import optimize
import z_l_v
import logging
from numerik import nr_ls
from numerik import rref, ref
import itertools
from setup_results_log import notify_status_func, setup_log_file
import timeit
eps = np.finfo(float).eps
np.set_printoptions(linewidth=200)
setup_log_file('log_bsp_pat_ue_03_2.log', with_console=False)
# Modell feststellen
alpha_tr, epsilon, sigma, psi, omega = z_l_v.use_pr_eos()
p = 35. # bar
temp = 273.15 + 220. # K
t_flash = 273.15 + 60 # K
t0_ref = 298.15 # K
r = 8.314 # J/(mol K)
namen = ['CO', 'H2', 'CO2', 'H2O', 'CH4', 'NH3', 'AR', 'O2', 'N2']
elemente = ['C', 'O', 'N', 'H', 'AR']
atom_m = np.array([
[1, 0, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 2, 1, 0, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 2],
[0, 2, 0, 2, 4, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0]
], dtype=float)
mm_el = np.array([
12.011,
15.999,
14.007,
1.008,
39.948,
]) / 1000. # kg/gmol
mm_k = atom_m.T.dot(mm_el)
red_atom_m = rref(ref(atom_m)[0])
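# Note (standard stoichiometry, stated for clarity): the rank computed below
# (rho) is the number of independent atom balances, so the number of
# independent reactions is len(namen) - rho for this 9-species system.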
rho = int(np.linalg.matrix_rank(red_atom_m))
# --- api: numpy.linalg.matrix_rank ---
"""
Classes for hierarchical parameter inference.
"""
from itertools import compress
import bilby
import numpy as np
from lintegrate import logtrapz
from scipy.interpolate import splev, splrep
from scipy.stats import expon, gaussian_kde, truncnorm
from .utils import ellipticity_to_q22, q22_to_ellipticity
#: Allowed distributions and their required hyperparameters
DISTRIBUTION_REQUIREMENTS = {
"exponential": ["mu"],
"gaussian": ["mu", "sigma", "weight"],
"deltafunction": ["peak"],
"powerlaw": ["alpha", "minimum", "maximum"],
"histogram": ["weight"],
}
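# Example (hypothetical values): a single-mode Gaussian fixed at mu = 1.0,
# sigma = 0.5 with unit weight would use the hyperparameter dictionary
# {"mu": [1.0], "sigma": [0.5], "weight": [1.0]}, matching the names above.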
class BaseDistribution(object):
"""
The base class for the distribution, as defined by a set of
hyperparameters, that you want to fit.
Parameters
----------
name: str
The parameter for which this distribution is the prior.
disttype: str
The type of distribution, e.g., 'exponential', 'gaussian'.
hyperparameters: dict
A dictionary of hyperparameters for the distribution with the keys
giving the parameter names, and values giving their fixed value, or
a :class:`bilby.core.prior.Prior` for values that are to be inferred.
low: float
The lower bound of the distribution
high: float
The upper bound of the distribution
"""
def __init__(self, name, disttype, hyperparameters={}, low=-np.inf, high=np.inf):
self.name = name # the parameter name
self.disttype = disttype
self.hyperparameters = hyperparameters
self.low = low
self.high = high
if self.low >= self.high:
raise ValueError("Lower bound is higher than upper bound!")
@property
def disttype(self):
return self._disttype
@disttype.setter
def disttype(self, disttype):
if disttype.lower() not in DISTRIBUTION_REQUIREMENTS.keys():
raise ValueError('Distribution name "{}" is not known'.format(disttype))
else:
self._disttype = disttype.lower()
@property
def hyperparameters(self):
return self._hyperparameters
@hyperparameters.setter
def hyperparameters(self, hyperparameters):
if isinstance(hyperparameters, dict):
# check it contains the required parameter names
for key in hyperparameters.keys():
if key.lower() not in DISTRIBUTION_REQUIREMENTS[self.disttype]:
raise KeyError(
'Unknown parameter "{}" for distribution '
'"{}"'.format(key, self.disttype)
)
self._hyperparameters = {
key.lower(): value for key, value in hyperparameters.items()
}
else:
raise TypeError("hyperparameters must be a dictionary")
# set fixed values
self.fixed = self._hyperparameters
@property
def parameters(self):
return list(self.hyperparameters.keys())
@property
def values(self):
return list(self.hyperparameters.values())
@property
def unpacked_parameters(self):
params = []
for key, value in self.hyperparameters.items():
if isinstance(value, (list, np.ndarray)):
for i in range(len(value)):
params.append("{0}{1:d}".format(key, i))
else:
params.append(key)
return params
@property
def unpacked_values(self):
values = []
for key, value in self.hyperparameters.items():
if isinstance(value, (list, np.ndarray)):
for i in range(len(value)):
values.append(value[i])
else:
values.append(value)
return values
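# Example (illustrative): with hyperparameters {"mu": [1.0, 2.0], "sigma": 0.1}
# unpacked_parameters is ["mu0", "mu1", "sigma"] and unpacked_values is
# [1.0, 2.0, 0.1].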
def __getitem__(self, item):
if item.lower() in self.parameters:
return self.hyperparameters[item.lower()]
elif item.lower() in self.unpacked_parameters:
return self.unpacked_values[self.unpacked_parameters.index(item.lower())]
elif item.lower() in DISTRIBUTION_REQUIREMENTS[self.disttype]:
return None
else:
raise KeyError('"{}" is not a parameter in this distribution'.format(item))
def __setitem__(self, item, value):
if item.lower() not in self.hyperparameters.keys():
if item.lower() in DISTRIBUTION_REQUIREMENTS[self.disttype]:
self._hyperparameters[item.lower()] = value
else:
raise KeyError(
'"{}" is not a parameter in this distribution'.format(item)
)
else:
self._hyperparameters[item.lower()] = value
@property
def fixed(self):
"""
Return a dictionary keyed to parameter names and with boolean values
indicating whether the parameter is fixed (True), or to be inferred
(False).
"""
return self._fixed
@fixed.setter
def fixed(self, hyperparameters):
self._fixed = dict()
for param, value in hyperparameters.items():
if isinstance(value, (bilby.core.prior.Prior, bilby.core.prior.PriorDict)):
self._fixed[param] = False
elif isinstance(value, (list, np.ndarray)):
self._fixed[param] = []
for i in range(len(value)):
if isinstance(
value[i], (bilby.core.prior.Prior, bilby.core.prior.PriorDict)
):
self._fixed[param].append(False)
elif isinstance(value[i], (int, float)):
self._fixed[param].append(True)
else:
raise TypeError("Hyperparameter type is not valid")
elif isinstance(value, (int, float)):
self._fixed[param] = True
else:
raise TypeError("Hyperparameter type is not valid")
@property
def unpacked_fixed(self):
"""
Return a flattened version of ``fixed``, with multivalued parameters
indexed.
"""
fixed = dict()
for param, value in zip(self.unpacked_parameters, self.unpacked_values):
if isinstance(value, (bilby.core.prior.Prior, bilby.core.prior.PriorDict)):
fixed[param] = False
elif isinstance(value, (int, float)):
fixed[param] = True
else:
raise TypeError("Hyperparameter type is not valid")
return fixed
@property
def unknown_parameters(self):
"""
A list of the parameters that are to be inferred.
"""
return list(
compress(
self.unpacked_parameters, ~np.array(list(self.unpacked_fixed.values()))
)
)
@property
def unknown_priors(self):
"""
A list of the :class:`~bilby.core.prior.Prior` for the parameters
that are to be inferred.
"""
return list(
compress(
self.unpacked_values, ~np.array(list(self.unpacked_fixed.values()))
)
)
def log_pdf(self, value, hyperparameters):
"""
The natural logarithm of the distribution's probability density
function at the given value.
Parameters
----------
value: float
The value at which to evaluate the probability.
hyperparameters: dict
A dictionary of the hyperparameter values that define the current
state of the distribution.
Returns
-------
lnpdf:
The natural logarithm of the probability.
"""
return np.nan
def pdf(self, value, hyperparameters):
"""
The distribution's probability density function at the given value.
Parameters
----------
value: float
The value at which to evaluate the probability.
hyperparameters: dict
A dictionary of the hyperparameter values that define the current
state of the distribution.
Returns
-------
pdf:
The probability density.
"""
return np.exp(self.log_pdf(value, hyperparameters))
def sample(self, hyperparameters, size=1):
"""
Draw a sample from the distribution as defined by the given
hyperparameters.
Parameters
----------
hyperparameters: dict
A dictionary of the hyperparameter values that define the current
state of the distribution.
size: int
The number of samples to draw from the distribution.
Returns
-------
sample:
A sample, or set of samples, from the distribution.
"""
return None
class BoundedGaussianDistribution(BaseDistribution):
"""
A distribution for estimating the parameters of a (potentially
multi-modal) bounded Gaussian distribution.
An example of using this distribution for a two component Gaussian
distribution bounded at zero and with unknown mean, standard deviations and
weights would be:
>>> from bilby.core.prior import HalfNormal, LogUniform, DirichletPriorDict
>>> # set priors for means (half-Normal distributions with mode at 0)
>>> mus = [HalfNormal(10.0, name="mu0"), HalfNormal(10.0, name="mu1")]
>>> # set priors for standard deviations (log uniform distributions)
>>> sigmas = [LogUniform(name="sigma0", minimum=0.0001, maximum=100.0),
LogUniform(name="sigma1", minimum=0.0001, maximum=100.0)]
>>> # set a Dirichlet prior on the weights (i.e., they must add up to 1)
>>> weights = DirichletPriorDict(n_dim=2, label="weight")
>>> dist = BoundedGaussianDistribution("x", mus=mus, sigmas=sigmas, weights=weights)
Note that if using a Dirichlet prior on the weights all weights must be
included and none can be set as fixed.
Parameters
----------
name: str
See :class:`~cwinpy.hierarchical.BaseDistribution`
mus: array_like
A list of values of the means of each mode of the Gaussian.
sigmas: array_like
A list of values of the standard deviations of each mode of the
Gaussian.
weights: array_like
A list of values of the weights (relative probabilities) of
each mode. This will default to equal weights if not given. If wanting
to estimate multiple weights a DirichletPriorDict should be used as in
the example above.
low: float
The lower bound of the distribution (defaults to 0, i.e., only positive
values are allowed)
high: float
The upper bound of the distribution (defaults to infinity)
"""
def __init__(self, name, mus=[], sigmas=[], weights=None, low=0.0, high=np.inf):
gaussianparameters = {"mu": [], "sigma": [], "weight": []}
if isinstance(mus, (int, float, bilby.core.prior.Prior)):
mus = [mus]
elif not isinstance(mus, (list, np.ndarray)):
raise TypeError("Unknown type for 'mus'")
if isinstance(sigmas, (int, float, bilby.core.prior.Prior)):
sigmas = [sigmas]
elif not isinstance(sigmas, (list, np.ndarray)):
raise TypeError("Unknown type for 'sigmas'")
if weights is None:
weights = [1] * len(mus)
elif not isinstance(
weights, (list, np.ndarray, bilby.core.prior.DirichletPriorDict)
):
raise TypeError("Unknown type for 'weights'")
if isinstance(weights, bilby.core.prior.DirichletPriorDict):
# DirichletPriorDict has length one less than the number of weights
nweights = len(weights) + 1
for wv in weights.values():
gaussianparameters["weight"].append(wv)
else:
nweights = len(weights)
# set the number of modes
self.nmodes = len(mus)
if len(mus) != len(sigmas) or nweights != len(mus):
raise ValueError("'mus', 'sigmas' and 'weights' must be the same length")
if self.nmodes < 1:
raise ValueError("Gaussian must have at least one mode")
for i in range(self.nmodes):
gaussianparameters["mu"].append(mus[i])
gaussianparameters["sigma"].append(sigmas[i])
if isinstance(weights, (list, np.ndarray)):
gaussianparameters["weight"].append(weights[i])
# initialise
super().__init__(
name, "gaussian", hyperparameters=gaussianparameters, low=low, high=high
)
def log_pdf(self, value, hyperparameters={}):
"""
The natural logarithm of the pdf of a 1d (potentially multi-modal)
Gaussian probability distribution.
Parameters
----------
value: float
The value at which the probability is to be evaluated.
hyperparameters: dict
A dictionary containing the current values of the hyperparameters
that need to be inferred. If there are multiple modes and weights
are not fixed then the hyperparameters should include ``n-1``
weights values, where ``n`` is the number of modes.
Returns
-------
logpdf:
The natural logarithm of the probability density at the given
value.
"""
if np.any((value < self.low) | (value > self.high)):
return -np.inf
mus = self["mu"]
sigmas = self["sigma"]
if isinstance(self.fixed["weight"], (list, np.ndarray)):
if np.any(np.asarray(self.fixed["weight"]) == True): # noqa: E712
weights = self["weight"]
else:
# all should be False for Dirichlet priors
weights = np.zeros(self.nmodes)
else:
weights = np.zeros(self.nmodes)
# get current mus and sigmas from values
for i in range(self.nmodes):
if not self.fixed["mu"][i]:
param = "mu{}".format(i)
try:
mus[i] = hyperparameters[param]
except KeyError:
raise KeyError(
"Cannot calculate log probability when "
"value '{}' is not given".format(param)
)
if not self.fixed["sigma"][i]:
param = "sigma{}".format(i)
try:
sigmas[i] = hyperparameters[param]
except KeyError:
raise KeyError(
"Cannot calculate log probability when "
"value '{}' is not given".format(param)
)
if i < (self.nmodes - 1):
if not self.fixed["weight"][i]:
param = "weight{}".format(i)
try:
weights[i] = hyperparameters[param]
except KeyError:
raise KeyError(
"Cannot calculate log probability when "
"value '{}' is not given".format(param)
)
if weights[self.nmodes - 1] == 0.0:
# set final weight
weights[self.nmodes - 1] = 1.0 - np.sum(weights[:-1])
if np.any(np.asarray(sigmas) <= 0.0):
return -np.inf
if np.any(np.asarray(weights) <= 0.0):
return -np.inf
# normalise weights
lweights = np.log(np.asarray(weights) / np.sum(weights))
# get log pdf
if isinstance(value, (float, int)):
logpdf = -np.inf
elif isinstance(value, (list, np.ndarray)):
logpdf = np.full_like(value, -np.inf)
# --- api: numpy.full_like ---
import matplotlib.pyplot as plt
import numpy as np
import pywt
import time
import pandas as pd
from sklearn.metrics import accuracy_score, log_loss, f1_score, confusion_matrix
from sklearn.utils.multiclass import unique_labels
def plot_sample(idx, data, lx_cols, ly_cols, rx_cols, ry_cols):
"""Plotting left and right eyes movements for a sample.
Args:
idx: Index of observation in data.
data: Data to be sampled with idx
lx_cols: Columns labels for left eye values on X axis
ly_cols: Columns labels for left eye values on Y axis
rx_cols: Columns labels for right eye values on X axis
ry_cols: Columns labels for right eye values on Y axis
"""
# Plotting left and right eyes movements for samples
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15,4))
ax[0].set_title('Eye coordinates on X axis - index:' + str(idx) + ' | subject:' + str(data.loc[idx,['sid']][0]))
ax[0].plot(np.arange(0, len(lx_cols)), data.loc[idx, lx_cols], label='left', color='b')
ax[0].plot(np.arange(0, len(rx_cols)), data.loc[idx, rx_cols], label='right', color='r')
ax[0].legend(loc='lower left')
ax[0].set_xlabel('Frame')
ax[0].set_ylabel('Position')
ax[0].set_ylim((-1500, 1500))
ax[1].set_title('Eye coordinates on Y axis - index:' + str(idx) + ' | subject:' + str(data.loc[idx,['sid']][0]))
ax[1].plot(np.arange(0, len(ly_cols)), data.loc[idx, ly_cols], label='left', color='b')
ax[1].plot(np.arange(0, len(ry_cols)), data.loc[idx, ry_cols], label='right', color='r')
ax[1].legend(loc='lower left')
ax[1].set_xlabel('Frame')
ax[1].set_ylabel('Position')
ax[1].set_ylim((-1500, 1500))
plt.show()
def train_predict(clf, X_train, y_train, X_test, y_test, comments, logloss=True, train=True):
''' Fit and predict data, evaluating metrics.
Args:
clf: Classifier
X_train: Features training set
y_train: Income training set
X_test: Features testing set
y_test: Income testing set
comments: Textual comments and notes
logloss: Whether to evaluate the log-loss metric
train: If false does not train, just predict
Returns:
res: Dict containing results
'''
res = {} # results
start = time.time()
if train:
clf.fit(X_train, y_train)
# predicted values
pred_train = clf.predict(X_train)
pred_test = clf.predict(X_test)
# storing results
res['train_acc'] = accuracy_score(y_train, pred_train)
res['test_acc'] = accuracy_score(y_test, pred_test)
res['train_f1'] = f1_score(y_train, pred_train, average='weighted')
res['test_f1'] = f1_score(y_test, pred_test, average='weighted')
# predicted prob
if logloss:
pred_probs_train = clf.predict_proba(X_train)
pred_probs_test = clf.predict_proba(X_test)
# storing results
res['train_logloss'] = log_loss(y_train, pred_probs_train)
res['test_logloss'] = log_loss(y_test, pred_probs_test)
else:
res['train_logloss'] = None
res['test_logloss'] = None
# storing results
res['notes'] = comments
res['time'] = time.time() - start
# Return the results
return res, pred_train, pred_test
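# Usage sketch (assuming an sklearn estimator, e.g. RandomForestClassifier):
# res, pred_tr, pred_te = train_predict(RandomForestClassifier(), X_tr, y_tr,
#                                        X_te, y_te, comments='baseline RF')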
def aggPosition(x):
"""Aggregate position data inside a segment
Args:
x: Position values in a segment
Returns:
Aggregated position (single value)
"""
return x.mean()
def aggVelocity(x):
"""Aggregate velocity inside a segment of data
Args:
x: Position values in a segment
Returns:
Aggregated velocity (single value)
"""
v = np.gradient(x)
# Return the highest absolute peak
return v[np.argmax(np.abs(v))]
def aggAcceleration(x):
"""Aggregate acceleration inside a segment of data
Args:
x: Position values in a segment
Returns:
Aggregated velocity (single value)
"""
a = np.gradient(np.gradient(x))
# Return the highest absolute peak
return a[np.argmax(np.abs(a))]
def applyfiltering(x, wavelet='db2', mode='smooth'):
"""Applying low-pass filtering for the elimination of high-frequency effects
using single level Discrete Wavelet Transform (DWT)
Args:
x: Input signal
wavelet: Wavelet to use
mode: Signal extension mode
Returns:
Filtered signal
"""
# Applying Discrete Wavelet Transform (DWT)
cA, cD = pywt.dwt(x, wavelet, mode=mode)
# # Recovering signal filtered
# x_rec = pywt.idwt(cA, None, wavelet, mode=mode)
return np.copy(cA)
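# Usage sketch (illustrative): for a 2048-sample gaze trace x, applyfiltering(x)
# returns the level-1 approximation coefficients, i.e. a low-pass filtered
# version of x at roughly half the original length.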
def getArrays(data, sid_col, sx_cols, sy_cols, lx_cols, ly_cols, rx_cols, ry_cols):
"""Applying low-pass filtering for the elimination of high-frequency effects
using single level Discrete Wavelet Transform (DWT)
Args:
data: Dataset with samples of the experiment
sid_col: Columns with subject identifier
sx_cols: Columns with stimulus point placements on X axis
sy_cols: Columns with stimulus point placements on Y axis
lx_cols: Columns with left eye gaze points on X axis
ly_cols: Columns with left eye gaze points on Y axis
rx_cols: Columns with right eye gaze points on X axis
ry_cols: Columns with right eye gaze points on Y axis
Returns:
sid, sx, sy, lx, ly, rx, ry arrays extracted from the dataset
"""
# Converting to array type
sid = np.asarray(data[sid_col]) # 'subject id' column
sx = np.asarray(data[sx_cols]) # 'sx0'..'sx2047' columns
sy = np.asarray(data[sy_cols]) # 'sy0'..'sy2047' columns
lx = np.asarray(data[lx_cols]) # 'lx0'..'lx2047' columns
ly = np.asarray(data[ly_cols]) # 'ly0'..'ly2047' columns
rx = np.asarray(data[rx_cols]) # 'rx0'..'rx2047' columns
ry = np.asarray(data[ry_cols]) # 'ry0'..'ry2047' columns
return sid, sx, sy, lx, ly, rx, ry
def doSplit(lx, ly, rx, ry, nsegments, trim=False):
"""" Segments each sample in a number of parts with the same lenght
Arguments:
lx: Left eye gaze points on X axis
ly: Left eye gaze points on Y axis
rx: Right eye gaze points on X axis
ry: Right eye gaze points on Y axis
nsegments: Number of segments
Returns:
slx: List of arrays, one array per segment
sly: List of arrays, one array per segment
srx: List of arrays, one array per segment
sry: List of arrays, one array per segment
"""
assert len(lx)==len(ly)==len(rx)==len(ry), 'Arrays with different size'
# Splitting and defining segments
nrows = len(lx)
slx = []
sly = []
srx = []
sry = []
for i in np.arange(0, nrows):
# --- api: numpy.arange ---
"""
Experimented with simple VAE, decided to keep the code.
Heavily inspired by https://github.com/FelixMohr/Deep-learning-with-Python/blob/master/VAE.ipynb
"""
import logging
import os
import sys
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
from snake_rl.utils import init_logger
from snake_rl.utils.misc import experiment_dir, model_dir
from snake_rl.utils.dnn_utils import *
logger = logging.getLogger(os.path.basename(__file__))
EXPERIMENT = 'mnist_vae_experiment'
N_LATENT = 20
def make_prior():
mu = tf.zeros(N_LATENT)
sigma = tf.ones(N_LATENT)
return tf.contrib.distributions.MultivariateNormalDiag(mu, sigma)
def make_encoder(x_input):
x_input = tf.reshape(x_input, shape=[-1, 28, 28, 1])
x = conv(x_input, 32, 3, 2)
x = conv(x, 64, 3, 2)
x = conv(x, 128, 3, 2)
x = tf.contrib.layers.flatten(x)
mu = dense(x, N_LATENT)
sigma = dense(x, N_LATENT, activation=tf.nn.softplus) # softplus is log(exp(x) + 1)
return tf.contrib.distributions.MultivariateNormalDiag(mu, sigma)
def make_mlp_encoder(x_input):
x = tf.layers.flatten(x_input)
x = tf.layers.dense(x, 200, tf.nn.relu)
x = tf.layers.dense(x, 200, tf.nn.relu)
loc = tf.layers.dense(x, N_LATENT)
scale = tf.layers.dense(x, N_LATENT, tf.nn.softplus)
return tf.contrib.distributions.MultivariateNormalDiag(loc, scale)
def make_decoder(sampled_z):
x = tf.layers.dense(sampled_z, 24, tf.nn.relu)
x = tf.layers.dense(x, 7 * 7 * 64, tf.nn.relu)
x = tf.reshape(x, [-1, 7, 7, 64])
x = tf.layers.conv2d_transpose(x, 64, 3, 2, 'SAME', activation=tf.nn.relu)
x = tf.layers.conv2d_transpose(x, 32, 3, 2, 'SAME', activation=tf.nn.relu)
x = tf.layers.conv2d_transpose(x, 1, 3, 1, 'SAME')
img = tf.reshape(x, [-1, 28, 28])
img_distribution = tf.contrib.distributions.Bernoulli(img)
img = img_distribution.probs
img_distribution = tf.contrib.distributions.Independent(img_distribution, 2)
return img, img_distribution
def make_mlp_decoder(sampled_z, data_shape):
x = sampled_z
x = tf.layers.dense(x, 200, tf.nn.relu)
x = tf.layers.dense(x, 200, tf.nn.relu)
logit = tf.layers.dense(x, np.prod(data_shape))
logit = tf.reshape(logit, [-1] + data_shape)
img_distribution = tf.contrib.distributions.Bernoulli(logit)
img = img_distribution.probs
img_distribution = tf.contrib.distributions.Independent(img_distribution, 2)
return img, img_distribution
def main():
"""Script entry point."""
init_logger()
mnist = input_data.read_data_sets(os.path.join(experiment_dir(EXPERIMENT), 'MNIST_data'))
tf.reset_default_graph()
batch_size = 128
x_input = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28], name='X')
prior = make_prior()
posterior = make_encoder(x_input)
mu, sigma = posterior.mean(), posterior.stddev()
z = posterior.sample()
generated_img, output_distribution = make_decoder(z)
likelihood = output_distribution.log_prob(x_input)
divergence = tf.distributions.kl_divergence(posterior, prior)
elbo = tf.reduce_mean(likelihood - divergence)
loss = -elbo
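# The quantity maximised is the ELBO, E_q[log p(x|z)] - KL(q(z|x) || p(z)),
# averaged over the batch; minimising `loss` is equivalent to maximising it.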
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.AdamOptimizer(1e-3).minimize(loss, global_step=global_step)
with tf.Session() as session:
saver = tf.train.Saver(max_to_keep=3)
checkpoint_dir = model_dir(EXPERIMENT)
try:
saver.restore(session, tf.train.latest_checkpoint(checkpoint_dir=checkpoint_dir))
except ValueError:
logger.info('Didn\'t find a valid restore point, start from scratch')
session.run(tf.global_variables_initializer())
batch = [np.reshape(b, [28, 28]) for b in mnist.train.next_batch(batch_size=batch_size)[0]]
i = tf.train.global_step(session, tf.train.get_global_step())
for epoch in range(20):
values = session.run(
[generated_img, loss, likelihood, divergence, mu, sigma],
feed_dict={x_input: batch},
)
# unpack the tuple
img, ls, likelihood_ls, kl_ls, mu_val, sigma_val = values
plt.imshow(np.reshape(batch[0], [28, 28]), cmap='gray')
plt.show()
plt.imshow(img[0], cmap='gray')
plt.show()
logger.info(
'i: %d, loss: %f, likeli: %f, kl_ls: %f, mu: %f, sigma: %f',
i, ls, np.mean(likelihood_ls)
# --- api: numpy.mean ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
First created on Sun Mar 11 2018
Final version on Sun Jun 24 2018
@authors: <NAME> and <NAME>
"""
#TO DO: comment and clean both methods and utils
try:
import pylab as pb
except:
pass
import numpy as np
from operator import itemgetter
# import matplotlib.pyplot as plt
import gpflow
import keras
from keras import regularizers
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from keras.utils import plot_model
from keras.layers.core import Lambda
from keras import backend as K
from scipy.optimize import minimize
from sklearn import preprocessing
from scipy.misc import logsumexp
from codes.utils import fct_minimise_Shubert, fct_minimise_Malherbe
from codes.utils import Gauss_log_likelihood, Lip_log_likelihood
#%%
class LipModel:
"""
Class containing all relevant functionalities for Lipschitz interpolation model
"""
def __init__(self, X: np.array, Y: np.array, alpha=0.5, L=0.0, norm = None):
"""
Builds basic Lipschitz interpolation model
:X: training data X
:Y: training data y, needs to be passed as array of shape (n,1);
:param alpha: weight of upper bound vs. lower bound
:param L: Best Lipschitz constant of target function, if known
:param norm: norm to be used in input space
"""
if np.shape(X)[0] == len(Y):
assert np.shape(X)[1] >= 1
else:
assert np.shape(X)[1] == len(Y)
X = np.transpose(X)
self.X = X
assert np.shape(Y)[1] == 1
self.Y = Y
self.support_idx = range(0,np.shape(X)[0])
self.diam_X = []
self.alpha = alpha
self.iter = 0
if L == 0.0:
print('Warning: Lipschitz constant currently 0. Please train model before inference')
self.L = L
self.trained_params = [0]
self.norm = norm
return
def loss_vec(self,L, x_cond, y_cond, x_eval, y_eval):
"""
Calculates vector containing absolute differences between prediction and target for each point in evaluation set
:L: value of hyperparameter L used to predict on evaluation set
:x_cond: part of training data X used as conditioning set
:y_cond: part of training data y used as conditioning set
:x_eval: part of training data X used as evaluation set to be predicted on
:y_eval: part of training data y used as evaluation set, i.e. true target values at x_eval
"""
y_eval = np.squeeze(y_eval)
assert np.shape(y_eval) == (len(y_eval),)
if L < 0:
L = 0
pred_temp = np.squeeze(np.zeros(shape=(len(x_eval[:,0]), 1)))
for ii in range(0, len(x_eval[:,0])):
pred_temp[ii] = self.inference(x_eval[ii,:], L, x_cond, y_cond)
loss_vector = np.absolute(y_eval - pred_temp)
return loss_vector
def upper_bound(self,xx,l_n,X,Y):
'''
Computes upper_bound u(xx) for target function value at xx for Lipschitz constant hyperparameter l_n and sample data X and Y
\mathfrak{u}_{l_n}(xx;X,Y) &:= \min_{s_i \in X} (f_i + l_n \mathfrak{d}(s_i,xx))
:xx: single test point
:l_n: value of hyperparameter used for prediction
:X: input of sample data X used for prediction
:Y: output of sample data Y used for prediction
'''
u_vec = Y + l_n * np.expand_dims(np.linalg.norm((X - xx),self.norm, axis=1),axis=1)
u = np.amin(u_vec)
return u
def lower_bound(self,xx, l_n,X,Y):
'''
Computes lower bound l(xx) for target function value at xx for Lipschitz constant hyperparameter l_n and sample data X and Y
\mathfrak{l}_{l_n}(xx;X,Y) &:= \max_{s_i \in X} (f_i - l_n \mathfrak{d}(s_i,xx))
:xx: single test point
:l_n: value of hyperparameter used for prediction
:X: input of sample data X used for prediction
:Y: output of sample data Y used for prediction
'''
l_vec = Y - l_n * np.expand_dims(np.linalg.norm((X - xx),self.norm, axis=1),axis=1)
l = np.amax(l_vec)
return l
def inference(self,xx, l_n, X, Y):
'''
Computes prediction of target function value at xx for Lipschitz constant hyperparameter l_n and sample data X and Y
\hat{f}_{l_n}(xx;X,Y) = \alpha \mathfrak{l}_{l_n}(xx;X,Y) + (1-\alpha)\mathfrak{u}_{l_n}(xx;X,Y)
:xx: single test point
:l_n: value of hyperparameter used for prediction
:X: input of sample data X used for prediction
:Y: output of sample data Y used for prediction
'''
y = self.alpha*self.upper_bound(xx,l_n,X,Y) + (1.0-self.alpha)*self.lower_bound(xx,l_n,X,Y)
return y
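# Worked example (illustrative): with X = [[0.], [1.]], Y = [[0.], [1.]],
# alpha = 0.5 and l_n = 1, a query at xx = [0.5] gives
# upper_bound = min(0 + 0.5, 1 + 0.5) = 0.5 and lower_bound = max(-0.5, 0.5) = 0.5,
# so inference returns 0.5.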
def train(self, method='LACKI', losstype='l2', split=0.5, optimizer='Malherbe', ARD = False, maxevals = 1000, cv = 0, number = 1):
"""
Trains Lipschitz interpolation model by learning hyperparameter l_n from data
:param method: determines which estimation is used. (sparse) LACKI or POKI
:param losstype: only relevant for POKI: which loss function is used? maxi, l1 or l2
:param split: only relevant for POKI: determines split into conditioning and evaluation data
:param optimizer: Choice of optimizer only relevant for POKI, Malherbe, Shubert and standard Scipy available
:param ARD: if true, ARD metric is used, i.e. delta(x,x') = max_(i = 1,...,d) theta_i * |x_i - x'_i| -- not implemented yet
:param maxevals: only relevant for POKI, maximum steps in optimisation in POKI
:param cv: if value for cv is passed, this is how often data is reshuffled for either POKI or (sparse) LACKI, which is automatically applied in case cv is passed
:number: number of optimizer initializations / restarts of Scipy, has no effect if Malherbe or Shubert optimizer are chosen
"""
self.trained_params = [1]
if ARD:
assert optimizer == 'Malherbe'
print('ARD not implemented yet')
pass
if ARD == False:
d = 1
if method == 'POKI':
assert losstype is not None
diam_low = 0.0
for ll in range(0,len(self.Y)):
for mm in range(1,len(self.Y)-ll):
candidate = np.linalg.norm(self.X[ll,:]-self.X[ll+mm,:],self.norm)
if (candidate > diam_low):
diam_low = candidate
self.diam_X = diam_low
data = np.c_[self.X, self.Y]
l_new_vec = np.zeros(shape = (cv+1,number))
l_new_vec[:,:] = np.nan
for ii in range(0,cv+1):
np.random.shuffle(data)
split_pos = int(np.floor(len(self.Y)*split))
data_cond ,data_eval = data[:split_pos, :], data[split_pos:,:]
x_cond = data_cond[:,0:-1]
y_cond = np.expand_dims(np.array(data_cond[:,-1]),axis=1)
x_eval = data_eval[:,0:-1]
y_eval = np.expand_dims(np.array(data_eval[:,-1]),axis=1)
def loss_fun(L):
# loss function as a function of the vector containing absolute errors for each evaluation point
if losstype =='l1':
loss = np.average(self.loss_vec(L, x_cond, y_cond, x_eval, y_eval))
elif losstype=='l2':
loss = np.average(self.loss_vec(L, x_cond, y_cond, x_eval, y_eval)**2)
elif losstype=='maxi':
loss = np.amax(self.loss_vec(L, x_cond, y_cond, x_eval, y_eval))
else: print('losstype not defined, could not train model')
return loss
if optimizer == 'Malherbe':
curr_argmin,curr_fmin,n,timer = fct_minimise_Malherbe(loss_fun,d,0.0,500.0,L=self.diam_X, maxevals=maxevals)
l_new_vec[ii,0] = curr_argmin
elif optimizer == 'Shubert':
upper_bound = 50.0
cont = True
while cont:
curr_argmin,curr_fmin,n = fct_minimise_Shubert(loss_fun,0.0,upper_bound, L=self.diam_X,errthresh =0.05, maxevals=maxevals)
if curr_argmin < 0.9*upper_bound:
cont = False
else:
upper_bound = 2.0*upper_bound
l_new_vec[ii,0] = curr_argmin
self.iter = n
elif optimizer == 'Scipy':
for pp in range(0,number):
res = minimize(loss_fun, np.random.uniform(0.0,100.0), method='nelder-mead', options={'xtol': 1e-8, 'disp': False})
l_new = res.x
l_new_vec[ii,pp] = l_new
else:
print('Error: Chosen optimizer not implemented / check spelling')
self.L = np.mean(l_new_vec[:,0])
temp = np.mean(l_new_vec, axis = 0)
self.trained_params = temp[~np.isnan(temp)]
elif method == 'LACKI':
if cv < 1.0: #corresponds to LAC, exhaustive method of evaluating all combinatorically possible slopes
L_low = 0.0
for ll in range(0,len(self.Y)-1):
for mm in range(1,len(self.Y)-ll):
if np.linalg.norm(self.X[ll,:]-self.X[ll+mm,:]) > 0.0: #ignore doublettes in input space
candidate = np.absolute(self.Y[ll]-self.Y[ll+mm])/np.linalg.norm(self.X[ll,:]-self.X[ll+mm,:])
if candidate > L_low:
L_low = candidate
self.L=L_low
else: #corresponds to SLAC, i.e. the sparse approximation of LAC
L_low = 0.0
data = np.c_[self.X, self.Y]
if np.shape(self.X)[1] == 1:
cv = 1
for ii in range(0,cv):
if np.shape(self.X)[1] == 1:
data = data[data[:,0].argsort()] #for 1D we get SLAC=LAC by sorting data in X
else:
np.random.shuffle(data)
x_cond = data[:,0:-1]
y_cond = np.expand_dims(np.array(data[:,-1]),axis=1)
for ll in range(0,len(self.Y)-1):
if np.linalg.norm(x_cond[ll,:]-x_cond[ll+1,:]) > 0.0: #ignore doublettes in input space
candidate = np.absolute(y_cond[ll]-y_cond[ll+1])/np.linalg.norm(x_cond[ll,:]-x_cond[ll+1,:])
if candidate > L_low:
L_low = candidate
self.L=L_low
else:
print("Method not defined yet, could not train model")
def correct_support(self):
'''
Corrects support of interpolation method for predictions from \mathcal{D}_n) to \tilde{\mathcal{D}}_n,
i.e. removes those points in X that for current value of hyperparameter L yield in ill-defined areas of model
'''
self.support_idx = []
for uu in range(0,np.shape(self.X)[0]):
up = self.upper_bound(self.X[uu,:],self.L,self.X,self.Y)
low = self.lower_bound(self.X[uu,:],self.L,self.X,self.Y)
if ((self.Y[uu] >= low) and (self.Y[uu] <= up)):
self.support_idx.append(uu)
def predict(self,xx):
'''
Predicts value of target function at test point xx and returns prediction as well as upper and lower bound at xx
:xx: test point xx
'''
up = self.upper_bound(xx,self.L, self.X[self.support_idx,:], self.Y[self.support_idx])
low = self.lower_bound(xx,self.L, self.X[self.support_idx,:], self.Y[self.support_idx])
dist = up-low
up = low + 1.0*dist #keep bounds at 100% bounds
low = low + 0.0*dist #keep bounds at 100% bounds
mean = self.inference(xx, self.L, self.X[self.support_idx,:], self.Y[self.support_idx])
return mean, up, low
def evaluate(self,X_test, Y_test):
'''
Evaluates Lipschitz interpolation model on test set, returning RMSE, pseudo-loglikelihood,
percentage of test points within bounds and average distance of bounds. If the model was trained multiple times
in SLAC fashion, the standard deviations of those metrics are also returned
:X_test: input values of test set
:Y_test: output values of test set
'''
if len(self.trained_params) == 1:
self.trained_params[0] = self.L
xx = X_test
M_pred = np.zeros((3*len(self.trained_params),len(xx[:,0])))
percentage = np.zeros(len(self.trained_params))
A = np.zeros(len(self.trained_params))
ll = np.zeros(len(self.trained_params))
RMSE = np.zeros(len(self.trained_params))
for jj in range(0,len(self.trained_params)):
self.L = self.trained_params[jj]
for ii in range(0,len(xx[:,0])):
if self.trained_params[0] == 0:
M_pred[3*jj+0,ii],M_pred[3*jj+2,ii],M_pred[3*jj+1,ii] = self.predict(xx[ii,:]) #if L = 0, swaps bounds so that you can calculate what happens for u = max f_i, l = min f_i
else:
M_pred[3*jj+0,ii],M_pred[3*jj+1,ii],M_pred[3*jj+2,ii] = self.predict(xx[ii,:])
A[jj] += np.abs(M_pred[3*jj+2,ii] - M_pred[3*jj+1,ii])
if ((Y_test[ii] <= M_pred[3*jj+1,ii]) and (Y_test[ii] >= M_pred[3*jj+2,ii])):
percentage[jj] += 1.0
percentage[jj] /= len(xx[:,0])
A[jj] /= len(xx[:,0])
v_ll = Lip_log_likelihood(Y_test, np.expand_dims(M_pred[3*jj+2,:],axis=1), np.expand_dims(M_pred[3*jj+1,:],axis=1))
ll[jj] = np.sum(v_ll)
v_SE = (Y_test - np.expand_dims(M_pred[3*jj+0,:],axis=1))**2
RMSE[jj] = np.mean(v_SE)**(0.5)
i = np.argmin(RMSE)
# --- api: numpy.argmin ---
from unittest import TestCase, mock
from unittest.mock import MagicMock, call
import numpy as np
from source.constants import Constants
from source.preprocessing.activity_count.activity_count_feature_service import ActivityCountFeatureService
from source.preprocessing.activity_count.activity_count_service import ActivityCountService
from source.preprocessing.epoch import Epoch
from source.preprocessing.activity_count.activity_count_collection import ActivityCountCollection
class TestActivityCountFeatureService(TestCase):
@mock.patch('source.preprocessing.activity_count.activity_count_feature_service.pd')
def test_load(self, mock_pd):
mock_pd.read_csv.return_value = mock_return = MagicMock()
mock_return.values = expected_return = np.array([1, 2, 3, 4, 5])
actual_returned_value = ActivityCountFeatureService.load("subjectA")
self.assertListEqual(expected_return.tolist(), actual_returned_value.tolist())
mock_pd.read_csv.assert_called_once_with(str(ActivityCountFeatureService.get_path("subjectA")))
def test_get_path(self):
expected_path = Constants.FEATURE_FILE_PATH.joinpath("subjectA" + '_count_feature.out')
self.assertEqual(expected_path, ActivityCountFeatureService.get_path("subjectA"))
@mock.patch('source.preprocessing.activity_count.activity_count_feature_service.np')
def test_write(self, mock_np):
feature_to_write = np.array([1, 2, 3, 4])
subject_id = "subjectA"
ActivityCountFeatureService.write(subject_id, feature_to_write)
mock_np.savetxt.assert_called_once_with(ActivityCountFeatureService.get_path(subject_id), feature_to_write,
fmt='%f')
def test_get_window(self):
timestamps = np.array([-2000, 22, 32, 50, 60, 800, 1000])
epoch = Epoch(timestamp=55, index=120)
expected_indices_in_range = np.array([1, 2, 3, 4])
actual_indices_in_range = ActivityCountFeatureService.get_window(timestamps, epoch)
self.assertEqual(expected_indices_in_range.tolist(), actual_indices_in_range.tolist())
@mock.patch.object(ActivityCountFeatureService, 'get_feature')
@mock.patch.object(ActivityCountService, 'load_cropped')
def test_build_feature_array(self, mock_load_cropped, mock_get_feature):
subject_id = "subjectA"
data = np.array(
[[1, 10], [10, 220], [20, 0], [40, 500], [70, 200], [90, 0], [100, 0], [120, 4]])
activity_count_collection = ActivityCountCollection(subject_id=subject_id, data=data)
mock_load_cropped.return_value = activity_count_collection
expected_features = [np.array([0.1]), np.array([0.2])
# --- api: numpy.array ---
from __future__ import absolute_import, division, print_function
import difflib
import functools
import math
import numbers
import os
import numpy as np
from toolz import frequencies, concat
from .core import Array
from ..highlevelgraph import HighLevelGraph
try:
AxisError = np.AxisError
except AttributeError:
try:
np.array([0]).sum(axis=5)
except Exception as e:
AxisError = type(e)
def normalize_to_array(x):
if 'cupy' in str(type(x)): # TODO: avoid explicit reference to cupy
return x.get()
else:
return x
def meta_from_array(x, ndim=None, dtype=None):
""" Normalize an array to appropriate meta object
Parameters
----------
x: array-like
ndim: int
dtype: dtype
Returns
-------
array-like
"""
# Only trust x._meta when x is a Dask Array; some libraries (e.g. zarr)
# implement a _meta attribute that is incompatible with Dask Array._meta
if hasattr(x, '_meta') and isinstance(x, Array):
x = x._meta
if not hasattr(x, 'shape') or not hasattr(x, 'dtype'):
return x
if isinstance(x, list) or isinstance(x, tuple):
ndims = [0 if isinstance(a, numbers.Number)
else a.ndim if hasattr(a, 'ndim') else len(a) for a in x]
a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]
return a if isinstance(x, list) else tuple(x)
if ndim is None:
ndim = x.ndim
try:
meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]
if meta.ndim != ndim:
if ndim > x.ndim:
meta = meta[(Ellipsis, ) + tuple(None for _ in range(ndim - meta.ndim))]
meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]
elif ndim == 0:
meta = meta.sum()
else:
meta = meta.reshape((0,) * ndim)
except Exception:
meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)
if dtype and meta.dtype != dtype:
meta = meta.astype(dtype)
return meta
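# Usage sketch (illustrative): meta_from_array(np.ones((4, 4), dtype='f8'), ndim=1)
# returns an empty float64 array with ndim == 1, suitable as a _meta template.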
def allclose(a, b, equal_nan=False, **kwargs):
a = normalize_to_array(a)
b = normalize_to_array(b)
if getattr(a, 'dtype', None) != 'O':
return np.allclose(a, b, equal_nan=equal_nan, **kwargs)
if equal_nan:
return (a.shape == b.shape and
all(np.isnan(b) if np.isnan(a) else a == b
for (a, b) in zip(a.flat, b.flat)))
return (a == b).all()
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
def _not_empty(x):
return x.shape and 0 not in x.shape
def _check_dsk(dsk):
""" Check that graph is well named and non-overlapping """
if not isinstance(dsk, HighLevelGraph):
return
assert all(isinstance(k, (tuple, str)) for k in dsk.layers)
freqs = frequencies(concat(dsk.dicts.values()))
non_one = {k: v for k, v in freqs.items() if v != 1}
assert not non_one, non_one
def assert_eq_shape(a, b, check_nan=True):
for aa, bb in zip(a, b):
if math.isnan(aa) or math.isnan(bb):
if check_nan:
assert math.isnan(aa) == math.isnan(bb)
else:
assert aa == bb
def assert_eq(a, b, check_shape=True, check_graph=True, check_meta=True, **kwargs):
a_original = a
b_original = b
if isinstance(a, Array):
assert a.dtype is not None
adt = a.dtype
if check_graph:
_check_dsk(a.dask)
a_meta = getattr(a, '_meta', None)
a = a.compute(scheduler='sync')
a_computed = a
if hasattr(a, 'todense'):
a = a.todense()
if not hasattr(a, 'dtype'):
a = np.array(a, dtype='O')
if _not_empty(a):
assert a.dtype == a_original.dtype
if check_shape:
assert_eq_shape(a_original.shape, a.shape, check_nan=False)
else:
if not hasattr(a, 'dtype'):
a = np.array(a, dtype='O')
adt = getattr(a, 'dtype', None)
if isinstance(b, Array):
assert b.dtype is not None
bdt = b.dtype
if check_graph:
_check_dsk(b.dask)
b_meta = getattr(b, '_meta', None)
b = b.compute(scheduler='sync')
b_computed = b
if not hasattr(b, 'dtype'):
b = np.array(b, dtype='O')
if hasattr(b, 'todense'):
b = b.todense()
if _not_empty(b):
assert b.dtype == b_original.dtype
if check_shape:
assert_eq_shape(b_original.shape, b.shape, check_nan=False)
else:
if not hasattr(b, 'dtype'):
b = np.array(b, dtype='O')
bdt = getattr(b, 'dtype', None)
if str(adt) != str(bdt):
# Ignore check for matching length of flexible dtypes, since Array._meta
# can't encode that information
if adt.type == bdt.type and not (adt.type == np.bytes_ or adt.type == np.str_):
diff = difflib.ndiff(str(adt).splitlines(), str(bdt).splitlines())
raise AssertionError('string repr are different' + os.linesep +
os.linesep.join(diff))
try:
assert a.shape == b.shape
if check_meta:
if hasattr(a, '_meta') and hasattr(b, '_meta'):
assert_eq(a._meta, b._meta)
if hasattr(a_original, '_meta'):
assert a_original._meta.ndim == a.ndim
if a_meta is not None:
assert type(a_original._meta) == type(a_meta)
if not (np.isscalar(a_meta) or np.isscalar(a_computed)):
assert type(a_meta) == type(a_computed)
if hasattr(b_original, '_meta'):
assert b_original._meta.ndim == b.ndim
if b_meta is not None:
assert type(b_original._meta) == type(b_meta)
if not (np.isscalar(b_meta) or np.isscalar(b_computed)):
assert type(b_meta) == type(b_computed)
assert allclose(a, b, **kwargs)
return True
except TypeError:
pass
c = a == b
if isinstance(c, np.ndarray):
assert c.all()
else:
assert c
return True
def safe_wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS):
"""Like functools.wraps, but safe to use even if wrapped is not a function.
Only needed on Python 2.
"""
if all(hasattr(wrapped, attr) for attr in assigned):
return functools.wraps(wrapped, assigned=assigned)
else:
return lambda x: x
def empty_like_safe(a, shape, **kwargs):
"""
Return np.empty_like(a, shape=shape, **kwargs) if the shape argument
is supported (requires NumPy >= 1.17), otherwise falls back to
using the old behavior, returning np.empty(shape, **kwargs).
"""
try:
return np.empty_like(a, shape=shape, **kwargs)
except TypeError:
return np.empty(shape, **kwargs)
def full_like_safe(a, fill_value, shape, **kwargs):
"""
Return np.full_like(a, fill_value, shape=shape, **kwargs) if the
shape argument is supported (requires NumPy >= 1.17), otherwise
falls back to using the old behavior, returning
np.full(shape, fill_value, **kwargs).
"""
try:
return np.full_like(a, fill_value, shape=shape, **kwargs)
except TypeError:
return np.full(shape, fill_value, **kwargs)
# --- api: numpy.full ---
#!/usr/bin/python
# MIT License
#
# Copyright (c) 2018-2020 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import scipy as sp
import multiprocessing as mp
from functools import partial
from scipy import spatial
import timeit
try:
import torch
except ImportError:
_has_torch = False
else:
_has_torch = True
def _from_r_alias(obj, r, lat_and_inv=None):
"""
Alias for instance method that allows the method to be called in a
multiprocessing pool
"""
return obj._from_r(r, lat_and_inv=lat_and_inv)
class Desc(object):
def __init__(self, n_atoms, max_processes=None, use_descriptor='coulomb_matrix'):
"""
Generate descriptors and their Jacobians for molecular geometries,
including support for periodic boundary conditions.
Parameters
----------
n_atoms : int
Number of atoms in the represented system.
max_processes : int, optional
Limit the max. number of processes. Otherwise
all CPU cores are used. This parameters has no
effect if `use_torch=True`.
"""
self.n_atoms = n_atoms
self.dim_i = 3 * n_atoms
self.use_descriptor = use_descriptor
# Size of the resulting descriptor vector.
if self.use_descriptor == 'coulomb_matrix' or self.use_descriptor == 'exp_decay_matrix':
# Size of the resulting descriptor vector.
self.dim = (n_atoms * (n_atoms - 1)) // 2
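# e.g. (illustrative) a 3-atom system has 3 * (3 - 1) / 2 = 3 unique
# atom pairs, so the descriptor vector has 3 entries.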
# Precompute indices for nonzero entries in descriptor derivatives.
self.d_desc_mask = np.zeros((n_atoms, n_atoms - 1), dtype=np.int)
for a in range(n_atoms): # for each partial derivative
rows, cols = np.tril_indices(n_atoms, -1)
self.d_desc_mask[a, :] = np.concatenate(
[np.where(rows == a)[0], np.where(cols == a)[0]]
)
self.M = np.arange(1, n_atoms) # indexes matrix row-wise, skipping diagonal
for a in range(1, n_atoms):
self.M = np.concatenate((self.M, np.delete(np.arange(n_atoms), a)))
self.A = np.repeat(
np.arange(n_atoms), n_atoms - 1
) # [0, 0, ..., 1, 1, ..., 2, 2, ...]
self.d_desc = np.zeros(
(self.dim, n_atoms, 3)
) # template for descriptor matrix (zeros are important)
# --- TODO: Add precomputable variables for new descriptor here
# elif self.use_descriptor == '':
# pass
self.max_processes = max_processes
def from_R(self, R, lat_and_inv=None, callback=None):
"""
Generate descriptor and its Jacobian for multiple molecular geometries
in Cartesian coordinates.
Parameters
----------
R : :obj:`numpy.ndarray`
Array of size M x 3N containing the Cartesian coordinates of
each atom.
lat_and_inv : tuple of :obj:`numpy.ndarray`, optional
Tuple of 3 x 3 matrix containing lattice vectors as columns and its inverse.
callback : callable, optional
Descriptor and descriptor Jacobian generation status.
current : int
Current progress (number of completed descriptors).
total : int
Task size (total number of descriptors to create).
sec_disp_str : :obj:`str`, optional
Once complete, this string contains the
time it took complete this task (seconds).
Returns
-------
:obj:`numpy.ndarray`
Array of size M x N(N-1)/2 containing the descriptor representation
for each geometry.
:obj:`numpy.ndarray`
Array of size M x N(N-1)/2 x 3N containing all partial
derivatives of the descriptor for each geometry.
"""
# Add singleton dimension if input is (,3N).
if R.ndim == 1:
R = R[None, :]
M = R.shape[0]
if M == 1:
return self._from_r(R, lat_and_inv)
R_desc = np.empty([M, self.dim])
# --- api: numpy.empty ---
# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
from .mpi import MPITestCase
import os
import healpy as hp
import numpy as np
from .._libtoast import pointing_matrix_healpix
from ..healpix import HealpixPixels
from ..todmap import TODHpixSpiral, OpPointingHpix
from .. import qarray as qa
from ._helpers import create_outdir, create_distdata, boresight_focalplane
class OpPointingHpixTest(MPITestCase):
def setUp(self):
fixture_name = os.path.splitext(os.path.basename(__file__))[0]
self.outdir = create_outdir(self.comm, fixture_name)
# Create one observation per group, and each observation will have
# one detector per process and a single chunk. Data within an
# observation is distributed by detector.
self.data = create_distdata(self.comm, obs_per_group=1)
self.ndet = self.data.comm.group_size
# Create detectors with default properties
(
dnames,
dquat,
depsilon,
drate,
dnet,
dfmin,
dfknee,
dalpha,
) = boresight_focalplane(self.ndet)
# A small number of samples
self.totsamp = 10
# Populate the observations (one per group)
tod = TODHpixSpiral(
self.data.comm.comm_group,
dquat,
self.totsamp,
detranks=self.data.comm.group_size,
)
self.data.obs[0]["tod"] = tod
def tearDown(self):
del self.data
def test_pointing_matrix_healpix2(self):
nside = 64
npix = 12 * nside ** 2
hpix = HealpixPixels(64)
nest = True
phivec = np.radians(
[-360, -270, -180, -135, -90, -45, 0, 45, 90, 135, 180, 270, 360]
)
nsamp = phivec.size
eps = 0.0
cal = 1.0
mode = "IQU"
nnz = 3
hwpang = np.zeros(nsamp)
flags = np.zeros(nsamp, dtype=np.uint8)
pixels = np.zeros(nsamp, dtype=np.int64)
weights = np.zeros([nsamp, nnz], dtype=np.float64)
theta = np.radians(135)
psi = np.radians(135)
quats = []
xaxis, yaxis, zaxis = np.eye(3)
for phi in phivec:
phirot = qa.rotation(zaxis, phi)
quats.append(qa.from_angles(theta, phi, psi))
quats = np.vstack(quats)
pointing_matrix_healpix(
hpix,
nest,
eps,
cal,
mode,
quats.reshape(-1),
hwpang,
flags,
pixels,
weights.reshape(-1),
)
failed = False
bad = np.logical_or(pixels < 0, pixels > npix - 1)
nbad = np.sum(bad)
if nbad > 0:
print(
"{} pixels are outside of the map. phi = {} deg".format(
nbad, np.degrees(phivec[bad])
)
)
failed = True
self.assertFalse(failed)
return
def test_pointing_matrix_healpix(self):
nside = 64
hpix = HealpixPixels(64)
nest = True
psivec = np.radians([-180, -135, -90, -45, 0, 45, 90, 135, 180])
# psivec = np.radians([-180, 180])
nsamp = psivec.size
eps = 0.0
cal = 1.0
mode = "IQU"
nnz = 3
hwpang = np.zeros(nsamp)
flags = np.zeros(nsamp, dtype=np.uint8)
pixels = np.zeros(nsamp, dtype=np.int64)
weights = np.zeros([nsamp, nnz], dtype=np.float64)
# --- api: numpy.zeros ---