prompt (string, 15 to 655k chars) | completion (string, 3 to 32.4k chars) | api (string, 8 to 52 chars)
---|---|---
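Each row below is a flattened record with these three fields: a code prompt that stops just before an API call, the completion holding that call, and the api naming the fully-qualified function. As a minimal illustrative sketch (the file name and JSONL loading format here are assumptions for illustration only), one record could be reassembled like this:

import json

# Hypothetical file name; the dataset's actual storage format is not shown here.
with open("api_completion_rows.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        # Joining prompt and completion recovers the snippet up to and including
        # the predicted API call; 'api' is the label, e.g. "numpy.array".
        snippet = row["prompt"] + row["completion"]
        print(row["api"], len(snippet))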
import csv
import click
import numpy as np
from tqdm import tqdm
from src.helpers import paths
from src.helpers.flags import AttackModes, Verbose
from src.multimodal import multimodal
from src.multimodal.data import make_dataset
from src.multimodal.features import build_features
from src.regnet.data.make_dataset import cleanUp
INP_NRM_LOG = None
TRA_NRM_LOG = None
INP_ATK_LOG = None
TRA_ATK_LOG = None
###############################################################################
# HYPERPARAMETERS
###############################################################################
config = paths.config.read(paths.config.multimodal())
BATCH_SIZE = config['HYPERPARAMETERS']['BATCH_SIZE']
# @click.command()
# def main():
# train_model.evaluate(dataset='valid')
# print()
# train_model.evaluate(dataset='test')
def get_all_dates():
path = paths.DATA_EXTERNAL_PATH.joinpath('KITTI')
assert path.exists(), 'No Dataset Found'
dates = list()
for current_file in path.iterdir():
if current_file.is_dir():
filename = current_file.name.split('_')
if len(filename) >= 3:
dates.append(current_file.name)
return np.sort(dates)
def get_all_drives(drive_date):
path = paths.depth.external_frame(drive_date, 0, 0).parents[3]
if not path.exists():
return list()
drives = list()
for current_file in path.iterdir():
if current_file.is_dir():
drive = current_file.name.split('_')
if len(drive) < 4:
continue
drive = int(drive[4])
drives.append(drive)
drives = np.sort(drives)
drive_info = list()
for drive in drives:
drive_info.append((drive_date, drive))
return drive_info
def get_all_frames(drive_date, drive):
path = paths.depth.external_frame(drive_date, drive, 0)
path_dir = path.parents[3]
if not path_dir.exists():
return list()
frames = paths.similar_files(path, as_int=True)
frames = np.sort(frames)
frame_info = list()
for frame in frames:
frame_info.append((drive_date, int(drive), int(frame)))
return frame_info
def load_pretrained_model():
# Create the network
net = multimodal.Multimodal()
# Load pretrained model
model_path = str(paths.checkpoints.multimodal()) # ./checkpoints/multimodal/train
net.model.load_weights(model_path)
return net.model
def init_logs():
output_path = paths.DATA_PROCESSED_PATH.joinpath('logs')
output_path.mkdir(exist_ok=True, parents=True) # ensure directory exists
global INP_NRM_LOG, TRA_NRM_LOG
global INP_ATK_LOG, TRA_ATK_LOG
INP_NRM_LOG = output_path.joinpath('inp_nrm.csv')
TRA_NRM_LOG = output_path.joinpath('tra_nrm.csv')
INP_ATK_LOG = output_path.joinpath('inp_atk.csv')
TRA_ATK_LOG = output_path.joinpath('tra_atk.csv')
logs = [
INP_NRM_LOG,
TRA_NRM_LOG,
INP_ATK_LOG,
TRA_ATK_LOG,
]
csv_data = [['drive_date', 'drive', 'frame', 'result']]
for log in logs:
with open(log, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data)
@click.command()
def main(batch_size=BATCH_SIZE):
print('# Initiating logs...')
init_logs()
print('# Getting all frames...')
drive_dates = get_all_dates()
drives_info = list()
for drive_date in drive_dates:
drives_info.extend(get_all_drives(drive_date))
frames_info = list()
for drive_date, drive in drives_info:
frames_info.extend(get_all_frames(drive_date, drive))
print('# Loading model...')
model = load_pretrained_model()
num_batches = int(len(frames_info)//batch_size)
for i in tqdm(range(num_batches), desc='# Testing all frames', ascii=True):
batch = frames_info[i*batch_size: min((i+1)*batch_size, len(frames_info))]
feed_forward(model, batch, attack_type=AttackModes.INPAINT)
feed_forward(model, batch, attack_type=AttackModes.TRANSLATE)
def feed_forward(model, batch, attack_type, batch_size=BATCH_SIZE):
# Generate Data
make_dataset.make_data(batch, name='test', attack_type=attack_type,
verbose=Verbose.SILENT, keep=False)
# Load Data
batch_nrm = build_features.get_test_batches(batch_size=batch_size, infinite=False, attack=False)
itr_nrm = build_features.make_iterator(batch_nrm)
batch_atk = build_features.get_test_batches(batch_size=batch_size, infinite=False, normal=False)
itr_atk = build_features.make_iterator(batch_atk)
# Predict
pred_nrm = model.predict_generator(generator=itr_nrm, steps=1, workers=0)
pred_nrm = np.argmax(pred_nrm, axis=1)
pred_atk = model.predict_generator(generator=itr_atk, steps=1, workers=0)
pred_atk = np.argmax(pred_atk, axis=1)
# Log Results
if attack_type == AttackModes.INPAINT:
log_atk = INP_ATK_LOG
log_nrm = INP_NRM_LOG
elif attack_type == AttackModes.TRANSLATE:
log_atk = TRA_ATK_LOG
log_nrm = TRA_NRM_LOG
batch = np.array(batch)
batch = batch.T
data_atk = zip(batch[0],
np.array(batch[1]).astype(int),
| np.array(batch[2]) | numpy.array |
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal, assert_equal)
from nose.tools import assert_raises
from pystruct.models import NodeTypeEdgeFeatureGraphCRF, EdgeFeatureGraphCRF
from pystruct.inference.linear_programming import lp_general_graph
from pystruct.inference import compute_energy, get_installed
from pystruct.utils import make_grid_edges, edge_list_to_features
from pystruct.datasets import generate_blocks_multinomial
def test_checks():
g = NodeTypeEdgeFeatureGraphCRF(
1 #how many node type?
, [4] #how many labels per node type?
, [3] #how many features per node type?
, np.array([[3]]) #how many features per node type X node type?
)
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2], [2,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
3 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2], [2,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5, 3] #how many features per node type?
, np.array([[1, 2], [2,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
3 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2], [2,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2, 3], [2,3,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2], [99,4]]) #how many features per node type X node type?
)
def debug_joint_feature():
# -------------------------------------------------------------------------------------------
#print "---MORE COMPLEX GRAPH :) ---------------------------------------------------------------------"
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3] #how many possible labels per node type?
, [3, 4] #how many features per node type?
, np.array([ [1, 2]
, [2, 3]]) #how many features per node type X node type?
)
l_node_f = [ np.array([ [1,1,1], [2,2,2] ])
, np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
]
l_edges = [ np.array([[0, 1]]) #type 0 node 0 to type 0 node 0
, np.array([[0, 1]])
, None
, None
]
l_edge_f = [ np.array([[.111]])
, np.array([[.221, .222]])
, None
, None
]
x = (l_node_f, l_edges, l_edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([ np.array([0, 1]),
np.array([0, 1, 2])
])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array(
[ 1. , 1., 1. , 2., 2., 2.
, 0.11 , 0.12 , 0.13 , 0.14 , 0.21 , 0.22 , 0.23 , 0.24 , 0.31 , 0.32 , 0.33 , 0.34
, 0. , 0.111, 0. , 0. , 0. , 0.221,
0. , 0. , 0. , 0. , 0. , 0.222, 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.
]))
def get_simple_graph_structure():
g = NodeTypeEdgeFeatureGraphCRF(
1 #how many node type?
, [4] #how many labels per node type?
, [3] #how many features per node type?
, np.array([[3]]) #how many features per node type X node type?
)
return g
def get_simple_graph():
node_f = [ np.array([[1,1,1],
[2,2,2]])
]
edges = [ np.array([[0,1]])
] #an edge from 0 to 1
edge_f = [ np.array([[3,3,3]])
]
return (node_f, edges, edge_f)
def get_simple_graph2():
node_f = [ np.array([ [1,1,1]
, [2,2,2]]) ]
edges = [ np.array( [[0,1], #an edge from 0 to 1
[0,0] #an edge from 0 to 0
]) ]
edge_f = [ np.array([
[3,3,3],
[4,4,4]
]) ]
return (node_f, edges, edge_f)
def test_flatten_unflattenY():
g, (node_f, edges, edge_f) = get_simple_graph_structure(), get_simple_graph()
y = np.array([1,2])
l_nf = [ np.zeros((2,3)) ] #list of node feature , per type
X = (l_nf, None, None) #we give no edge
y_ref = [ np.array([1,2]) ]
assert all( [ (y_typ1 == y_typ2).all() for y_typ1, y_typ2 in zip(g.unflattenY(X, y), y_ref) ])
assert (y == g.flattenY(g.unflattenY(X, y))).all()
#============================================
g, x, y = more_complex_graph()
Y = [ np.array([0, 0])
, np.array([0, 0, 0]) #we start again at zero on 2nd type
]
y = np.hstack([ np.array([0, 0])
, 2+np.array([0, 0, 0])
])
l_nf = [ np.zeros( (2,3) ), np.zeros( (3, 4) )] #2 node with 3 features, 3 node with 4 features
X = (l_nf, None, None) #we give no edge
assert (g.flattenY(Y) == y).all()
#print g.unflattenY(X, y)
assert all( [ (y_typ1 == y_typ2).all() for y_typ1, y_typ2 in zip(g.unflattenY(X, y), Y) ])
l_nf = [ np.zeros( (1,3) ), np.zeros( (3, 4) )] #2 node with 3 features, 3 node with 4 features
X = (l_nf, None, None) #we give no edge
assert_raises(ValueError, g.unflattenY, X, y)
def test_joint_feature():
#print "---SIMPLE---------------------------------------------------------------------"
g, (node_f, edges, edge_f) = get_simple_graph_structure(), get_simple_graph()
x = (node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([1,2])
# y = np.array([1,0])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(g.joint_feature(x,y)
, np.array([ 0., 0., 0., 1., 1., 1., 2., 2., 2., 0., 0., 0.
, 0.,
0., 0., 0., 0., 0., 3., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 3., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 3., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([0,0])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(g.joint_feature(x,y)
, np.array([ 3., 3., 3., 0., 0., 0., 0., 0., 0., 0., 0., 0., 3.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 3., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 3., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([0,1])
node_f = [ np.array([[1.1,1.2,1.3], [2.1,2.2,2.3]]) ]
edge_f = [ np.array([[3.1,3.2,3.3]]) ]
x = (node_f, edges, edge_f)
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
assert_array_equal(g.joint_feature(x,y)
, np.array([ 1.1, 1.2, 1.3, 2.1, 2.2, 2.3, 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 3.1, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 3.2, 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 3.3, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ])
)
#print "---SIMPLE + 2nd EDGE--------------------------------------------------------"
node_f, edges, edge_f = get_simple_graph2()
x = (node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([1,2])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf
, np.array([ 0., 0., 0., 1., 1., 1., 2., 2., 2., 0., 0., 0., 0.,
0., 0., 0., 0., 4., 3., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 4., 3., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 4., 3., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([0,0])
#print y
g.initialize(x, y)
#print "joint_feature = \n", `g.joint_feature(x,y)`
#print
assert_array_equal(g.joint_feature(x,y)
, np.array([ 3., 3., 3., 0., 0., 0., 0., 0., 0., 0., 0., 0., 7.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 7., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 7., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
)
def more_complex_graph():
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3] #how many labels per node type?
, [3, 4] #how many features per node type?
, np.array([ [1, 2]
, [2, 3]]) #how many features per node type X node type?
)
# nodes = np.array( [[0,0], [0,1], [1, 0], [1, 1], [1, 2]] )
node_f = [ np.array([ [1,1,1], [2,2,2] ])
, np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
]
edges = [ np.array( [ [0,1] #an edge from 0 to 1
])
, np.array( [
[0,0] #an edge from typ0:0 to typ1:0
])
, None
, None
]
edge_f = [ np.array([[.111]])
, np.array([[.221, .222]])
, None
, None
]
x = (node_f, edges, edge_f)
y = np.hstack([ np.array([0, 0])
, 2+np.array([0, 0, 0])
])
return g, x, y
def test_joint_feature2():
# -------------------------------------------------------------------------------------------
#print "---MORE COMPLEX GRAPH :) ---------------------------------------------------------------------"
g, x, y = more_complex_graph()
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 3. , 3. , 3. , 0. , 0. , 0. , 0.63 , 0.66 ,
0.69 , 0.72 , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.111, 0. , 0. , 0. , 0.221, 0. ,
0. , 0. , 0. , 0. , 0.222, 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]))
#print "---MORE COMPLEX GRAPH :) -- BIS -------------------------------------------------------------------"
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3] #how many labels per node type?
, [3, 4] #how many features per node type?
, np.array([ [1, 2]
, [2, 3]]) #how many features per node type X node type?
)
node_f = [ np.array([ [1,1,1], [2,2,2] ])
, np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
]
edges = [ np.array( [ [0,1]] ), #an edge from 0 to 1
np.array( [ [0,2]] ) #an edge from 0 to 2
, None, None
]
edge_f = [ np.array([[.111]])
, np.array([[.221, .222]])
, None
, None
]
x = ( node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([np.array([0, 1]),
2+np.array([0, 1, 2])])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 1. , 1. , 1. , 2. , 2. , 2. , 0.11 , 0.12 ,
0.13 , 0.14 , 0.21 , 0.22 , 0.23 , 0.24 , 0.31 , 0.32 ,
0.33 , 0.34 , 0. , 0.111, 0. , 0. , 0. , 0. ,
0.221, 0. , 0. , 0. , 0. , 0. , 0.222, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]))
#print "MORE COMPLEX GRAPH :) -- BIS OK"
#print "--- REORDERED MORE COMPLEX GRAPH :) ---------------------------------------------------------------------"
node_f = [ np.array([ [2,2,2], [1,1,1] ])
, np.array([ [.31, .32, .33, .34], [.11, .12, .13, .14], [.21, .22, .23, .24]])
]
edges = [ np.array( [ [1, 0]] ),
np.array( [ [1,0]] ) #an edge from 0 to 2
, None, None
]
edge_f = [ np.array([[.111]])
, np.array([[.221, .222]])
, None
, None
]
x = ( node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([np.array([1, 0]),
2+np.array([2, 0, 1])])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 1. , 1. , 1. , 2. , 2. , 2. , 0.11 , 0.12 ,
0.13 , 0.14 , 0.21 , 0.22 , 0.23 , 0.24 , 0.31 , 0.32 ,
0.33 , 0.34 , 0. , 0.111, 0. , 0. , 0. , 0. ,
0.221, 0. , 0. , 0. , 0. , 0. , 0.222, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]))
def test_joint_feature3():
# -------------------------------------------------------------------------------------------
#print "---MORE COMPLEX GRAPH AGAIN :) ---------------------------------------------------------------------"
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3] #how many labels per node type?
, [3, 4] #how many features per node type?
, np.array([ [0, 2]
, [2, 3]]) #how many features per node type X node type?
)
# nodes = np.array( [[0,0], [0,1], [1, 0], [1, 1], [1, 2]] )
node_f = [ np.array([ [1,1,1], [2,2,2] ])
, np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
]
edges = [ None
, np.array( [
[0,1] #an edge from typ0:0 to typ1:1
])
, None
, np.array( [
[0,1], #an edge from typ0:0 to typ1:1
[1,2] #an edge from typ1:1 to typ1:2
])
]
edge_f = [ None
, np.array([[.221, .222]])
, None
, np.array([[.01, .02, .03 ],
[.001, .002, .003]])
]
x = (node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([ np.array([0, 0])
, 2+np.array([0, 0, 0])
])
#print y
g.initialize(x, y)
#print g.size_unaries
#print g.size_pairwise
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 3. , 3. , 3. , 0. , 0. , 0. ,
0.63 , 0.66 , 0.69 , 0.72 , 0. , 0., 0., 0. , 0., 0., 0. , 0.,
#edges 0 to 0 2x2 states
#typ0 typ0 EMPTY
#typ0 typ1
.221, 0., 0., 0., 0., 0.,
.222, 0., 0., 0., 0., 0.,
#typ1 typ0
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
#typ1 typ1
0.011, 0., 0., 0., 0., 0., 0., 0., 0.,
0.022, 0., 0., 0., 0., 0., 0., 0., 0.,
0.033, 0., 0., 0., 0., 0., 0., 0., 0.
])
)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([ np.array([0, 1])
, 2+np.array([1, 1, 0])
])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 1. , 1. , 1. , 2. , 2. , 2. ,
.31, .32, .33, .34 , .32, .34, .36, .38 , 0., 0., 0. , 0.,
#edges 0 to 0 2x2 states
#typ0 typ0 EMPTY
#typ0 typ1
0., .221, 0., 0., 0., 0.,
0., .222, 0., 0., 0., 0.,
#typ1 typ0
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
#typ1 typ1
0., 0., 0., 0.001, 0.01, 0., 0., 0., 0.,
0., 0., 0., 0.002, 0.02, 0., 0., 0., 0.,
0., 0., 0., 0.003, 0.03, 0., 0., 0., 0.
])
)
w = np.array([ 1,1,1, 2,2,2, 10,10,10,10, 20,20,20,20, 30,30,30,30 ]
+[1.0]*51, dtype=np.float64
)
#print `w`
ret_u = g._get_unary_potentials(x, w)
#print `ret_u`
assert len(ret_u) == 2
assert_array_almost_equal(ret_u[0], np.array([ #n_nodes x n_states
[3, 6],
[6, 12]]))
assert_array_almost_equal(ret_u[1], np.array([ #n_nodes x n_states
[5, 10, 15],
[9, 18, 27],
[13, 26, 39]]))
assert len(w) == g.size_joint_feature
ret_pw = g._get_pairwise_potentials(x, w)
# for _pw in ret_pw:
# print "_pw ", `_pw`
pw00, pw01, pw10, pw11 = ret_pw
assert len(pw00) == 0
assert_array_almost_equal(pw01,np.array([ #n_edges, n_states, n_states
[[0.443, 0.443, 0.443],
[0.443, 0.443, 0.443]]
]))
assert len(pw10) == 0
assert_array_almost_equal(pw11,np.array([ #n_edges, n_states, n_states
[[0.06 , 0.06 , 0.06],
[0.06 , 0.06 , 0.06],
[0.06 , 0.06 , 0.06]]
,
[[0.006, 0.006, 0.006],
[0.006, 0.006, 0.006],
[0.006, 0.006, 0.006]]
]))
def test_unary_potentials():
#print "---SIMPLE---------------------------------------------------------------------"
#g, (node_f, edges, edge_f) = get_simple_graph_structure(), get_simple_graph()
g = NodeTypeEdgeFeatureGraphCRF(
1 #how many node type?
, [4] #how many labels per node type?
, [3] #how many features per node type?
, np.array([[3]]) #how many features per node type X node type?
)
node_f = [ np.array([[1,1,1],
[2,2,2]])
]
edges = [ np.array([[0,1]])
] #an edge from 0 to 1
edge_f = [ np.array([[3,3,3]])
]
x = (node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([ np.array([1,2])])
# y = np.array([1,0])
#print y
g.initialize(x, y)
gref = EdgeFeatureGraphCRF(4,3,3)
xref = (node_f[0], edges[0], edge_f[0])
wref = np.arange(gref.size_joint_feature)
potref = gref._get_unary_potentials(xref, wref)
#print `potref`
w = np.arange(g.size_joint_feature)
pot = g._get_unary_potentials(x, w)
#print `pot`
assert_array_equal(pot, [potref])
pwpotref = gref._get_pairwise_potentials(xref, wref)
#print `pwpotref`
pwpot = g._get_pairwise_potentials(x, w)
#print `pwpot`
assert_array_equal(pwpot, [pwpotref])
# def test_inference_util():
# g = NodeTypeEdgeFeatureGraphCRF(
# 3 #how many node type?
# , [2, 3, 1] #how many labels per node type?
# , [3, 4, 1] #how many features per node type?
# , np.array([ [1, 2, 2]
# , [2, 3, 2]
# , [2, 2, 1]]) #how many features per node type X node type?
# )
# node_f = [ np.array([ [2,2,2], [1,1,1] ])
# , np.array([ [.31, .32, .33, .34], [.11, .12, .13, .14], [.21, .22, .23, .24]])
# , np.array([ [77], [88], [99]])
# ]
# edges = [ np.array( [ [1, 0]] ),
# np.array( [ [1,0]] ) #an edge from 0 to 2
# , None
#
# , None
# , None
# , None
#
# , np.array( [[1,1]] )
# , None
# , None ]
#
# x = ( node_f, edges, None)
#
# reindexed_exdges = g._index_all_edges(x)
# #print `reindexed_exdges`
# assert_array_equal(reindexed_exdges,
# np.array( [[1,0],
# [1,2],
# [6,1]]))
#
# def report_model_config(crf):
# print crf.n_states
# print crf.n_features
# print crf.n_edge_features
def inference_data():
"""
Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
"""
# Test inference with different weights in different directions
X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
x, y = X[0], Y[0]
n_states = x.shape[-1]
edge_list = make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
pw_horz = -1 * np.eye(n_states)
xx, yy = np.indices(pw_horz.shape)
# linear ordering constraint horizontally
pw_horz[xx > yy] = 1
# high cost for unequal labels vertically
pw_vert = -1 * np.eye(n_states)
pw_vert[xx != yy] = 1
pw_vert *= 10
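# Worked example of the two pairwise matrices built above (n_states == 3),
# added for clarity; the values follow directly from the code:
#   pw_horz = [[-1, 0, 0],        pw_vert = [[-10,  10,  10],
#              [ 1,-1, 0],                   [ 10, -10,  10],
#              [ 1, 1,-1]]                   [ 10,  10, -10]]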
# generate edge weights
edge_weights_horizontal = np.repeat(pw_horz[np.newaxis, :, :],
edge_list[0].shape[0], axis=0)
edge_weights_vertical = np.repeat(pw_vert[np.newaxis, :, :],
edge_list[1].shape[0], axis=0)
edge_weights = np.vstack([edge_weights_horizontal, edge_weights_vertical])
# do inference
res = lp_general_graph(-x.reshape(-1, n_states), edges, edge_weights)
edge_features = edge_list_to_features(edge_list)
x = ([x.reshape(-1, n_states)], [edges], [edge_features])
y = y.ravel()
return x, y, pw_horz, pw_vert, res, n_states
def test_inference_ad3plus():
x, y, pw_horz, pw_vert, res, n_states = inference_data()
# same inference through CRF inferface
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]
, inference_method="ad3+")
crf.initialize(x, y)
#crf.initialize([x], [y])
w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
y_pred = crf.inference(x, w, relaxed=True)
if isinstance(y_pred, tuple):
# ad3 produces an integer result if it found the exact solution
#np.set_printoptions(precision=2, threshold=9999)
assert_array_almost_equal(res[0], y_pred[0][0].reshape(-1, n_states), 5)
assert_array_almost_equal(res[1], y_pred[1][0], 5)
assert_array_equal(y, np.argmax(y_pred[0][0], axis=-1), 5)
# again, this time discrete predictions only
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]
, inference_method="ad3+")
#crf.initialize([x], [y])
w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
crf.initialize(x)
y_pred = crf.inference(x, w, relaxed=False)
assert_array_equal(y, y_pred)
def test_inference_ad3():
x, y, pw_horz, pw_vert, res, n_states = inference_data()
# same inference through CRF inferface
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]
, inference_method="ad3")
crf.initialize(x, y)
#crf.initialize([x], [y])
w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
y_pred = crf.inference(x, w, relaxed=True)
if isinstance(y_pred, tuple):
# ad3 produces an integer result if it found the exact solution
#np.set_printoptions(precision=2, threshold=9999)
assert_array_almost_equal(res[0], y_pred[0][0].reshape(-1, n_states), 5)
assert_array_almost_equal(res[1], y_pred[1][0], 5)
assert_array_equal(y, np.argmax(y_pred[0][0], axis=-1), 5)
# again, this time discrete predictions only
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]
, inference_method="ad3")
#crf.initialize([x], [y])
w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
crf.initialize(x)
y_pred = crf.inference(x, w, relaxed=False)
assert_array_equal(y, y_pred)
def test_joint_feature_discrete():
"""
Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
"""
X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
x, y = X[0], Y[0]
edge_list = make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
edge_features = edge_list_to_features(edge_list)
x = ([x.reshape(-1, 3)], [edges], [edge_features])
y_flat = y.ravel()
#for inference_method in get_installed(["lp", "ad3", "qpbo"]):
if True:
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]])
joint_feature_y = crf.joint_feature(x, y_flat)
assert_equal(joint_feature_y.shape, (crf.size_joint_feature,))
# first horizontal, then vertical
# we trust the unaries ;)
n_states = crf.l_n_states[0]
n_features = crf.l_n_features[0]
pw_joint_feature_horz, pw_joint_feature_vert = joint_feature_y[n_states *
n_features:].reshape(
2, n_states, n_states)
assert_array_equal(pw_joint_feature_vert, np.diag([9 * 4, 9 * 4, 9 * 4]))
vert_joint_feature = np.diag([10 * 3, 10 * 3, 10 * 3])
vert_joint_feature[0, 1] = 10
vert_joint_feature[1, 2] = 10
assert_array_equal(pw_joint_feature_horz, vert_joint_feature)
def test_joint_feature_continuous():
"""
Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
"""
# FIXME
# first make perfect prediction, including pairwise part
X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
x, y = X[0], Y[0]
n_states = x.shape[-1]
edge_list = make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
edge_features = edge_list_to_features(edge_list)
#x = (x.reshape(-1, 3), edges, edge_features)
x = ([x.reshape(-1, 3)], [edges], [edge_features])
y = y.ravel()
pw_horz = -1 * np.eye(n_states)
xx, yy = np.indices(pw_horz.shape)
# linear ordering constraint horizontally
pw_horz[xx > yy] = 1
# high cost for unequal labels vertically
pw_vert = -1 *
| np.eye(n_states) | numpy.eye |
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col, group_col):
if isinstance(a, DataFrame):
x = a.copy()
if not all([group_col, val_col]):
raise ValueError('group_col, val_col must be explicitly specified')
else:
x = np.array(a)
if not all([group_col, val_col]):
try:
groups = np.array([len(a) * [i + 1] for i, a in enumerate(x)])
groups = sum(groups.tolist(), [])
x = sum(x.tolist(), [])
x = np.column_stack([x, groups])
val_col = 0
group_col = 1
except Exception:
raise ValueError('array cannot be processed, provide val_col and group_col args')
x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
x.rename(columns={group_col: 'groups', val_col: 'y'}, inplace=True)
group_col = 'groups'
val_col = 'y'
return x
def __convert_to_block_df(a, y_col, group_col, block_col, melted):
if isinstance(a, DataFrame) and not melted:
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x = a.melt(id_vars=block_col, var_name=group_col, value_name=y_col)
elif melted:
x = DataFrame.from_dict({'groups': a[group_col],
'blocks': a[block_col],
'y': a[y_col]})
elif not isinstance(a, DataFrame):
x = np.array(a)
x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
if not melted:
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x.columns.name = group_col
x.index.name = block_col
x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
else:
x.rename(columns={group_col: 'groups', block_col: 'blocks', y_col: 'y'}, inplace=True)
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col = None, group_col = None, p_adjust = None, sort = True):
'''
Post-hoc pairwise test for multiple comparisons of mean rank sums
(Conover's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains values.
group_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
Numpy ndarray or pandas DataFrame of p values depending on input
data type.
Notes
-----
A tie correction is employed according to Conover [1]_.
References
----------
.. [1] <NAME> and <NAME> (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_conover(x, p_adjust = 'holm')
array([ [-1. , 0.00119517, 0.00278329],
[ 0.00119517, -1. , 0.18672227],
[ 0.00278329, 0.18672227, -1. ]])
'''
def compare_conover(i, j):
diff = np.abs(x_ranks_avg[i] - x_ranks_avg[j])
B = (1. / x_lens[i] + 1. / x_lens[j])
D = (x_len_overall - 1. - H) / (x_len_overall - x_len)
t_value = diff / np.sqrt(S2 * B * D)
p_value = 2. * ss.t.sf(np.abs(t_value), df = x_len_overall - x_len)
return p_value
def get_ties(x):
x_sorted = np.array(np.sort(x))
tie_sum = 0
pos = 0
while pos < x_len_overall:
n_ties = len(x_sorted[x_sorted == x_sorted[pos]])
pos = pos + n_ties
if n_ties > 1:
tie_sum += n_ties ** 3. - n_ties
c = np.min([1., 1. - tie_sum / (x_len_overall ** 3. - x_len_overall)])
return c
if isinstance(a, DataFrame):
x = a.copy()
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
x_groups_unique = x[group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(by=group_col)[val_col].count().values
x_flat = x[val_col].values
x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
x_grouped = np.array([x_flat[j:j + x_lens[i]] for i, j in enumerate(x_lens_cumsum)])
else:
x = np.array(a)
x_grouped = np.array([np.asarray(a)[~np.isnan(a)] for a in x])
x_flat = np.concatenate(x_grouped)
x_len = len(x_grouped)
x_lens = np.asarray([len(a) for a in x_grouped])
x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
x_len_overall = len(x_flat)
if any(x_lens == 0):
raise ValueError("All groups must contain data")
x_ranks = ss.rankdata(x_flat)
x_ranks_grouped = np.array([x_ranks[j:j + x_lens[i]] for i, j in enumerate(x_lens_cumsum)])
x_ranks_avg = [np.mean(z) for z in x_ranks_grouped]
x_ties = get_ties(x_ranks) #ss.tiecorrect(x_ranks)
H = ss.kruskal(*x_grouped)[0]
if x_ties == 1:
S2 = x_len_overall * (x_len_overall + 1.) / 12.
else:
S2 = (1. / (x_len_overall - 1.)) * (np.sum(x_ranks ** 2.) - (x_len_overall * (((x_len_overall + 1.)**2.) / 4.)))
vs = np.zeros((x_len, x_len), dtype=float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = compare_conover(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
if isinstance(x, DataFrame):
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
else:
return vs
def posthoc_dunn(a, val_col = None, group_col = None, p_adjust = None, sort = True):
'''
Post-hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains values.
group_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
Numpy ndarray or pandas DataFrame of p values depending on input
data type.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] <NAME> (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] <NAME> (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
array([[-1. 0.01764845 0.04131415]
[ 0.01764845 -1. 0.45319956]
[ 0.04131415 0.45319956 -1. ]])
'''
def compare_dunn(i, j):
diff = np.abs(x_ranks_avg[i] - x_ranks_avg[j])
A = x_len_overall * (x_len_overall + 1.) / 12.
B = (1. / x_lens[i] + 1. / x_lens[j])
z_value = diff / np.sqrt((A - x_ties) * B)
p_value = 2. * ss.norm.sf(np.abs(z_value))
return p_value
def get_ties(x):
x_sorted = np.array(np.sort(x))
tie_sum = 0
pos = 0
while pos < x_len_overall:
n_ties = len(x_sorted[x_sorted == x_sorted[pos]])
pos = pos + n_ties
if n_ties > 1:
tie_sum += n_ties ** 3. - n_ties
c = tie_sum / (12. * (x_len_overall - 1))
return c
if isinstance(a, DataFrame):
x = a.copy()
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
x_groups_unique = x[group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(by=group_col)[val_col].count().values
x_flat = x[val_col].values
else:
x = np.array(a)
x = np.array([np.asarray(a)[~np.isnan(a)] for a in x])
x_flat = np.concatenate(x)
x_len = len(x)
x_lens = np.asarray([len(a) for a in x])
x_len_overall = len(x_flat)
if any(x_lens == 0):
raise ValueError("All groups must contain data")
x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
x_ranks = ss.rankdata(x_flat)
x_ranks_grouped = np.array([x_ranks[j:j + x_lens[i]] for i, j in enumerate(x_lens_cumsum)])
x_ranks_avg = [np.mean(z) for z in x_ranks_grouped]
x_ties = get_ties(x_ranks)
vs = np.zeros((x_len, x_len), dtype=float)
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i,j in combs:
vs[i, j] = compare_dunn(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
if isinstance(x, DataFrame):
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
else:
return vs
def posthoc_nemenyi(a, val_col = None, group_col = None, dist = 'chi', sort = True):
'''
Post-hoc pairwise test for multiple comparisons of mean rank sums
(Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains values.
group_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
dist : str, optional
Method for determining the p value. The default distribution is "chi"
(chi-squared), else "tukey" (studentized range).
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
Numpy ndarray or pandas DataFrame of p values depending on input
data type.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] <NAME> (1997), Angewandte Statistik. Berlin: Springer.
Pages: 395-397, 662-664.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_nemenyi(x)
array([[-1. , 0.02206238, 0.06770864],
[ 0.02206238, -1. , 0.75361555],
[ 0.06770864, 0.75361555, -1. ]])
'''
def compare_stats_chi(i, j):
diff = np.abs(x_ranks_avg[i] - x_ranks_avg[j])
A = x_len_overall * (x_len_overall + 1.) / 12.
B = (1. / x_lens[i] + 1. / x_lens[j])
chi = diff ** 2. / (A * B)
return chi
def compare_stats_tukey(i, j):
diff = np.abs(x_ranks_avg[i] - x_ranks_avg[j])
B = (1. / x_lens[i] + 1. / x_lens[j])
q = diff / np.sqrt((x_len_overall * (x_len_overall + 1.) / 12.) * B)
return q
def get_ties(x):
x_sorted = np.array(np.sort(x))
tie_sum = 0
pos = 0
while pos < x_len_overall:
n_ties = len(x_sorted[x_sorted == x_sorted[pos]])
pos = pos + n_ties
if n_ties > 1:
tie_sum += n_ties ** 3. - n_ties
c = np.min([1., 1. - tie_sum / (x_len_overall ** 3. - x_len_overall)])
return c
def get_ties_conover(x):
x_sorted = np.array(np.sort(x))
tie_sum = 0
pos = 0
while pos < x_len_overall:
n_ties = len(x_sorted[x_sorted == x_sorted[pos]])
pos = pos + n_ties
if n_ties > 1:
tie_sum += n_ties ** 3. - n_ties
c = np.min([1., 1. - tie_sum / (x_len_overall ** 3. - x_len_overall)])
return c
if isinstance(a, DataFrame):
x = a.copy()
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
x_groups_unique = x[group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(by=group_col)[val_col].count().values
x_flat = x[val_col].values
else:
x = np.array(a)
x = np.array([np.asarray(a)[~np.isnan(a)] for a in x])
x_flat = np.concatenate(x)
x_len = len(x)
x_lens = np.asarray([len(a) for a in x])
x_len_overall = len(x_flat)
if any(x_lens == 0):
raise ValueError("All groups must contain data")
x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
x_ranks = ss.rankdata(x_flat)
x_ranks_grouped = np.array([x_ranks[j:j + x_lens[i]] for i, j in enumerate(x_lens_cumsum)])
x_ranks_avg = [np.mean(z) for z in x_ranks_grouped]
x_ties = get_ties(x_ranks)
vs = np.zeros((x_len, x_len), dtype=float)
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
if dist == 'chi':
for i,j in combs:
vs[i, j] = compare_stats_chi(i, j) / x_ties
vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
elif dist == 'tukey':
for i,j in combs:
vs[i, j] = compare_stats_tukey(i, j) * np.sqrt(2.)
vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
if isinstance(x, DataFrame):
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
else:
return vs
def posthoc_nemenyi_friedman(a, y_col = None, block_col = None, group_col = None, melted = False, sort = False):
'''
Calculate pairwise comparisons using Nemenyi post-hoc test for unreplicated
blocked data. This test is usually conducted post-hoc after
significant results of the Friedman's test. The statistics refer to upper
quantiles of the studentized range distribution (Tukey) [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains block names.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
A one-way ANOVA with repeated measures that is also referred to as ANOVA
with unreplicated block design can also be conducted via Friedman's
test. The consequent post-hoc pairwise multiple comparison test
according to Nemenyi is conducted with this function.
This function does not test for ties.
References
----------
.. [1] <NAME> (2006), Statistical comparisons of classifiers over
multiple data sets, Journal of Machine Learning Research, 7, 1-30.
.. [2] <NAME> (1963) Distribution-free Multiple Comparisons. Ph.D.
thesis, Princeton University.
.. [3] <NAME> (1997), Angewandte Statistik. Berlin: Springer.
Pages: 668-675.
Examples
--------
>>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
>>> # and columns are groups.
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_nemenyi_friedman(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return qval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
#if not sort:
# x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
# x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].mean()
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs *= np.sqrt(2.)
vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col = None, block_col = None, group_col = None, melted = False, sort = False, p_adjust = None):
'''
Calculate pairwise comparisons using Conover post-hoc test for unreplicated
blocked data. This test is usually conducted post-hoc after
significant results of the Friedman test. The statistics refer to
the Student t distribution [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains block names.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
A one-way ANOVA with repeated measures that is also referred to as ANOVA
with unreplicated block design can also be conducted via the
friedman.test. The consequent post-hoc pairwise multiple comparison test
according to Conover is conducted with this function.
If y is a matrix, then the columns refer to the treatment and the rows
indicate the block.
References
----------
.. [1] <NAME> and <NAME> (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] <NAME> (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_conover_friedman(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
tval = dif / np.sqrt(A / B)
pval = 2. * ss.t.sf(np.abs(tval), df = (n-1)*(k-1))
return pval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
#if not sort:
# x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
# x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].sum()
A1 = (x['mat'] ** 2).sum()
C1 = (n * k * (k + 1) ** 2) / 4
TT = np.sum([((R[g] - ((n * (k + 1))/2)) ** 2) for g in groups])
T1 = ((k - 1) * TT) / (A1 - C1)
A = 2 * k * (1 - T1 / (k * (n-1))) * ( A1 - C1)
B = (n - 1) * (k - 1)
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, y_col = None, group_col = None, sort = False, p_adjust = None):
'''
Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
comparison procedure (NPM test) for simply ordered mean ranksums.
NPM test is basically an extension of Nemenyi's procedure for testing
increasingly ordered alternatives [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
y_col : str
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
The p-values are estimated from the studentized range distribution. If
the medians are already increasingly ordered, then the NPM-test
simplifies to the ordinary Nemenyi test.
References
----------
.. [1] <NAME>., <NAME>., (2005), Multiple comparison procedures for
detecting differences in simply ordered means. Comput. Statist. Data
Anal. 48, 291--306.
Examples
--------
>>> x = np.array([[102,109,114,120,124],
[110,112,123,130,145],
[132,141,156,160,172]])
>>> sp.posthoc_npm_test(x)
'''
x = __convert_to_df(a, y_col, group_col)
val_col, group_col = 'y', 'groups'  # __convert_to_df renames the columns
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col], ascending=True, inplace=True)
x_groups_unique = x[group_col].unique()
x['ranks'] = x[val_col].rank()
Ri = x.groupby(group_col)['ranks'].mean()
ni = x.groupby(group_col)[val_col].count()
k = x[group_col].unique().size
n = x.shape[0]
sigma = np.sqrt(n * (n + 1) / 12.)
df = np.inf
def compare(m, u):
return (Ri[u] - Ri[m]) / (sigma / np.sqrt(2) * np.sqrt(1. / ni[m] + 1. / ni[u]))
stat =
| np.empty((k-1, k-1)) | numpy.empty |
import numpy as np
import time
import pytest
import os, pickle, re, shutil
from flare import struc, env, gp
from flare import otf_parser
from flare.mgp.mgp_en import MappedGaussianProcess
from flare.lammps import lammps_calculator
from flare.ase.calculator import FLARE_Calculator
from ase.calculators.lammpsrun import LAMMPS
from tests.fake_gp import get_gp, get_random_structure
body_list = ['2', '3']
multi_list = [False, True]
curr_path = os.getcwd()
def clean():
for f in os.listdir("./"):
if re.search(r"grid.*npy", f):
os.remove(f)
if re.search("kv3", f):
os.rmdir(f)
# ASSUMPTION: You have a Lammps executable with the mgp pair style with $lmp
# as the corresponding environment variable.
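# For example (hypothetical path, shown only for illustration):
#   export lmp=/path/to/lmp               # in the shell, or
#   os.environ['lmp'] = '/path/to/lmp'    # from Python, before the tests run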
@pytest.mark.skipif(not os.environ.get('lmp',
False), reason='lmp not found '
'in environment: Please install LAMMPS '
'and set the $lmp env. '
'variable to point to the executable.')
@pytest.fixture(scope='module')
def all_gp():
allgp_dict = {}
np.random.seed(0)
for bodies in ['2', '3', '2+3']:
for multihyps in [False, True]:
gp_model = get_gp(bodies, 'mc', multihyps)
gp_model.parallel = True
gp_model.n_cpus = 2
allgp_dict[f'{bodies}{multihyps}'] = gp_model
yield allgp_dict
del allgp_dict
@pytest.fixture(scope='module')
def all_mgp():
allmgp_dict = {}
for bodies in ['2', '3', '2+3']:
for multihyps in [False, True]:
allmgp_dict[f'{bodies}{multihyps}'] = None
yield allmgp_dict
del allmgp_dict
@pytest.fixture(scope='module')
def all_ase_calc():
all_ase_calc_dict = {}
for bodies in ['2', '3', '2+3']:
for multihyps in [False, True]:
all_ase_calc_dict[f'{bodies}{multihyps}'] = None
yield all_ase_calc_dict
del all_ase_calc_dict
@pytest.fixture(scope='module')
def all_lmp_calc():
if 'tmp' not in os.listdir("./"):
os.mkdir('tmp')
all_lmp_calc_dict = {}
for bodies in ['2', '3', '2+3']:
for multihyps in [False, True]:
all_lmp_calc_dict[f'{bodies}{multihyps}'] = None
yield all_lmp_calc_dict
del all_lmp_calc_dict
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_init(bodies, multihyps, all_mgp, all_gp):
"""
test the init function
"""
gp_model = all_gp[f'{bodies}{multihyps}']
grid_num_2 = 64
grid_num_3 = 16
lower_cut = 0.01
two_cut = gp_model.cutoffs[0]
three_cut = gp_model.cutoffs[1]
lammps_location = f'{bodies}{multihyps}.mgp'
# set struc params. cell and masses arbitrary?
mapped_cell = np.eye(3) * 20
struc_params = {'species': [1, 2],
'cube_lat': mapped_cell,
'mass_dict': {'0': 2, '1': 4}}
# grid parameters
blist = []
if ('2' in bodies):
blist+= [2]
if ('3' in bodies):
blist+= [3]
train_size = len(gp_model.training_data)
grid_params = {'bodies': blist,
'cutoffs':gp_model.cutoffs,
'bounds_2': [[lower_cut], [two_cut]],
'bounds_3': [[lower_cut, lower_cut, lower_cut],
[three_cut, three_cut, three_cut]],
'grid_num_2': grid_num_2,
'grid_num_3': [grid_num_3, grid_num_3, grid_num_3],
'svd_rank_2': 14,
'svd_rank_3': 14,
'load_grid': None,
'update': False}
struc_params = {'species': [1, 2],
'cube_lat': np.eye(3)*2,
'mass_dict': {'0': 27, '1': 16}}
mgp_model = MappedGaussianProcess(grid_params, struc_params, n_cpus=4,
mean_only=True, lmp_file_name=lammps_location)
all_mgp[f'{bodies}{multihyps}'] = mgp_model
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_build_map(all_gp, all_mgp, all_ase_calc, bodies, multihyps):
"""
test the mapping for mc_simple kernel
"""
# multihyps = False
gp_model = all_gp[f'{bodies}{multihyps}']
mgp_model = all_mgp[f'{bodies}{multihyps}']
mgp_model.build_map(gp_model)
all_ase_calc[f'{bodies}{multihyps}'] = FLARE_Calculator(gp_model,
mgp_model, par=False, use_mapping=True)
clean()
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_lmp_calc(bodies, multihyps, all_lmp_calc):
label = f'{bodies}{multihyps}'
# set up input params
by = 'no'
ty = 'no'
if '2' in bodies:
by = 'yes'
if '3' in bodies:
ty = 'yes'
parameters = {'command': os.environ.get('lmp'), # set up executable for ASE
'newton': 'off',
'pair_style': 'mgp',
'pair_coeff': [f'* * {label}.mgp H He {by} {ty}'],
'mass': ['1 2', '2 4']}
files = [f'{label}.mgp']
# create ASE calc
lmp_calc = LAMMPS(label=f'tmp{label}', keep_tmp_files=True, tmp_dir='./tmp/',
parameters=parameters, files=files)
all_lmp_calc[label] = lmp_calc
@pytest.mark.skipif(not os.environ.get('lmp',
False), reason='lmp not found '
'in environment: Please install LAMMPS '
'and set the $lmp env. '
'variable to point to the executable.')
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_lmp_predict(all_ase_calc, all_lmp_calc, bodies, multihyps):
"""
test the lammps implementation
"""
label = f'{bodies}{multihyps}'
for f in os.listdir("./"):
if label in f:
os.remove(f)
if f in ['log.lammps']:
os.remove(f)
clean()
flare_calc = all_ase_calc[label]
lmp_calc = all_lmp_calc[label]
gp_model = flare_calc.gp_model
mgp_model = flare_calc.mgp_model
lammps_location = mgp_model.lmp_file_name
# lmp file is automatically written now every time MGP is constructed
mgp_model.write_lmp_file(lammps_location)
# create test structure
cell = np.diag(np.array([1, 1, 1.5])) * 4
nenv = 10
unique_species = gp_model.training_data[0].species
cutoffs = gp_model.cutoffs
struc_test, f = get_random_structure(cell, unique_species, nenv)
struc_test.positions *= 4
# build ase atom from struc
ase_atoms_flare = struc_test.to_ase_atoms()
ase_atoms_flare.set_calculator(flare_calc)
ase_atoms_lmp = struc_test.to_ase_atoms()
ase_atoms_lmp.set_calculator(lmp_calc)
lmp_en = ase_atoms_lmp.get_potential_energy()
flare_en = ase_atoms_flare.get_potential_energy()
lmp_stress = ase_atoms_lmp.get_stress()
flare_stress = ase_atoms_flare.get_stress()
lmp_forces = ase_atoms_lmp.get_forces()
flare_forces = ase_atoms_flare.get_forces()
# check that lammps agrees with gp to within 1 meV/A
assert np.all(np.abs(lmp_en - flare_en) < 1e-4)
assert np.all(np.abs(lmp_forces - flare_forces) < 1e-4)
assert np.all(
|
np.abs(lmp_stress - flare_stress)
|
numpy.abs
|
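# A minimal, self-contained sketch of the numpy.abs tolerance-check pattern used in
# the completion above; the arrays and the threshold here are illustrative, not taken
# from the test itself.
import numpy as np
reference = np.array([1.0002, -0.4999, 2.3001])
candidate = np.array([1.0001, -0.5000, 2.3000])
tol = 1e-3
# Elementwise absolute difference; np.all requires every entry to be within tol.
print(np.all(np.abs(reference - candidate) < tol))  # True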
import pandas as pd
import numpy as np
from scipy.stats import chi2
def get_data(input_file):
'''
Read data from a csv file. For the meaning of each variable,
please see Ben-Tal et al. (2013)
'''
df = pd.read_csv(input_file)
c_vec = np.array(df["c"])
v_vec = np.array(df["v"])
s_vec = np.array(df["s"])
l_vec =
|
np.array(df["l"])
|
numpy.array
|
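# A minimal sketch of pulling a pandas column into a NumPy array, as the completion
# above does; the frame and its values are made up for illustration.
import numpy as np
import pandas as pd
df = pd.DataFrame({"c": [1.0, 2.0], "v": [0.5, 0.7], "s": [10, 20], "l": [3, 4]})
# np.array copies the column into a plain ndarray, dropping the pandas index.
l_vec = np.array(df["l"])
print(l_vec)  # [3 4]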
"""
Includes functions to assist in spreadsheet functionality
"""
import re
from PyQt5.QtCore import QDateTime
import datetime
import numpy as np
import difflib
global lists
"""
'lists' contains pre-defined sequences that autofill when using the fill-handle
"""
lists = [
['SUN','MON','TUE','WED','THU','FRI','SAT','SUN','MON','TUE','WED','THU','FRI','SAT'],
    ['SUNDAY','MONDAY','TUESDAY','WEDNESDAY','THURSDAY','FRIDAY','SATURDAY','SUNDAY','MONDAY','TUESDAY','WEDNESDAY','THURSDAY','FRIDAY','SATURDAY'],
['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC','JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC'],
['JANUARY','FEBRUARY','MARCH','APRIL','MAY','JUNE','JULY','AUGUST','SEPTEMBER','OCTOBER','NOVEMBER','DECEMBER','JANUARY','FEBRUARY','MARCH','APRIL','MAY','JUNE','JULY','AUGUST','SEPTEMBER','OCTOBER','NOVEMBER','DECEMBER']
]
def is_sublist(smallList, bigList):
"""
Checks if smallList is a sublist of bigList
"""
def n_slices(n, list_):
for i in range(len(list_)+1-n):
yield(list_[i:i+n])
for slice_ in n_slices(len(smallList), bigList):
if slice_ == smallList:
return True
return False
def propogate_values(values, direction='down'):
"""
This function is responsible for
    propagating values when using the
    fill handle. It can propagate formulas,
dates/times, sequences (integers, floats), and
static text (strings, floats)
"""
moving_range_types = []
for value in values:
if isinstance(value, str):
if value[0] == '=':
moving_range_types.append('formula')
else:
moving_range_types.append('str')
elif isinstance(value, float):
moving_range_types.append('numeric')
elif isinstance(value, int):
moving_range_types.append('numeric')
elif isinstance(value, QDateTime):
moving_range_types.append('date')
else:
moving_range_types.append('str')
if len(list(set(moving_range_types))) != 1:
"""
        Mismatch of types, can't really do anything except return the
first value in the moving range
"""
return values[0]
type_ = moving_range_types[0]
if type_ == 'numeric':
"""
Simplest case, we check for a defined sequence,
and if there isn't one, we simply use the first value
"""
diff = np.diff(values)
diff = [
|
np.round(value,9)
|
numpy.round
|
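# The completion rounds successive differences so floating-point noise does not break
# the "constant step" test; a small sketch of that idea with made-up values.
import numpy as np
values = [1.1, 1.2, 1.3, 1.4]
# Round each difference to 9 decimals to collapse artifacts like 0.10000000000000009.
diff = np.round(np.diff(values), 9)
if len(set(diff)) == 1:
    # Arithmetic sequence detected: extrapolate the next fill value.
    print(np.round(values[-1] + diff[0], 9))  # 1.5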
import os
import unittest
import numpy as np
import statsmodels.api as sm
import ConfigSpace as CS
from hpbandster.optimizers.kde.mvkde import MultivariateKDE
from pdb import set_trace
rapid_development=True
rapid_development=False
class Base1dTest(object):
n_train = 128
n_test = 1024
def setUp(self):
self.configspace = CS.ConfigurationSpace(42)
self.add_hyperparameters()
x_train_confs = [ self.configspace.sample_configuration() for i in range(self.n_train)]
self.x_train = np.array( [c.get_array() for c in x_train_confs])
x_test_confs = [ self.configspace.sample_configuration() for i in range(self.n_test)]
self.x_test= np.array( [c.get_array() for c in x_test_confs])
self.sm_x_train = self.sm_transform_data(self.x_train)
self.sm_x_test = self.sm_transform_data(self.x_test)
self.sm_kde = sm.nonparametric.KDEMultivariate(data=self.sm_x_train, var_type=self.var_types, bw='cv_ml')
self.hp_kde_full = MultivariateKDE(self.configspace, fully_dimensional=True, fix_boundary=False)
self.hp_kde_factor = MultivariateKDE(self.configspace, fully_dimensional=False, fix_boundary=False)
self.hp_kde_full.fit(self.x_train, bw_estimator='mlcv')
self.hp_kde_factor.fit(self.x_train, bw_estimator='mlcv')
def sm_transform_data(self, data):
return(data)
def tearDown(self):
self.configspace = None
self.x_train = None
self.x_test = None
self.sm_kde = None
self.hp_kde_full = None
self.hp_kde_factor = None
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_bandwidths_estimation(self):
# This test sometimes fails, as statsmodels uses a different optimizer with a larger tolerance
self.assertAlmostEqual(self.sm_kde.bw[0], self.hp_kde_full.bandwidths[0], delta=2e-3)
self.assertAlmostEqual(self.sm_kde.bw[0], self.hp_kde_factor.bandwidths[0], delta=2e-3)
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_pdfs(self):
for bw in np.logspace(-0.5,-0.1,5):
self.sm_kde.bw = np.array([bw])
self.hp_kde_full.set_bandwidths(np.array([bw]))
self.hp_kde_factor.set_bandwidths(np.array([bw]))
p1 = self.sm_kde.pdf(self.sm_x_test)
p2 = self.hp_kde_full.pdf(self.x_test)
p3 = self.hp_kde_factor.pdf(self.x_test)
self.assertTrue(np.allclose(p1, p2))
self.assertTrue(np.allclose(p1, p3))
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_loo_likelihood(self):
for bw in np.logspace(-1,-0.1,5):
self.sm_kde.bw = np.array([bw])
self.hp_kde_full.set_bandwidths(np.array([bw]))
self.hp_kde_factor.set_bandwidths(np.array([bw]))
sm_ll = self.sm_kde.loo_likelihood(bw=np.array([bw]), func=np.log)
hp_full_ll = self.hp_kde_full.loo_negloglikelihood()
hp_factor_ll = self.hp_kde_factor.loo_negloglikelihood()
n = self.x_train.shape[0]
delta = 1e-3 * np.abs((sm_ll + hp_full_ll)/2)
# note: statsmodels' ll is not normalized, so we have to transform our result to get the same number!
self.assertAlmostEqual(sm_ll, n*(hp_full_ll - np.log(n-1)), delta=delta)
self.assertAlmostEqual(sm_ll, n*(hp_factor_ll - np.log(n-1)), delta=delta)
class BaseNdTest(object):
n_train = 128
n_test = 512
def setUp(self):
self.configspace = CS.ConfigurationSpace(42)
self.add_hyperparameters()
x_train_confs = [ self.configspace.sample_configuration() for i in range(self.n_train)]
self.x_train = np.array( [c.get_array() for c in x_train_confs])
x_test_confs = [ self.configspace.sample_configuration() for i in range(self.n_test)]
self.x_test= np.array( [c.get_array() for c in x_test_confs])
self.sm_x_train = self.sm_transform_data(self.x_train)
self.sm_x_test = self.sm_transform_data(self.x_test)
self.sm_kde = sm.nonparametric.KDEMultivariate(data=self.sm_x_train, var_type=self.var_types, bw='cv_ml')
self.sm_1d_kdes = [sm.nonparametric.KDEMultivariate(data=self.sm_x_train[:,i], var_type=self.var_types[i], bw='cv_ml') for i in range(len(self.var_types))]
self.hp_kde_full = MultivariateKDE(self.configspace, fully_dimensional=True, fix_boundary=False)
self.hp_kde_factor = MultivariateKDE(self.configspace, fully_dimensional=False, fix_boundary=False)
self.hp_kde_full.fit(self.x_train, bw_estimator='mlcv')
self.hp_kde_factor.fit(self.x_train, bw_estimator='mlcv')
def sm_transform_data(self, data):
return(data)
def tearDown(self):
self.configspace = None
self.x_train = None
self.x_test = None
self.sm_kde = None
self.sm_1d_kdes = None
self.hp_kde_full = None
self.hp_kde_factor = None
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_bandwidths_estimation(self):
# This test sometimes fails, as statsmodels uses a different optimizer with a larger tolerance
for d in range(len(self.var_types)):
self.assertAlmostEqual(self.sm_kde.bw[d], self.hp_kde_full.bandwidths[d], delta=5e-2)
self.assertAlmostEqual(self.sm_1d_kdes[d].bw[0], self.hp_kde_factor.bandwidths[d], delta=5e-2)
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_pdfs(self):
for bw in np.logspace(-0.5,-0.1,5):
self.sm_kde.bw = np.array([bw]*len(self.var_types))
self.hp_kde_full.set_bandwidths(np.array([bw]*len(self.var_types)))
self.hp_kde_factor.set_bandwidths(np.array([bw]*len(self.var_types)))
p1 = self.sm_kde.pdf(self.sm_x_test)
p2 = self.hp_kde_full.pdf(self.x_test)
p3 = self.hp_kde_factor.pdf(self.x_test)
p4_tmp = []
for i, kde in enumerate(self.sm_1d_kdes):
kde.bw = np.array([bw])
p4_tmp.append(kde.pdf(self.sm_x_test[:,i]))
p4_tmp = np.array(p4_tmp)
p4 = np.array(p4_tmp).prod(axis=0)
self.assertTrue(np.allclose(p1, p2))
self.assertTrue(np.allclose(p3, p4))
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_loo_likelihood(self):
for bw in np.logspace(-1,-0.1,5):
self.sm_kde.bw = np.array([bw]*len(self.var_types))
self.hp_kde_full.set_bandwidths(np.array([bw]*len(self.var_types)))
self.hp_kde_factor.set_bandwidths(np.array([bw]*len(self.var_types)))
sm_full_ll = self.sm_kde.loo_likelihood(bw=np.array([bw]*len(self.var_types)), func=np.log)
hp_full_ll = self.hp_kde_full.loo_negloglikelihood()
hp_factor_ll = self.hp_kde_factor.loo_negloglikelihood()
sm_factor_ll = []
for i, kde in enumerate(self.sm_1d_kdes):
kde.bw = np.array([bw])
sm_factor_ll.append(kde.loo_likelihood(bw=np.array([bw]), func=np.log))
sm_factor_ll = np.array(sm_factor_ll)
n = self.x_train.shape[0]
delta = 1e-2 * np.abs((sm_full_ll + hp_full_ll)/2)
# note: statsmodels' ll is not normalized, so we have to transform our result to get the same number!
self.assertAlmostEqual(sm_full_ll, n*(hp_full_ll - np.log(n-1)), delta=delta)
# same here, but it is easier to apply the normalization to the SM KDE's likelihoods
delta = 1e-2 *
|
np.abs(hp_factor_ll)
|
numpy.abs
|
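# The completion builds a relative tolerance with numpy.abs before comparing two
# log-likelihoods; a standalone sketch of the same comparison with invented numbers.
import numpy as np
ll_a = -1523.7
ll_b = -1523.9
# Allow a discrepancy of 1% of the average magnitude of the two values.
delta = 1e-2 * np.abs((ll_a + ll_b) / 2)
assert np.abs(ll_a - ll_b) < delta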
import copy
import concurrent.futures
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as splinalg
from pyslam.losses import L2Loss
class Options:
"""Class for specifying optimization options."""
def __init__(self):
self.max_iters = 100
"""Maximum number of iterations before terminating."""
self.min_update_norm = 1e-6
"""Minimum update norm before terminating."""
self.min_cost = 1e-12
"""Minimum cost value before terminating."""
self.min_cost_decrease = 0.9
"""Minimum cost decrease factor to continue optimization."""
self.linesearch_alpha = 0.8
"""Factor by which line search step size decreases each iteration."""
self.linesearch_max_iters = 10
"""Maximum number of line search steps."""
self.linesearch_min_cost_decrease = 0.9
"""Minimum cost decrease factor to continue the line search."""
self.allow_nondecreasing_steps = False
"""Enable non-decreasing steps to escape local minima."""
self.max_nondecreasing_steps = 3
"""Maximum number of non-dereasing steps before terminating."""
self.num_threads = 1
"""Number of threads to use for residual and jacobian evaluation."""
class Problem:
"""Class for building optimization problems."""
def __init__(self, options=Options()):
self.options = options
"""Optimization options."""
self.param_dict = dict()
"""Dictionary of all parameters with their current values."""
self.residual_blocks = []
"""List of residual blocks."""
self.block_param_keys = []
"""List of parameter keys in param_dict that each block depends on."""
self.block_loss_functions = []
"""List of loss functions applied to each block. Default: L2Loss."""
self.constant_param_keys = []
"""List of parameter keys in param_dict to be held constant."""
self._update_partition_dict = {}
"""Autogenerated list of update vector ranges corresponding to each parameter."""
self._covariance_matrix = None
"""Covariance matrix of final parameter estimates."""
self._cost_history = []
"""History of cost values at each iteration of solve."""
if self.options.num_threads > 1:
self._thread_pool = concurrent.futures.ThreadPoolExecutor(
max_workers=self.options.num_threads)
"""Thread pool for parallel evaluations."""
def add_residual_block(self, block, param_keys, loss=L2Loss()):
"""Add a cost block to the problem."""
# param_keys must be a list, but don't force the user to create a
# 1-element list
if isinstance(param_keys, str):
param_keys = [param_keys]
self.residual_blocks.append(block)
self.block_param_keys.append(param_keys)
self.block_loss_functions.append(loss)
def initialize_params(self, param_dict):
"""Initialize the parameters in the problem."""
# update does a shallow copy, which is no good for immutable parameters
self.param_dict.update(copy.deepcopy(param_dict))
def set_parameters_constant(self, param_keys):
"""Hold a list of parameters constant."""
# param_keys must be a list, but don't force the user to create a
# 1-element list
if isinstance(param_keys, str):
param_keys = [param_keys]
for key in param_keys:
if key not in self.constant_param_keys:
self.constant_param_keys.append(key)
def set_parameters_variable(self, param_keys):
"""Allow a list of parameters to vary."""
# param_keys must be a list, but don't force the user to create a
# 1-element list
if isinstance(param_keys, str):
param_keys = [param_keys]
for key in param_keys:
if key in self.constant_param_keys:
self.constant_param_keys.remove(key)
def eval_cost(self, param_dict=None):
"""Evaluate the cost function using given parameter values."""
if param_dict is None:
param_dict = self.param_dict
cost = 0.
for block, keys, loss in zip(self.residual_blocks,
self.block_param_keys,
self.block_loss_functions):
try:
params = [param_dict[key] for key in keys]
except KeyError as e:
                print(
                    "Parameter {} has not been initialized".format(e.args[0]))
                raise
residual = block.evaluate(params)
cost += np.sum(loss.loss(residual))
return cost
def solve(self):
"""Solve the problem using Gauss - Newton."""
self._update_partition_dict = self._get_update_partition_dict()
cost = self.eval_cost()
dx = np.array([100])
optimization_iters = 0
nondecreasing_steps_taken = 0
self._cost_history = [cost]
done_optimization = False
while not done_optimization:
optimization_iters += 1
prev_cost = cost
dx, cost = self.solve_one_iter()
# print("Update vector:\n", str(dx))
# print("Update norm = %f" % np.linalg.norm(dx))
# Update cost history
self._cost_history.append(cost)
# Update parameters
for k, r in self._update_partition_dict.items():
self._perturb_by_key(k, dx[r])
# Check if done optimizing
done_optimization = optimization_iters > self.options.max_iters or \
np.linalg.norm(dx) < self.options.min_update_norm or \
cost < self.options.min_cost
if self.options.allow_nondecreasing_steps:
if nondecreasing_steps_taken == 0:
best_params = copy.deepcopy(self.param_dict)
if cost >= self.options.min_cost_decrease * prev_cost:
nondecreasing_steps_taken += 1
else:
nondecreasing_steps_taken = 0
if nondecreasing_steps_taken \
>= self.options.max_nondecreasing_steps:
done_optimization = True
self.param_dict.update(best_params)
else:
done_optimization = done_optimization or \
cost >= self.options.min_cost_decrease * prev_cost
return self.param_dict
def solve_one_iter(self):
"""Solve one iteration of Gauss-Newton."""
# precision * dx = information
precision, information, cost = self._get_precision_information_and_cost()
dx = splinalg.spsolve(precision, information)
# Backtrack line search
if self.options.linesearch_max_iters > 0:
best_step_size, best_cost = self._do_line_search(dx)
else:
best_step_size, best_cost = 1., cost
return best_step_size * dx, best_cost
def compute_covariance(self):
"""Compute the covariance matrix after solve has terminated."""
try:
# Re-evaluate the precision matrix with the final parameters
precision, _, _ = self._get_precision_information_and_cost()
self._covariance_matrix = splinalg.inv(precision.tocsc()).toarray()
except Exception as e:
print('Covariance computation failed!\n{}'.format(e))
def get_covariance_block(self, param0, param1):
"""Get the covariance block corresponding to two parameters."""
try:
p0_range = self._update_partition_dict[param0]
p1_range = self._update_partition_dict[param1]
return np.squeeze(self._covariance_matrix[
p0_range.start:p0_range.stop, p1_range.start:p1_range.stop])
except KeyError as e:
print(
'Cannot compute covariance for constant parameter {}'.format(e.args[0]))
return None
def summary(self, format='brief'):
"""Return a summary of the optimization.
format='brief' : Number of iterations, initial/final cost
format='full' : Initial/final cost and relative change at each iteration
"""
if not self._cost_history:
raise ValueError('solve has not yet been called')
        if format == 'brief':
entry_format_string = 'Iterations: {:3} | Cost: {:12e} --> {:12e}'
summary = entry_format_string.format(len(self._cost_history),
self._cost_history[0],
self._cost_history[-1])
        elif format == 'full':
header_string = '{:>5s} | {:>12s} --> {:>12s} | {:>10s}\n'.format(
'Iter', 'Initial cost', 'Final cost', 'Rel change')
entry_format_string = '{:5} | {:12e} --> {:12e} | {:+10f}\n'
summary = [header_string, '-' * len(header_string) + '\n']
for i, ic, fc in zip(range(len(self._cost_history)),
self._cost_history[:-1],
self._cost_history[1:]):
summary.append(entry_format_string.format(
i, ic, fc, (fc - ic) / ic))
summary = ''.join(summary)
else:
            raise ValueError(
                'Invalid summary format \'{}\'. '.format(format) +
                'Valid formats are \'brief\' and \'full\'')
return summary
def _get_update_partition_dict(self):
"""Helper function to partition the full update vector."""
update_partition_dict = {}
prev_key = ''
for key, param in self.param_dict.items():
if key not in self.constant_param_keys:
if hasattr(param, 'dof'):
# Check if parameter specifies a tangent space
dof = param.dof
elif hasattr(param, '__len__'):
# Check if parameter is a vector
dof = len(param)
else:
# Must be a scalar
dof = 1
if not update_partition_dict:
update_partition_dict.update({key: range(dof)})
else:
update_partition_dict.update({key: range(
update_partition_dict[prev_key].stop,
update_partition_dict[prev_key].stop + dof)})
prev_key = key
return update_partition_dict
def _get_precision_information_and_cost(self):
"""Helper function to build the precision matrix and information vector for the Gauss - Newton update. Also returns the total cost."""
# The Gauss-Newton step is given by
# (H.T * W * H) dx = -H.T * W * e
# or
# precision * dx = information
#
# However, in our case, W is subsumed into H and e by the stiffness parameter
# so instead we have
# (H'.T * H') dx = -H'.T * e'
# where H' = sqrt(W) * H and e' = sqrt(W) * e
#
# Note that this is an exactly equivalent formulation, but avoids needing
# to explicitly construct and multiply the (possibly very large) W
# matrix.
HT_blocks = [[None for _ in self.residual_blocks]
for _ in self.param_dict]
e_blocks = [None for _ in self.residual_blocks]
cost_blocks = [None for _ in self.residual_blocks]
block_cidx_dict = dict(zip(self.param_dict.keys(),
list(range(len(self.param_dict)))))
if self.options.num_threads > 1:
# Evaluate residual and jacobian blocks in parallel
threads = []
for block_ridx, (block, keys, loss) in \
enumerate(zip(self.residual_blocks,
self.block_param_keys,
self.block_loss_functions)):
threads.append(self._thread_pool.submit(
self._populate_residual_jacobian_and_cost_blocks,
HT_blocks, e_blocks, cost_blocks,
block_cidx_dict, block_ridx,
block, keys, loss))
concurrent.futures.wait(threads)
else:
# Single thread: Call directly instead of submitting a job
for block_ridx, (block, keys, loss) in \
enumerate(zip(self.residual_blocks,
self.block_param_keys,
self.block_loss_functions)):
self._populate_residual_jacobian_and_cost_blocks(
HT_blocks, e_blocks, cost_blocks,
block_cidx_dict, block_ridx,
block, keys, loss)
HT = sparse.bmat(HT_blocks, format='csr')
e = np.squeeze(np.bmat(e_blocks).A)
precision = HT.dot(HT.T)
information = -HT.dot(e)
cost = np.sum(
|
np.array(cost_blocks)
|
numpy.array
|
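# The completion wraps the per-block costs in numpy.array so that np.sum can reduce
# them in one call; a minimal sketch with illustrative numbers.
import numpy as np
cost_blocks = [0.5, 1.25, 0.75]
total_cost = np.sum(np.array(cost_blocks))
print(total_cost)  # 2.5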
import numpy as np
h = 0.2
c = np.zeros(4)
c[0] = 0.1 # estimate
c[1] = 0.3
c[2] = 0.1 # estimate
c[3] = 0.12
m = np.zeros(3)
m[0] = 9. # estimate
m[1] = 28.
m[2] = 18.
real_y = np.loadtxt('y3.txt')
def get_A(m, c):
A = np.zeros((6, 6))
A[0, 1] = 1.
A[1, 0] = -(c[1]+c[0])/m[0]
A[1, 2] = c[1]/m[0]
A[2, 3] = 1.
A[3, 0] = c[1]/m[1]
A[3, 2] = -(c[1]+c[2])/m[1]
A[3, 4] = c[2]/m[1]
A[4, 5] = 1.
A[5, 2] = c[2]/m[2]
A[5, 4] = -(c[3]+c[2])/m[2]
return A
def get_U_deriv(A, U, y, m, c):
dAy_db = np.zeros((6, 3))
dAy_db[1, 0] = -y[0]/m[0]
dAy_db[1, 2] = (y[0] * (c[1]+c[0]) - y[2]*c[1])/(m[0]**2)
dAy_db[3, 1] = (-y[2] + y[4])/m[1]
dAy_db[5, 1] = (y[2] - y[4])/m[2]
return A @ U + dAy_db
def runge_kutta_step(y, h, f):
k_1 = h * f(y)
k_2 = h * f(y + k_1/2.)
k_3 = h * f(y + k_2/2.)
k_4 = h * f(y + k_3)
return y + (k_1 + 2.*k_2 + 2.*k_3 + k_4)/6.
while True:
A = get_A(m, c)
y = real_y[:, [0]]
U = np.zeros((6, 3))
delta_y = np.zeros((6, 1))
# Integrals for delta_beta
UU_integral = np.zeros((3, 3))
Uy_integral = np.zeros((3, 1))
# Identification quality
Ib = 0
for i in range(1, real_y.shape[1]):
U_new = runge_kutta_step(U, h, lambda U: get_U_deriv(A, U, y, m, c))
y_new = runge_kutta_step(y, h, lambda y: A @ y)
delta_y_new = real_y[:, [i]] - y_new
# Add to Riemann integrals for delta_b
UU_integral = UU_integral + h * (np.transpose(U) @ U + np.transpose(U_new) @ U_new) / 2.
Uy_integral = Uy_integral + h * (np.transpose(U) @ delta_y + np.transpose(U_new) @ delta_y_new) / 2.
        # Add to Riemann integral for identification quality
Ib = Ib + h * (np.transpose(delta_y) @ delta_y +
|
np.transpose(delta_y_new)
|
numpy.transpose
|
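# The completion adds one trapezoidal-rule term built from numpy.transpose to a
# running integral; a compact sketch with random matrices of the same shapes as in
# the snippet above (values are arbitrary).
import numpy as np
rng = np.random.default_rng(0)
h = 0.2
U_old = rng.normal(size=(6, 3))
U_new = rng.normal(size=(6, 3))
UU_integral = np.zeros((3, 3))
# Average the integrand at the two endpoints of the step, scaled by the step size h.
UU_integral += h * (np.transpose(U_old) @ U_old + np.transpose(U_new) @ U_new) / 2.0
print(UU_integral.shape)  # (3, 3)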
# -*- coding: utf-8 -*-
# @Time : 2019/3/13 14:17
# @Author : LunaFire
# @Email : <EMAIL>
# @File : trainningmodel.py
import glob
import pickle
import numpy as np
from PIL import Image
from tensorflow import gfile
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Conv2D, Activation, MaxPooling2D, Flatten, Dropout, Dense, Concatenate
from keras.utils.vis_utils import plot_model
NUMBER = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
LOWERCASE = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
UPPERCASE = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
CAPTCHA_CHARSET = NUMBER # Captcha character set
CAPTCHA_LEN = 4 # Captcha length
CAPTCHA_HEIGHT = 60 # Captcha image height
CAPTCHA_WIDTH = 160 # Captcha image width
TRAIN_DATA_DIR = '../data/chapter6/train/' # Training set path
TEST_DATA_DIR = '../data/chapter6/test/' # Test set path
BATCH_SIZE = 128 # Batch size
EPOCHS = 50 # Number of training epochs
OPT = 'adadelta' # Optimizer
LOSS = 'binary_crossentropy' # Loss function
MODEL_DIR = '../model/captcha/' # Model file directory
MODEL_FORMAT = '.h5' # Model file extension
HISTORY_DIR = '../history/captcha/' # Training history directory
HISTORY_FORMAT = '.history' # Training history file extension
FILENAME = '{}captcha_{}_{}_bs_{}_epochs_{}{}'
# Network architecture visualization file
MODEL_VIS_FILE = 'captcha_classfication.png'
# Model file
MODEL_FILE = FILENAME.format(MODEL_DIR, OPT, LOSS, str(BATCH_SIZE), str(EPOCHS), MODEL_FORMAT)
# Training history file
HISTORY_FILE = FILENAME.format(HISTORY_DIR, OPT, LOSS, str(BATCH_SIZE), str(EPOCHS), HISTORY_FORMAT)
def rgb2gray(images):
"""将RGB图像转为灰度图"""
# Y' = 0.299 R + 0.587 G + 0.114 B
# https://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
return
|
np.dot(images[..., :3], [0.299, 0.587, 0.114])
|
numpy.dot
|
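# The completion computes grayscale as a dot product over the channel axis with the
# ITU-R 601 luma weights; a self-contained sketch on a made-up 2x2 RGB image.
import numpy as np
images = np.array([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                   [[0.0, 0.0, 1.0], [1.0, 1.0, 1.0]]])
# A (2, 2, 3) image dotted with a length-3 weight vector gives a (2, 2) result.
gray = np.dot(images[..., :3], [0.299, 0.587, 0.114])
print(np.round(gray, 3))  # pixel values: 0.299, 0.587, 0.114, 1.0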
# Posterior inference.
import pyro
import pyro.distributions as dist
import torch
import numpy as np
import scipy.sparse as sp
import cellbender.remove_background.consts as consts
from typing import Tuple, List, Dict, Optional
from abc import ABC, abstractmethod
import logging
class Posterior(ABC):
"""Base class Posterior handles posterior count inference.
Args:
dataset_obj: Dataset object.
vi_model: Trained RemoveBackgroundPyroModel.
counts_dtype: Data type of posterior count matrix. Can be one of
[np.uint32, np.float]
float_threshold: For floating point count matrices, counts below
this threshold will be set to zero, for the purposes of constructing
a sparse matrix. Unused if counts_dtype is np.uint32
Properties:
mean: Posterior count mean, as a sparse matrix.
"""
def __init__(self,
dataset_obj: 'SingleCellRNACountsDataset', # Dataset
vi_model: 'RemoveBackgroundPyroModel',
counts_dtype: np.dtype = np.uint32,
float_threshold: float = 0.5):
self.dataset_obj = dataset_obj
self.vi_model = vi_model
self.use_cuda = vi_model.use_cuda
self.analyzed_gene_inds = dataset_obj.analyzed_gene_inds
self.count_matrix_shape = dataset_obj.data['matrix'].shape
self.barcode_inds = np.arange(0, self.count_matrix_shape[0])
self.dtype = counts_dtype
self.float_threshold = float_threshold
self._mean = None
self._latents = None
super(Posterior, self).__init__()
@abstractmethod
def _get_mean(self):
"""Obtain mean posterior counts and store in self._mean"""
pass
@property
def mean(self) -> sp.csc_matrix:
if self._mean is None:
self._get_mean()
return self._mean
@property
def latents(self) -> sp.csc_matrix:
if self._latents is None:
self._get_latents()
return self._latents
@property
def variance(self):
        raise NotImplementedError("Posterior count variance not implemented.")
@torch.no_grad()
def _get_latents(self):
"""Calculate the encoded latent variables."""
data_loader = self.dataset_obj.get_dataloader(use_cuda=self.use_cuda,
analyzed_bcs_only=True,
batch_size=500,
shuffle=False)
z = np.zeros((len(data_loader), self.vi_model.encoder['z'].output_dim))
d = np.zeros(len(data_loader))
p = np.zeros(len(data_loader))
epsilon = np.zeros(len(data_loader))
for i, data in enumerate(data_loader):
if 'chi_ambient' in pyro.get_param_store().keys():
chi_ambient = pyro.param('chi_ambient').detach()
else:
chi_ambient = None
enc = self.vi_model.encoder.forward(x=data, chi_ambient=chi_ambient)
ind = i * data_loader.batch_size
z[ind:(ind + data.shape[0]), :] = enc['z']['loc'].detach().cpu().numpy()
phi_loc = pyro.param('phi_loc')
phi_scale = pyro.param('phi_scale')
d[ind:(ind + data.shape[0])] = \
dist.LogNormal(loc=enc['d_loc'],
scale=pyro.param('d_cell_scale')).mean.detach().cpu().numpy()
p[ind:(ind + data.shape[0])] = enc['p_y'].sigmoid().detach().cpu().numpy()
epsilon[ind:(ind + data.shape[0])] = dist.Gamma(enc['epsilon'] * self.vi_model.epsilon_prior,
self.vi_model.epsilon_prior).mean.detach().cpu().numpy()
self._latents = {'z': z, 'd': d, 'p': p,
'phi_loc_scale': [phi_loc.item(), phi_scale.item()],
'epsilon': epsilon}
@torch.no_grad()
def _param_map_estimates(self,
data: torch.Tensor,
chi_ambient: torch.Tensor) -> Dict[str, torch.Tensor]:
"""Calculate MAP estimates of mu, the mean of the true count matrix, and
lambda, the rate parameter of the Poisson background counts.
Args:
data: Dense tensor minibatch of cell by gene count data.
chi_ambient: Point estimate of inferred ambient gene expression.
Returns:
mu_map: Dense tensor of Negative Binomial means for true counts.
lambda_map: Dense tensor of Poisson rate params for noise counts.
alpha_map: Dense tensor of Dirichlet concentration params that
inform the overdispersion of the Negative Binomial.
"""
# Encode latents.
enc = self.vi_model.encoder.forward(x=data,
chi_ambient=chi_ambient)
z_map = enc['z']['loc']
chi_map = self.vi_model.decoder.forward(z_map)
phi_loc = pyro.param('phi_loc')
phi_scale = pyro.param('phi_scale')
phi_conc = phi_loc.pow(2) / phi_scale.pow(2)
phi_rate = phi_loc / phi_scale.pow(2)
alpha_map = 1. / dist.Gamma(phi_conc, phi_rate).mean
y = (enc['p_y'] > 0).float()
d_empty = dist.LogNormal(loc=pyro.param('d_empty_loc'),
scale=pyro.param('d_empty_scale')).mean
d_cell = dist.LogNormal(loc=enc['d_loc'],
scale=pyro.param('d_cell_scale')).mean
epsilon = dist.Gamma(enc['epsilon'] * self.vi_model.epsilon_prior,
self.vi_model.epsilon_prior).mean
if self.vi_model.include_rho:
rho = pyro.param("rho_alpha") / (pyro.param("rho_alpha")
+ pyro.param("rho_beta"))
else:
rho = None
# Calculate MAP estimates of mu and lambda.
mu_map = self.vi_model.calculate_mu(epsilon=epsilon,
d_cell=d_cell,
chi=chi_map,
y=y,
rho=rho)
lambda_map = self.vi_model.calculate_lambda(epsilon=epsilon,
chi_ambient=chi_ambient,
d_empty=d_empty,
y=y,
d_cell=d_cell,
rho=rho,
chi_bar=self.vi_model.avg_gene_expression)
return {'mu': mu_map, 'lam': lambda_map, 'alpha': alpha_map}
def dense_to_sparse(self,
chunk_dense_counts: np.ndarray) -> Tuple[List, List, List]:
"""Distill a batch of dense counts into sparse format.
Barcode numbering is relative to the tensor passed in.
"""
# TODO: speed up by keeping it a torch tensor as long as possible
if chunk_dense_counts.dtype != np.int32:
if self.dtype == np.uint32:
# Turn the floating point count estimates into integers.
decimal_values, _ = np.modf(chunk_dense_counts) # Stuff after decimal.
roundoff_counts = np.random.binomial(1, p=decimal_values) # Bernoulli.
chunk_dense_counts = np.floor(chunk_dense_counts).astype(dtype=int)
chunk_dense_counts += roundoff_counts
elif self.dtype == np.float32:
# Truncate counts at a threshold value.
chunk_dense_counts = (chunk_dense_counts *
(chunk_dense_counts > self.float_threshold))
else:
raise NotImplementedError(f"Count matrix dtype {self.dtype} is not "
f"supported. Choose from [np.uint32, "
f"np.float32]")
# Find all the nonzero counts in this dense matrix chunk.
nonzero_barcode_inds_this_chunk, nonzero_genes_trimmed = \
np.nonzero(chunk_dense_counts)
nonzero_counts = \
chunk_dense_counts[nonzero_barcode_inds_this_chunk,
nonzero_genes_trimmed].flatten(order='C')
# Get the original gene index from gene index in the trimmed dataset.
nonzero_genes = self.analyzed_gene_inds[nonzero_genes_trimmed]
return nonzero_barcode_inds_this_chunk, nonzero_genes, nonzero_counts
class ImputedPosterior(Posterior):
"""Posterior count inference using imputation to infer cell mean (d * chi).
Args:
dataset_obj: Dataset object.
vi_model: Trained RemoveBackgroundPyroModel.
guide: Variational posterior pyro guide function, optional. Only
specify if the required guide function is not vi_model.guide.
encoder: Encoder that provides encodings of data.
counts_dtype: Data type of posterior count matrix. Can be one of
[np.uint32, np.float]
float_threshold: For floating point count matrices, counts below
this threshold will be set to zero, for the purposes of constructing
a sparse matrix. Unused if counts_dtype is np.uint32
Properties:
mean: Posterior count mean, as a sparse matrix.
encodings: Encoded latent variables, one per barcode in the dataset.
"""
def __init__(self,
dataset_obj: 'SingleCellRNACountsDataset', # Dataset
vi_model: 'RemoveBackgroundPyroModel', # Trained variational inference model
guide=None,
encoder=None, #: Union[CompositeEncoder, None] = None,
counts_dtype: np.dtype = np.uint32,
float_threshold: float = 0.5):
self.vi_model = vi_model
self.use_cuda = vi_model.use_cuda
self.guide = guide if guide is not None else vi_model.encoder
self.encoder = encoder if encoder is not None else vi_model.encoder
self._encodings = None
self._mean = None
super(ImputedPosterior, self).__init__(dataset_obj=dataset_obj,
vi_model=vi_model,
counts_dtype=counts_dtype,
float_threshold=float_threshold)
@torch.no_grad()
def _get_mean(self):
"""Send dataset through a guide that returns mean posterior counts.
Keep track of only what is necessary to distill a sparse count matrix.
"""
data_loader = self.dataset_obj.get_dataloader(use_cuda=self.use_cuda,
analyzed_bcs_only=False,
batch_size=500,
shuffle=False)
barcodes = []
genes = []
counts = []
ind = 0
for data in data_loader:
# Get return values from guide.
dense_counts_torch = self._param_map_estimates(data=data,
chi_ambient=pyro.param("chi_ambient"))
dense_counts = dense_counts_torch.detach().cpu().numpy()
bcs_i_chunk, genes_i, counts_i = self.dense_to_sparse(dense_counts)
# Translate chunk barcode inds to overall inds.
bcs_i = self.barcode_inds[bcs_i_chunk + ind]
# Add sparse matrix values to lists.
barcodes.append(bcs_i)
genes.append(genes_i)
counts.append(counts_i)
# Increment barcode index counter.
ind += data.shape[0] # Same as data_loader.batch_size
# Convert the lists to numpy arrays.
counts = np.array(np.concatenate(tuple(counts)), dtype=self.dtype)
barcodes = np.array(np.concatenate(tuple(barcodes)), dtype=np.uint32)
genes = np.array(np.concatenate(tuple(genes)), dtype=np.uint32)
# Put the counts into a sparse csc_matrix.
self._mean = sp.csc_matrix((counts, (barcodes, genes)),
shape=self.count_matrix_shape)
class ProbPosterior(Posterior):
"""Posterior count inference using a noise count probability distribution.
Args:
dataset_obj: Dataset object.
vi_model: Trained model: RemoveBackgroundPyroModel
fpr: Desired false positive rate for construction of the final regularized
posterior on true counts. False positives are true counts that are
(incorrectly) removed from the dataset.
float_threshold: For floating point count matrices, counts below
this threshold will be set to zero, for the purposes of constructing
a sparse matrix. Unused if counts_dtype is np.uint32
Properties:
mean: Posterior count mean, as a sparse matrix.
encodings: Encoded latent variables, one per barcode in the dataset.
"""
def __init__(self,
dataset_obj: 'SingleCellRNACountsDataset',
vi_model: 'RemoveBackgroundPyroModel',
fpr: float = 0.01,
float_threshold: float = 0.5):
self.vi_model = vi_model
self.use_cuda = vi_model.use_cuda
self.fpr = fpr
self.lambda_multiplier = None
self._encodings = None
self._mean = None
self.random = np.random.RandomState(seed=1234)
super(ProbPosterior, self).__init__(dataset_obj=dataset_obj,
vi_model=vi_model,
counts_dtype=np.uint32,
float_threshold=float_threshold)
@torch.no_grad()
def _get_mean(self):
"""Send dataset through a guide that returns mean posterior counts.
Keep track of only what is necessary to distill a sparse count matrix.
"""
# Get a dataset of ten cells.
cell_inds =
|
np.where(self.latents['p'] > 0.9)
|
numpy.where
|
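# The completion uses numpy.where on a boolean condition to pick out likely-cell
# barcodes; a minimal sketch with invented probabilities.
import numpy as np
p = np.array([0.99, 0.12, 0.95, 0.40, 0.97])
# With only a condition, np.where returns a tuple of index arrays; [0] unpacks it.
cell_inds = np.where(p > 0.9)[0]
print(cell_inds)  # [0 2 4]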
''' Testing trackvis module '''
from __future__ import division, print_function, absolute_import
from functools import partial
import numpy as np
from io import BytesIO
from .. import trackvis as tv
from ..orientations import aff2axcodes
from ..volumeutils import native_code, swapped_code
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from ..testing import error_warnings, suppress_warnings
def test_write():
streams = []
out_f = BytesIO()
tv.write(out_f, [], {})
assert_equal(out_f.getvalue(), tv.empty_header().tostring())
out_f.truncate(0)
out_f.seek(0)
# Write something not-default
tv.write(out_f, [], {'id_string': 'TRACKb'})
# read it back
out_f.seek(0)
streams, hdr = tv.read(out_f)
assert_equal(hdr['id_string'], b'TRACKb')
# check that we can pass none for the header
out_f.truncate(0)
out_f.seek(0)
tv.write(out_f, [])
out_f.truncate(0)
out_f.seek(0)
tv.write(out_f, [], None)
# check that we check input values
out_f.truncate(0)
out_f.seek(0)
assert_raises(tv.HeaderError,
tv.write, out_f, [], {'id_string': 'not OK'})
assert_raises(tv.HeaderError,
tv.write, out_f, [], {'version': 3})
assert_raises(tv.HeaderError,
tv.write, out_f, [], {'hdr_size': 0})
def test_write_scalars_props():
# Test writing of scalar array with streamlines
N = 6
M = 2
P = 4
points = np.arange(N * 3).reshape((N, 3))
scalars = np.arange(N * M).reshape((N, M)) + 100
props = np.arange(P) + 1000
# If scalars not same size for each point, error
out_f = BytesIO()
streams = [(points, None, None),
(points, scalars, None)]
assert_raises(tv.DataError, tv.write, out_f, streams)
out_f.seek(0)
streams = [(points, np.zeros((N, M + 1)), None),
(points, scalars, None)]
assert_raises(tv.DataError, tv.write, out_f, streams)
# Or if scalars different N compared to points
bad_scalars = np.zeros((N + 1, M))
out_f.seek(0)
streams = [(points, bad_scalars, None),
(points, bad_scalars, None)]
assert_raises(tv.DataError, tv.write, out_f, streams)
# Similarly properties must have the same length for each streamline
out_f.seek(0)
streams = [(points, scalars, None),
(points, scalars, props)]
assert_raises(tv.DataError, tv.write, out_f, streams)
out_f.seek(0)
streams = [(points, scalars, np.zeros((P + 1,))),
(points, scalars, props)]
assert_raises(tv.DataError, tv.write, out_f, streams)
# If all is OK, then we get back what we put in
out_f.seek(0)
streams = [(points, scalars, props),
(points, scalars, props)]
tv.write(out_f, streams)
out_f.seek(0)
back_streams, hdr = tv.read(out_f)
for actual, expected in zip(streams, back_streams):
for a_el, e_el in zip(actual, expected):
assert_array_equal(a_el, e_el)
    # The same round trip works if the datatype of points, scalars is already
    # float32 (github issue #53)
out_f.seek(0)
streams = [(points.astype('f4'),
scalars.astype('f4'),
props.astype('f4'))]
tv.write(out_f, streams)
out_f.seek(0)
back_streams, hdr = tv.read(out_f)
for actual, expected in zip(streams, back_streams):
for a_el, e_el in zip(actual, expected):
assert_array_almost_equal(a_el, e_el)
def streams_equal(stream1, stream2):
if not np.all(stream1[0] == stream2[0]):
return False
if stream1[1] is None:
if not stream2[1] is None:
return False
if stream1[2] is None:
if not stream2[2] is None:
return False
if not np.all(stream1[1] == stream2[1]):
return False
if not np.all(stream1[2] == stream2[2]):
return False
return True
def streamlist_equal(streamlist1, streamlist2):
if len(streamlist1) != len(streamlist2):
return False
for s1, s2 in zip(streamlist1, streamlist2):
if not streams_equal(s1, s2):
return False
return True
def test_round_trip():
out_f = BytesIO()
xyz0 = np.tile(np.arange(5).reshape(5, 1), (1, 3))
xyz1 = np.tile(np.arange(5).reshape(5, 1) + 10, (1, 3))
streams = [(xyz0, None, None), (xyz1, None, None)]
tv.write(out_f, streams, {})
out_f.seek(0)
streams2, hdr = tv.read(out_f)
assert_true(streamlist_equal(streams, streams2))
# test that we can write in different endianness and get back same result,
# for versions 1, 2 and not-specified
for in_dict, back_version in (({}, 2),
({'version': 2}, 2),
({'version': 1}, 1)):
for endian_code in (native_code, swapped_code):
out_f.seek(0)
tv.write(out_f, streams, in_dict, endian_code)
out_f.seek(0)
streams2, hdr = tv.read(out_f)
assert_true(streamlist_equal(streams, streams2))
assert_equal(hdr['version'], back_version)
# test that we can get out and pass in generators
out_f.seek(0)
streams3, hdr = tv.read(out_f, as_generator=True)
# check this is a generator rather than a list
assert_true(hasattr(streams3, 'send'))
# but that it results in the same output
assert_true(streamlist_equal(streams, list(streams3)))
# write back in
out_f.seek(0)
streams3, hdr = tv.read(out_f, as_generator=True)
# Now we need a new file object, because we're still using the old one for
# our generator
out_f_write = BytesIO()
tv.write(out_f_write, streams3, {})
# and re-read just to check
out_f_write.seek(0)
streams2, hdr = tv.read(out_f_write)
assert_true(streamlist_equal(streams, streams2))
def test_points_processing():
# We may need to process points if they are in voxel or mm format
out_f = BytesIO()
def _rt(streams, hdr, points_space):
# run round trip through IO object
out_f.seek(0)
tv.write(out_f, streams, hdr, points_space=points_space)
out_f.seek(0)
res0 = tv.read(out_f)
out_f.seek(0)
return res0, tv.read(out_f, points_space=points_space)
n_pts = 5
ijk0 = np.arange(n_pts * 3).reshape((n_pts, 3)) / 2.0
ijk1 = ijk0 + 20
# Check with and without some scalars
for scalars in ((None, None),
(np.arange(n_pts)[:, None],
np.arange(n_pts)[:, None] + 99)):
vx_streams = [(ijk0, scalars[0], None), (ijk1, scalars[1], None)]
vxmm_streams = [(ijk0 * [[2, 3, 4]], scalars[0], None),
(ijk1 * [[2, 3, 4]], scalars[1], None)]
# voxmm is the default. In this case we don't do anything to the
# points, and we let the header pass through without further checks
(raw_streams, hdr), (proc_streams, _) = _rt(vxmm_streams, {}, None)
assert_true(streamlist_equal(raw_streams, proc_streams))
assert_true(streamlist_equal(vxmm_streams, proc_streams))
(raw_streams, hdr), (proc_streams, _) = _rt(vxmm_streams, {}, 'voxmm')
assert_true(streamlist_equal(raw_streams, proc_streams))
assert_true(streamlist_equal(vxmm_streams, proc_streams))
# with 'voxels' as input, check for not all voxel_size == 0, warn if any
# voxel_size == 0
for hdr in ( # these cause read / write errors
# empty header has 0 voxel sizes
{},
{'voxel_size': [0, 0, 0]}, # the default
{'voxel_size': [-2, 3, 4]}, # negative not valid
):
# Check error on write
out_f.seek(0)
assert_raises(tv.HeaderError,
tv.write, out_f, vx_streams, hdr, None, 'voxel')
out_f.seek(0)
# bypass write error and check read
tv.write(out_f, vxmm_streams, hdr, None, points_space=None)
out_f.seek(0)
assert_raises(tv.HeaderError, tv.read, out_f, False, 'voxel')
# There's a warning for any voxel sizes == 0
hdr = {'voxel_size': [2, 3, 0]}
with error_warnings():
assert_raises(UserWarning, _rt, vx_streams, hdr, 'voxel')
# This should be OK
hdr = {'voxel_size': [2, 3, 4]}
(raw_streams, hdr), (proc_streams, _) = _rt(vx_streams, hdr, 'voxel')
assert_true(streamlist_equal(vxmm_streams, raw_streams))
assert_true(streamlist_equal(vx_streams, proc_streams))
# Now we try with rasmm points. In this case we need valid voxel_size,
# and voxel_order, and vox_to_ras. The voxel_order has to match the
# vox_to_ras, and so do the voxel sizes
aff = np.diag([2, 3, 4, 1])
# In this case the trk -> vx and vx -> mm invert each other
rasmm_streams = vxmm_streams
for hdr in ( # all these cause read and write errors for rasmm
# Empty header has no valid affine
{},
# Error if ras_to_mm not defined (as in version 1)
{'voxel_size': [2, 3, 4], 'voxel_order': 'RAS', 'version': 1},
# or it's all zero
{'voxel_size': [2, 3, 4], 'voxel_order': 'RAS',
'vox_to_ras': np.zeros((4, 4))},
# as it is by default
{'voxel_size': [2, 3, 4], 'voxel_order': 'RAS'},
# or the voxel_size doesn't match the affine
{'voxel_size': [2, 2, 4], 'voxel_order': 'RAS',
'vox_to_ras': aff},
# or the voxel_order doesn't match the affine
{'voxel_size': [2, 3, 4], 'voxel_order': 'LAS',
'vox_to_ras': aff},
):
# Check error on write
out_f.seek(0)
assert_raises(tv.HeaderError,
tv.write, out_f, rasmm_streams, hdr, None, 'rasmm')
out_f.seek(0)
# bypass write error and check read
tv.write(out_f, vxmm_streams, hdr, None, points_space=None)
out_f.seek(0)
assert_raises(tv.HeaderError, tv.read, out_f, False, 'rasmm')
# This should be OK
hdr = {'voxel_size': [2, 3, 4], 'voxel_order': 'RAS',
'vox_to_ras': aff}
(raw_streams, hdr), (proc_streams, _) = _rt(rasmm_streams, hdr, 'rasmm')
assert_true(streamlist_equal(vxmm_streams, raw_streams))
assert_true(streamlist_equal(rasmm_streams, proc_streams))
# More complex test to check matrix orientation
fancy_affine = np.array([[0., -2, 0, 10],
[3, 0, 0, 20],
[0, 0, 4, 30],
[0, 0, 0, 1]])
hdr = {'voxel_size': [3, 2, 4], 'voxel_order': 'ALS',
'vox_to_ras': fancy_affine}
def f(pts): # from vx to mm
pts = pts[:, [1, 0, 2]] * [[-2, 3, 4]] # apply zooms / reorder
return pts + [[10, 20, 30]] # apply translations
xyz0, xyz1 = f(ijk0), f(ijk1)
fancy_rasmm_streams = [(xyz0, scalars[0], None),
(xyz1, scalars[1], None)]
fancy_vxmm_streams = [(ijk0 * [[3, 2, 4]], scalars[0], None),
(ijk1 * [[3, 2, 4]], scalars[1], None)]
(raw_streams, hdr), (proc_streams, _) = _rt(
fancy_rasmm_streams, hdr, 'rasmm')
assert_true(streamlist_equal(fancy_vxmm_streams, raw_streams))
assert_true(streamlist_equal(fancy_rasmm_streams, proc_streams))
def test__check_hdr_points_space():
# Test checking routine for points_space input given header
# None or voxmm -> no checks, pass through
assert_equal(tv._check_hdr_points_space({}, None), None)
assert_equal(tv._check_hdr_points_space({}, 'voxmm'), None)
# strange value for points_space -> ValueError
assert_raises(ValueError,
tv._check_hdr_points_space, {}, 'crazy')
# Input not in (None, 'voxmm', 'voxels', 'rasmm') - error
# voxels means check voxel sizes present and not all 0.
hdr = tv.empty_header()
assert_array_equal(hdr['voxel_size'], [0, 0, 0])
assert_raises(tv.HeaderError,
tv._check_hdr_points_space, hdr, 'voxel')
# Negative voxel size gives error - because it is not what trackvis does,
# and this not what we mean by 'voxmm'
hdr['voxel_size'] = [-2, 3, 4]
assert_raises(tv.HeaderError,
tv._check_hdr_points_space, hdr, 'voxel')
# Warning here only
hdr['voxel_size'] = [2, 3, 0]
with error_warnings():
assert_raises(UserWarning,
tv._check_hdr_points_space, hdr, 'voxel')
# This is OK
hdr['voxel_size'] = [2, 3, 4]
assert_equal(tv._check_hdr_points_space(hdr, 'voxel'), None)
# rasmm - check there is an affine, that it matches voxel_size and
# voxel_order
# no affine
hdr['voxel_size'] = [2, 3, 4]
assert_raises(tv.HeaderError,
tv._check_hdr_points_space, hdr, 'rasmm')
# still no affine
hdr['voxel_order'] = 'RAS'
assert_raises(tv.HeaderError,
tv._check_hdr_points_space, hdr, 'rasmm')
# nearly an affine, but 0 at position 3,3 - means not recorded in trackvis
# standard
hdr['vox_to_ras'] = np.diag([2, 3, 4, 0])
assert_raises(tv.HeaderError,
tv._check_hdr_points_space, hdr, 'rasmm')
# This affine doesn't match RAS voxel order
hdr['vox_to_ras'] = np.diag([-2, 3, 4, 1])
assert_raises(tv.HeaderError,
tv._check_hdr_points_space, hdr, 'rasmm')
# This affine doesn't match the voxel size
hdr['vox_to_ras'] = np.diag([3, 3, 4, 1])
assert_raises(tv.HeaderError,
tv._check_hdr_points_space, hdr, 'rasmm')
# This should be OK
good_aff = np.diag([2, 3, 4, 1])
hdr['vox_to_ras'] = good_aff
assert_equal(tv._check_hdr_points_space(hdr, 'rasmm'),
None)
# Default voxel order of LPS assumed
hdr['voxel_order'] = ''
# now the RAS affine raises an error
assert_raises(tv.HeaderError,
tv._check_hdr_points_space, hdr, 'rasmm')
# this affine does have LPS voxel order
good_lps = np.dot(np.diag([-1, -1, 1, 1]), good_aff)
hdr['vox_to_ras'] = good_lps
assert_equal(tv._check_hdr_points_space(hdr, 'rasmm'),
None)
def test_empty_header():
for endian in '<>':
for version in (1, 2):
hdr = tv.empty_header(endian, version)
assert_equal(hdr['id_string'], b'TRACK')
assert_equal(hdr['version'], version)
assert_equal(hdr['hdr_size'], 1000)
assert_array_equal(
hdr['image_orientation_patient'],
[0, 0, 0, 0, 0, 0])
hdr = tv.empty_header(version=2)
assert_array_equal(hdr['vox_to_ras'], np.zeros((4, 4)))
hdr_endian = tv.endian_codes[tv.empty_header().dtype.byteorder]
assert_equal(hdr_endian, tv.native_code)
def test_get_affine():
# Test get affine behavior, including pending deprecation
hdr = tv.empty_header()
    # Using version 1 affine is not a good idea because it is fragile and not
    # very useful. The default atleast_v2=None mode raises a FutureWarning
with error_warnings():
assert_raises(FutureWarning, tv.aff_from_hdr, hdr)
# testing the old behavior
old_afh = partial(tv.aff_from_hdr, atleast_v2=False)
# default header gives useless affine
assert_array_equal(old_afh(hdr),
np.diag([0, 0, 0, 1]))
hdr['voxel_size'] = 1
assert_array_equal(old_afh(hdr),
np.diag([0, 0, 0, 1]))
# DICOM direction cosines
hdr['image_orientation_patient'] = [1, 0, 0, 0, 1, 0]
assert_array_equal(old_afh(hdr),
np.diag([-1, -1, 1, 1]))
# RAS direction cosines
hdr['image_orientation_patient'] = [-1, 0, 0, 0, -1, 0]
assert_array_equal(old_afh(hdr),
np.eye(4))
# translations
hdr['origin'] = [1, 2, 3]
exp_aff = np.eye(4)
exp_aff[:3, 3] = [-1, -2, 3]
assert_array_equal(old_afh(hdr),
exp_aff)
# check against voxel order. This one works
hdr['voxel_order'] = ''.join(aff2axcodes(exp_aff))
assert_equal(hdr['voxel_order'], b'RAS')
assert_array_equal(old_afh(hdr), exp_aff)
# This one doesn't
hdr['voxel_order'] = 'LAS'
assert_raises(tv.HeaderError, old_afh, hdr)
# This one does work because the routine allows the final dimension to
# be flipped to try and match the voxel order
hdr['voxel_order'] = 'RAI'
exp_aff = exp_aff * [[1, 1, -1, 1]]
assert_array_equal(old_afh(hdr), exp_aff)
# Check round trip case for flipped and unflipped, when positive voxels
# only allowed. This checks that the flipping heuristic works.
flipped_aff = exp_aff
unflipped_aff = exp_aff * [1, 1, -1, 1]
for in_aff, o_codes in ((unflipped_aff, b'RAS'),
(flipped_aff, b'RAI')):
hdr = tv.empty_header()
tv.aff_to_hdr(in_aff, hdr, pos_vox=True, set_order=True)
# Unset easier option
hdr['vox_to_ras'] = 0
assert_equal(hdr['voxel_order'], o_codes)
# Check it came back the way we wanted
assert_array_equal(old_afh(hdr), in_aff)
# Check that the default case matches atleast_v2=False case
with suppress_warnings():
assert_array_equal(tv.aff_from_hdr(hdr), flipped_aff)
# now use the easier vox_to_ras field
hdr = tv.empty_header()
aff = np.eye(4)
aff[:3, :] = np.arange(12).reshape(3, 4)
hdr['vox_to_ras'] = aff
# Pass v2 flag explicitly to avoid warnings
assert_array_equal(tv.aff_from_hdr(hdr, atleast_v2=False), aff)
# mappings work too
d = {'version': 1,
'voxel_size': np.array([1, 2, 3]),
'image_orientation_patient': np.array([1, 0, 0, 0, 1, 0]),
'origin': np.array([10, 11, 12])}
aff = tv.aff_from_hdr(d, atleast_v2=False)
def test_aff_to_hdr():
# The behavior is changing soon, change signaled by FutureWarnings
# This is the call to get the old behavior
old_a2h = partial(tv.aff_to_hdr, pos_vox=False, set_order=False)
hdr = {'version': 1}
affine = np.diag([1, 2, 3, 1])
affine[:3, 3] = [10, 11, 12]
old_a2h(affine, hdr)
assert_array_almost_equal(tv.aff_from_hdr(hdr, atleast_v2=False), affine)
# put flip into affine
aff2 = affine.copy()
aff2[:, 2] *= -1
old_a2h(aff2, hdr)
# Historically we flip the first axis if there is a negative determinant
assert_array_almost_equal(hdr['voxel_size'], [-1, 2, 3])
assert_array_almost_equal(tv.aff_from_hdr(hdr, atleast_v2=False), aff2)
    # Test that default mode raises FutureWarning
with error_warnings():
assert_raises(FutureWarning, tv.aff_to_hdr, affine, hdr)
assert_raises(FutureWarning, tv.aff_to_hdr, affine, hdr, None, None)
assert_raises(FutureWarning, tv.aff_to_hdr, affine, hdr, False, None)
assert_raises(FutureWarning, tv.aff_to_hdr, affine, hdr, None, False)
# And has same effect as above
with suppress_warnings():
tv.aff_to_hdr(affine, hdr)
assert_array_almost_equal(tv.aff_from_hdr(hdr, atleast_v2=False), affine)
# Check pos_vox and order flags
for hdr in ({}, {'version': 2}, {'version': 1}):
tv.aff_to_hdr(aff2, hdr, pos_vox=True, set_order=False)
|
assert_array_equal(hdr['voxel_size'], [1, 2, 3])
|
numpy.testing.assert_array_equal
|
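# assert_array_equal, used in the completion, compares arrays elementwise and raises
# AssertionError describing the first mismatch; a tiny sketch with an invented header.
import numpy as np
from numpy.testing import assert_array_equal
hdr = {'voxel_size': np.array([1.0, 2.0, 3.0])}
# Passes silently: the integer expectations compare equal to the float values.
assert_array_equal(hdr['voxel_size'], [1, 2, 3])
try:
    assert_array_equal(hdr['voxel_size'], [1, 2, 4])
except AssertionError:
    print('mismatch detected')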
import csv
from collections import defaultdict
import json
import logging
import math
import os
import time
import ctk
import numpy as np
import qt
import scipy.spatial
import vtk
import slicer
from slicer.ScriptedLoadableModule import *
from slicer.util import NodeModify
# needed for topological sort. Yes, this is basically just DFS.
try:
import networkx as nx
except ModuleNotFoundError as e:
# This requires a network connection!
slicer.util.pip_install('networkx')
import networkx as nx
#
# CalculateDisplacement
#
class Q3DC(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
parent.title = "Q3DC "
parent.categories = ["Quantification"]
parent.dependencies = []
parent.contributors = [
'<NAME> (University of Michigan)',
'<NAME> (University of Michigan)',
'<NAME> (Kitware Inc)',
]
parent.helpText = """
"""
parent.acknowledgementText = """
This work was supported by the National Institute of Dental
& Craniofacial Research and the National Institute of Biomedical
Imaging and Bioengineering under Award Number R01DE024450.
The content is solely the responsibility of the authors and does
not necessarily represent the official views of the National
Institutes of Health.
"""
self.parent = parent
class Q3DCWidget(ScriptedLoadableModuleWidget):
def setup(self):
logging.debug("Q3DC Widget Setup")
ScriptedLoadableModuleWidget.setup(self)
# GLOBALS:
self.interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
self.computedAnglesList = list()
self.renderer1 = None
self.actor1 = None
self.renderer2 = None
self.actor2 = None
self.renderer3 = None
self.actor3 = None
# Load widget from .ui file (created by Qt Designer)
uiWidget = slicer.util.loadUI(self.resourcePath('UI/Q3DC.ui'))
self.layout.addWidget(uiWidget)
self.ui = slicer.util.childWidgetVariables(uiWidget)
self.logic = Q3DCLogic(self.ui)
self.logic.UpdateInterface = self.UpdateInterface
# -------------------------- Scene ---------------------------
        self.SceneCollapsibleButton = self.ui.SceneCollapsibleButton # this attribute is useful for Longitudinal quantification extension
treeView = self.ui.treeView
treeView.setMRMLScene(slicer.app.mrmlScene())
treeView.sceneModel().setHorizontalHeaderLabels(["Models"])
treeView.sortFilterProxyModel().nodeTypes = ['vtkMRMLModelNode','vtkMRMLMarkupsFiducialNode']
treeView.header().setVisible(False)
# ------------------ Landmark Modification -------------------
        self.inputModelLabel = self.ui.inputModelLabel # this attribute is useful for Longitudinal quantification extension
        self.inputLandmarksLabel = self.ui.inputLandmarksLabel # this attribute is useful for Longitudinal quantification extension
self.ui.inputModelSelector.setMRMLScene(slicer.mrmlScene)
self.ui.inputModelSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onModelChanged)
self.ui.addLandmarkButton.connect('clicked()', self.onAddLandmarkButtonClicked)
self.ui.inputLandmarksSelector.setMRMLScene(slicer.mrmlScene)
self.ui.inputLandmarksSelector.setEnabled(False) # The "enable" property seems to not be imported from the .ui
self.ui.inputLandmarksSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onLandmarksChanged)
self.ui.landmarkComboBox.connect('currentIndexChanged(QString)', self.UpdateInterface)
self.ui.surfaceDeplacementCheckBox.connect('stateChanged(int)', self.onSurfaceDeplacementStateChanged)
self.ui.loadLandmarksOnSurfaceCheckBox.connect('stateChanged(int)', self.onLoadLandmarksOnSurfaceStateChanged)
# --------------------- Anatomical Legend --------------------
self.suggested_landmarks = self.logic.load_suggested_landmarks(
self.resourcePath('Data/base_fiducial_legend.csv'))
self.anatomical_legend_space = self.ui.landmarkModifLayout
self.anatomical_radio_buttons_layout = qt.QHBoxLayout()
self.anatomical_legend_space.addLayout(self.anatomical_radio_buttons_layout)
self.anatomical_legend = None
self.init_anatomical_legend()
self.anatomical_legend_view = slicer.qMRMLTableView()
self.anatomical_legend_view.setMRMLTableNode(self.anatomical_legend)
self.anatomical_legend_space.addWidget(self.anatomical_legend_view)
self.anatomical_legend_view.show()
self.anatomical_legend_view.setSelectionBehavior(
qt.QAbstractItemView.SelectRows
)
self.anatomical_legend_view.connect('selectionChanged()', self.on_legend_row_selected)
self.init_anatomical_radio_buttons()
self.ui.legendFileButton.connect('clicked()', self.on_select_legend_file_clicked)
# -------------------- Compute Mid Point ---------------------
self.ui.landmarkComboBox1.connect('currentIndexChanged(int)', self.UpdateInterface)
self.ui.landmarkComboBox2.connect('currentIndexChanged(int)', self.UpdateInterface)
self.ui.defineMiddlePointButton.connect('clicked()', self.onDefineMidPointClicked)
# ------------------- Calculate Distances --------------------
self.ui.fidListComboBoxA.setMRMLScene(slicer.mrmlScene)
self.ui.fidListComboBoxB.setMRMLScene(slicer.mrmlScene)
self.ui.computeDistancesPushButton.connect('clicked()', self.onComputeDistanceClicked)
self.ui.landmarkComboBoxA.connect('currentIndexChanged(int)', self.UpdateInterface)
self.ui.landmarkComboBoxB.connect('currentIndexChanged(int)', self.UpdateInterface)
self.ui.fidListComboBoxA.connect('currentNodeChanged(vtkMRMLNode*)',
lambda: self.logic.UpdateLandmarkComboboxA(self.ui.fidListComboBoxA, self.ui.landmarkComboBoxA))
self.ui.fidListComboBoxB.connect('currentNodeChanged(vtkMRMLNode*)',
lambda: self.logic.UpdateLandmarkComboboxA(self.ui.fidListComboBoxB, self.ui.landmarkComboBoxB))
# ---------------------- Save Distances ----------------------
self.distance_table = self.logic.createDistanceTable()
slicer.mrmlScene.AddNode(self.distance_table)
self.distance_table_view = slicer.qMRMLTableView()
self.distance_table_view.setMRMLTableNode(self.distance_table)
self.directoryExportDistance = ctk.ctkDirectoryButton()
self.filenameExportDistance = qt.QLineEdit('distance.csv')
self.exportDistanceButton = qt.QPushButton(" Export ")
self.exportDistanceButton.connect('clicked()', self.onExportButton)
self.exportDistanceButton.enabled = True
self.deleteDistanceRowButton = qt.QPushButton("Delete Selected Row")
self.deleteDistanceRowButton.connect('clicked()', self.distance_table_view.deleteRow)
self.pathExportDistanceLayout = qt.QVBoxLayout()
self.pathExportDistanceLayout.addWidget(self.directoryExportDistance)
self.pathExportDistanceLayout.addWidget(self.filenameExportDistance)
self.exportDistanceLayout = qt.QHBoxLayout()
self.exportDistanceLayout.addLayout(self.pathExportDistanceLayout)
self.exportDistanceLayout.addWidget(self.exportDistanceButton)
self.tableAndExportLayout = qt.QVBoxLayout()
self.tableAndExportLayout.addWidget(self.distance_table_view)
self.tableAndExportLayout.addWidget(self.deleteDistanceRowButton)
self.tableAndExportLayout.addLayout(self.exportDistanceLayout)
# --------------------- Calculate Angles ---------------------
self.ui.fidListComboBoxline1LA.setMRMLScene(slicer.mrmlScene)
self.ui.fidListComboBoxline1LB.setMRMLScene(slicer.mrmlScene)
self.ui.fidListComboBoxline2LA.setMRMLScene(slicer.mrmlScene)
self.ui.fidListComboBoxline2LB.setMRMLScene(slicer.mrmlScene)
self.ui.fidListComboBoxline1LA.connect('currentNodeChanged(vtkMRMLNode*)',
lambda: self.logic.UpdateLandmarkComboboxA(self.ui.fidListComboBoxline1LA, self.ui.line1LAComboBox))
self.ui.fidListComboBoxline1LB.connect('currentNodeChanged(vtkMRMLNode*)',
lambda: self.logic.UpdateLandmarkComboboxA(self.ui.fidListComboBoxline1LB, self.ui.line1LBComboBox))
self.ui.fidListComboBoxline2LA.connect('currentNodeChanged(vtkMRMLNode*)',
lambda: self.logic.UpdateLandmarkComboboxA(self.ui.fidListComboBoxline2LA, self.ui.line2LAComboBox))
self.ui.fidListComboBoxline2LB.connect('currentNodeChanged(vtkMRMLNode*)',
lambda: self.logic.UpdateLandmarkComboboxA(self.ui.fidListComboBoxline2LB, self.ui.line2LBComboBox))
self.ui.computeAnglesPushButton.connect('clicked()', self.onComputeAnglesClicked)
self.ui.line1LAComboBox.connect('currentIndexChanged(int)', self.UpdateInterface)
self.ui.line1LBComboBox.connect('currentIndexChanged(int)', self.UpdateInterface)
self.ui.line2LAComboBox.connect('currentIndexChanged(int)', self.UpdateInterface)
self.ui.line2LBComboBox.connect('currentIndexChanged(int)', self.UpdateInterface)
self.ui.pitchCheckBox.connect('clicked(bool)', self.UpdateInterface)
self.ui.rollCheckBox.connect('clicked(bool)', self.UpdateInterface)
self.ui.yawCheckBox.connect('clicked(bool)', self.UpdateInterface)
# ----------------------- Save Angles ------------------------
self.angles_table = self.logic.createAnglesTable()
slicer.mrmlScene.AddNode(self.angles_table)
self.angles_table_view = slicer.qMRMLTableView()
self.angles_table_view.setMRMLTableNode(self.angles_table)
self.directoryExportAngle = ctk.ctkDirectoryButton()
self.filenameExportAngle = qt.QLineEdit('angle.csv')
self.exportAngleButton = qt.QPushButton("Export")
self.exportAngleButton.connect('clicked()', self.onExportAngleButton)
self.exportAngleButton.enabled = True
self.deleteAngleRowButton = qt.QPushButton("Delete Selected Row")
self.deleteAngleRowButton.connect('clicked()', self.angles_table_view.deleteRow)
self.pathExportAngleLayout = qt.QVBoxLayout()
self.pathExportAngleLayout.addWidget(self.directoryExportAngle)
self.pathExportAngleLayout.addWidget(self.filenameExportAngle)
self.exportAngleLayout = qt.QHBoxLayout()
self.exportAngleLayout.addLayout(self.pathExportAngleLayout)
self.exportAngleLayout.addWidget(self.exportAngleButton)
self.tableAndExportAngleLayout = qt.QVBoxLayout()
self.tableAndExportAngleLayout.addWidget(self.angles_table_view)
self.tableAndExportAngleLayout.addWidget(self.deleteAngleRowButton)
self.tableAndExportAngleLayout.addLayout(self.exportAngleLayout)
# -------------- Calculate Line-Point Distances --------------
self.ui.fidListComboBoxlineLA.setMRMLScene(slicer.mrmlScene)
self.ui.fidListComboBoxlineLB.setMRMLScene(slicer.mrmlScene)
self.ui.fidListComboBoxlinePoint.setMRMLScene(slicer.mrmlScene)
self.ui.fidListComboBoxlineLA.connect('currentNodeChanged(vtkMRMLNode*)',
lambda: self.logic.UpdateLandmarkComboboxA(self.ui.fidListComboBoxlineLA, self.ui.lineLAComboBox))
self.ui.fidListComboBoxlineLB.connect('currentNodeChanged(vtkMRMLNode*)',
lambda: self.logic.UpdateLandmarkComboboxA(self.ui.fidListComboBoxlineLB, self.ui.lineLBComboBox))
self.ui.fidListComboBoxlinePoint.connect('currentNodeChanged(vtkMRMLNode*)',
lambda: self.logic.UpdateLandmarkComboboxA(self.ui.fidListComboBoxlinePoint, self.ui.linePointComboBox))
self.ui.computeLinePointPushButton.connect('clicked()', self.onComputeLinePointClicked)
self.ui.lineLAComboBox.connect('currentIndexChanged(int)', self.UpdateInterface)
self.ui.lineLBComboBox.connect('currentIndexChanged(int)', self.UpdateInterface)
# ---------------- Save Line-Point Distances -----------------
self.line_point_table = self.logic.createLinePointTable()
slicer.mrmlScene.AddNode(self.line_point_table)
self.line_point_table_view = slicer.qMRMLTableView()
self.line_point_table_view.setMRMLTableNode(self.line_point_table)
self.directoryExportLinePoint = ctk.ctkDirectoryButton()
self.filenameExportLinePoint = qt.QLineEdit('linePoint.csv')
self.exportLinePointButton = qt.QPushButton("Export")
self.exportLinePointButton.connect('clicked()', self.onExportLinePointButton)
self.exportLinePointButton.enabled = True
self.deleteLinePointRowButton = qt.QPushButton("Delete Selected Row")
self.deleteLinePointRowButton.connect('clicked()', self.line_point_table_view.deleteRow)
self.pathExportLinePointLayout = qt.QVBoxLayout()
self.pathExportLinePointLayout.addWidget(self.directoryExportLinePoint)
self.pathExportLinePointLayout.addWidget(self.filenameExportLinePoint)
self.exportLinePointLayout = qt.QHBoxLayout()
self.exportLinePointLayout.addLayout(self.pathExportLinePointLayout)
self.exportLinePointLayout.addWidget(self.exportLinePointButton)
self.tableAndExportLinePointLayout = qt.QVBoxLayout()
self.tableAndExportLinePointLayout.addWidget(self.line_point_table_view)
self.tableAndExportLinePointLayout.addWidget(self.deleteLinePointRowButton)
self.tableAndExportLinePointLayout.addLayout(self.exportLinePointLayout)
# INITIALISATION:
slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)
self.UpdateInterface()
self.logic.initComboboxdict()
def onCloseScene(self, obj, event):
        nodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLModelNode")
        end = nodes.GetNumberOfItems()
        for i in range(end):
            model = nodes.GetItemAsObject(i)
hardenModel = slicer.mrmlScene.GetNodesByName(model.GetName()).GetItemAsObject(0)
slicer.mrmlScene.RemoveNode(hardenModel)
if self.renderer1 :
self.renderer1.RemoveActor(self.actor1)
if self.renderer2 :
self.renderer2.RemoveActor(self.actor2)
if self.renderer3 :
            self.renderer3.RemoveActor(self.actor3)
self.ui.landmarkComboBox1.clear()
self.ui.landmarkComboBox.clear()
self.ui.fidListComboBoxA.setCurrentNode(None)
self.ui.fidListComboBoxB.setCurrentNode(None)
self.ui.fidListComboBoxline1LA.setCurrentNode(None)
self.ui.fidListComboBoxline1LB.setCurrentNode(None)
self.ui.fidListComboBoxline2LA.setCurrentNode(None)
self.ui.fidListComboBoxline2LB.setCurrentNode(None)
self.ui.line1LAComboBox.clear()
self.ui.line1LBComboBox.clear()
self.ui.line2LAComboBox.clear()
self.ui.line2LBComboBox.clear()
self.ui.landmarkComboBox2.clear()
self.ui.fidListComboBoxline2LB.setCurrentNode(None)
self.ui.inputModelSelector.setCurrentNode(None)
self.ui.inputLandmarksSelector.setCurrentNode(None)
self.distance_table.RemoveAllColumns()
self.angles_table.RemoveAllColumns()
self.line_point_table.RemoveAllColumns()
def enter(self):
logging.debug("enter Q3DC")
model = self.ui.inputModelSelector.currentNode()
fidlist = self.ui.inputLandmarksSelector.currentNode()
if fidlist:
if fidlist.GetAttribute("connectedModelID") != model.GetID():
self.ui.inputModelSelector.setCurrentNode(None)
self.ui.inputLandmarksSelector.setCurrentNode(None)
self.ui.landmarkComboBox.clear()
self.UpdateInterface()
# Checking the names of the fiducials
        nodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLMarkupsFiducialNode")
        end = nodes.GetNumberOfItems()
        for i in range(end):
            fidList = nodes.GetItemAsObject(i)
landmarkDescription = self.logic.decodeJSON(fidList.GetAttribute("landmarkDescription"))
if landmarkDescription:
for n in range(fidList.GetNumberOfMarkups()):
markupID = fidList.GetNthMarkupID(n)
markupLabel = fidList.GetNthMarkupLabel(n)
landmarkDescription[markupID]["landmarkLabel"] = markupLabel
fidList.SetAttribute("landmarkDescription",self.logic.encodeJSON(landmarkDescription))
def UpdateInterface(self):
self.ui.defineMiddlePointButton.enabled = self.ui.landmarkComboBox1.currentText != '' and \
self.ui.landmarkComboBox2.currentText != '' and \
self.ui.landmarkComboBox1.currentText != self.ui.landmarkComboBox2.currentText
self.ui.computeDistancesPushButton.enabled = self.ui.landmarkComboBoxA.currentText != '' and\
self.ui.landmarkComboBoxB.currentText != '' and\
(self.ui.fidListComboBoxA.currentNodeID, self.ui.landmarkComboBoxA.currentText) != \
(self.ui.fidListComboBoxB.currentNodeID, self.ui.landmarkComboBoxB.currentText)
self.ui.computeAnglesPushButton.enabled = self.ui.line1LAComboBox.currentText != '' and\
self.ui.line1LBComboBox.currentText != '' and\
self.ui.line2LAComboBox.currentText != '' and\
self.ui.line2LBComboBox.currentText != '' and\
(self.ui.fidListComboBoxline1LA.currentNodeID, self.ui.line1LAComboBox.currentText) != \
(self.ui.fidListComboBoxline1LB.currentNodeID, self.ui.line1LBComboBox.currentText) and\
(self.ui.fidListComboBoxline2LA.currentNodeID, self.ui.line2LAComboBox.currentText) != \
(self.ui.fidListComboBoxline2LB.currentNodeID, self.ui.line2LBComboBox.currentText) and\
(self.ui.pitchCheckBox.isChecked() or
self.ui.rollCheckBox.isChecked() or
self.ui.yawCheckBox.isChecked() )
self.ui.computeLinePointPushButton.enabled = self.ui.lineLAComboBox.currentText != '' and\
self.ui.lineLBComboBox.currentText != '' and\
self.ui.linePointComboBox.currentText != '' and\
(self.ui.fidListComboBoxlineLA.currentNodeID, self.ui.lineLAComboBox.currentText) != \
(self.ui.fidListComboBoxlineLB.currentNodeID, self.ui.lineLBComboBox.currentText)
# Clear Lines:
if self.renderer1 :
self.renderer1.RemoveActor(self.actor1)
self.renderer1 = None
if self.renderer2 :
self.renderer2.RemoveActor(self.actor2)
self.renderer2 = None
if self.renderer3 :
self.renderer3.RemoveActor(self.actor3)
self.renderer3 = None
if self.ui.line1LAComboBox.currentText != '' and\
self.ui.line1LBComboBox.currentText != '' and\
self.ui.line1LAComboBox.currentText != self.ui.line1LBComboBox.currentText :
self.renderer1, self.actor1 = \
self.logic.drawLineBetween2Landmark(self.ui.line1LAComboBox.currentText,
self.ui.line1LBComboBox.currentText,
self.ui.fidListComboBoxline1LA.currentNode(),
self.ui.fidListComboBoxline1LB.currentNode())
if self.ui.line2LAComboBox.currentText != '' and\
self.ui.line2LBComboBox.currentText != '' and\
self.ui.line2LAComboBox.currentText != self.ui.line2LBComboBox.currentText :
self.renderer2, self.actor2 = \
self.logic.drawLineBetween2Landmark(self.ui.line2LAComboBox.currentText,
self.ui.line2LBComboBox.currentText,
self.ui.fidListComboBoxline2LA.currentNode(),
self.ui.fidListComboBoxline2LB.currentNode())
if self.ui.lineLAComboBox.currentText != '' and\
self.ui.lineLBComboBox.currentText != '' and\
self.ui.lineLAComboBox.currentText != self.ui.lineLBComboBox.currentText:
self.renderer3, self.actor3 = \
self.logic.drawLineBetween2Landmark(self.ui.lineLAComboBox.currentText,
self.ui.lineLBComboBox.currentText,
self.ui.fidListComboBoxlineLA.currentNode(),
self.ui.fidListComboBoxlineLB.currentNode())
self.logic.UpdateThreeDView(self.ui.landmarkComboBox.currentText)
def init_anatomical_legend(self):
if self.anatomical_legend is None:
for table_node in slicer.mrmlScene.GetNodesByClass('vtkMRMLTableNode'):
if table_node.GetAttribute('Q3DC.is_anatomical_legend') == 'True':
self.anatomical_legend = table_node
if self.anatomical_legend is None:
self.anatomical_legend = slicer.vtkMRMLTableNode()
self.anatomical_legend.SetSaveWithScene(False)
self.anatomical_legend.SetLocked(True)
slicer.mrmlScene.AddNode(self.anatomical_legend)
self.anatomical_legend.SetAttribute('Q3DC.is_anatomical_legend', 'True')
al = self.anatomical_legend
with NodeModify(al):
al.RemoveAllColumns()
al.AddColumn().SetName('Landmark')
al.AddColumn().SetName('Description')
al.SetUseColumnNameAsColumnHeader(True)
def init_anatomical_radio_buttons(self):
self.anatomical_radio_buttons = \
[qt.QRadioButton(region) for region in self.suggested_landmarks.keys()]
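        # Remove radio buttons left over from a previously loaded legend before adding the new ones.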
for i in range(self.anatomical_radio_buttons_layout.count()-1, -1, -1):
self.anatomical_radio_buttons_layout.itemAt(i).widget().setParent(None)
for radio_button in self.anatomical_radio_buttons:
self.anatomical_radio_buttons_layout.addWidget(radio_button)
radio_button.toggled.connect(
lambda state, _radio_button=radio_button:
self.on_anatomical_radio_button_toggled(state, _radio_button)
)
self.anatomical_radio_buttons[0].toggle()
def on_anatomical_radio_button_toggled(self, state, radio_button):
if state:
self.init_anatomical_legend()
region = radio_button.text
# set this in the logic rather than the widget since events are handled there.
self.logic.current_suggested_landmarks = self.suggested_landmarks[region]
al = self.anatomical_legend
with NodeModify(al):
for landmark, description in self.logic.current_suggested_landmarks:
new_row_index = al.AddEmptyRow()
al.SetCellText(new_row_index, 0, landmark)
al.SetCellText(new_row_index, 1, description)
self.anatomical_legend_view.resizeColumnsToContents()
def on_legend_row_selected(self):
# Calculate the index of the selected point.
fidList = self.logic.selectedFidList
if not fidList:
return
selectedFidReflID = self.logic.findIDFromLabel(
fidList,
self.ui.landmarkComboBox.currentText
)
if selectedFidReflID is None:
# code would run correctly if we continued but wouldn't do anything
return
fid_index = fidList.GetNthControlPointIndexByID(selectedFidReflID)
old_name = fidList.GetNthControlPointLabel(fid_index)
# Look in the legend for the info from the selected row.
selected_indices = self.anatomical_legend_view.selectedIndexes()
if len(selected_indices) != 2:
return
name_index, description_index = selected_indices
row_index = name_index.row()
name = self.anatomical_legend.GetCellText(row_index, 0)
description = self.anatomical_legend.GetCellText(row_index, 1)
# Refuse to create multiple fiducials with the same name.
for i in range(fidList.GetNumberOfControlPoints()):
if name == fidList.GetNthControlPointLabel(i):
return
# Set the name and description of the selected point.
fidList.SetNthControlPointLabel(fid_index, name)
fidList.SetNthControlPointDescription(fid_index, description)
# Update the landmark combo boxes to reflect the name change.
self.logic.updateLandmarkComboBox(fidList, self.ui.landmarkComboBox, False)
self.ui.landmarkComboBox.setCurrentText(name)
for box in (self.ui.landmarkComboBox1, self.ui.landmarkComboBox2):
new_selection = box.currentText
if new_selection == old_name:
new_selection = name
self.logic.updateLandmarkComboBox(fidList, box)
box.setCurrentText(new_selection)
self.UpdateInterface()
def on_select_legend_file_clicked(self):
legend_filename = qt.QFileDialog.getOpenFileName(
None,'Select File', '', 'CSV (*.csv)')
if legend_filename == '':
# User canceled the file selection dialog.
return
suggested_landmarks = self.logic.load_suggested_landmarks(
legend_filename)
if suggested_landmarks is None:
return
self.suggested_landmarks = suggested_landmarks
self.init_anatomical_radio_buttons()
def onModelChanged(self):
logging.debug("Model Changed")
if self.logic.selectedModel:
Model = self.logic.selectedModel
try:
Model.RemoveObserver(self.logic.decodeJSON(self.logic.selectedModel.GetAttribute("modelModifieTagEvent")))
except:
pass
self.logic.selectedModel = self.ui.inputModelSelector.currentNode()
self.logic.ModelChanged(self.ui.inputModelSelector, self.ui.inputLandmarksSelector)
self.ui.inputLandmarksSelector.setCurrentNode(None)
def onLandmarksChanged(self):
logging.debug("Landmarks Changed")
if self.ui.inputModelSelector.currentNode():
self.logic.FidList = self.ui.inputLandmarksSelector.currentNode()
self.logic.selectedFidList = self.ui.inputLandmarksSelector.currentNode()
self.logic.selectedModel = self.ui.inputModelSelector.currentNode()
if self.ui.inputLandmarksSelector.currentNode():
onSurface = self.ui.loadLandmarksOnSurfaceCheckBox.isChecked()
self.logic.connectLandmarks(self.ui.inputModelSelector,
self.ui.inputLandmarksSelector,
onSurface)
else:
self.ui.landmarkComboBox.clear()
def onAddLandmarkButtonClicked(self):
        # Add a fiducial to the scene.
        # If no input model is selected, adding a fiducial should not be possible.
selectionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLSelectionNodeSingleton")
selectionNode.SetReferenceActivePlaceNodeClassName("vtkMRMLMarkupsFiducialNode")
self.logic.enable_legend_labels = self.ui.enableLegendLabels.isChecked()
if self.logic.selectedModel:
if self.logic.selectedFidList:
selectionNode.SetActivePlaceNodeID(self.logic.selectedFidList.GetID())
self.interactionNode.SetCurrentInteractionMode(1)
else:
self.logic.warningMessage("Please select a fiducial list")
else:
self.logic.warningMessage("Please select a model")
def onLoadLandmarksOnSurfaceStateChanged(self):
self.logic.projectNewPoints = self.ui.loadLandmarksOnSurfaceCheckBox.isChecked()
def onSurfaceDeplacementStateChanged(self):
activeInput = self.logic.selectedModel
if not activeInput:
return
fidList = self.logic.selectedFidList
if not fidList:
return
selectedFidReflID = self.logic.findIDFromLabel(fidList, self.ui.landmarkComboBox.currentText)
isOnSurface = self.ui.surfaceDeplacementCheckBox.isChecked()
landmarkDescription = self.logic.decodeJSON(fidList.GetAttribute("landmarkDescription"))
if isOnSurface:
hardenModel = slicer.app.mrmlScene().GetNodeByID(fidList.GetAttribute("hardenModelID"))
landmarkDescription[selectedFidReflID]["projection"]["isProjected"] = True
landmarkDescription[selectedFidReflID]["projection"]["closestPointIndex"] =\
self.logic.projectOnSurface(hardenModel, fidList, selectedFidReflID)
else:
landmarkDescription[selectedFidReflID]["projection"]["isProjected"] = False
landmarkDescription[selectedFidReflID]["projection"]["closestPointIndex"] = None
fidList.SetAttribute("landmarkDescription",self.logic.encodeJSON(landmarkDescription))
def onDefineMidPointClicked(self):
fidList = self.logic.selectedFidList
        if not fidList:
            self.logic.warningMessage("Please select a reference model and a fiducial list.")
            return
label1 = self.ui.landmarkComboBox1.currentText
label2 = self.ui.landmarkComboBox2.currentText
landmark1ID = self.logic.findIDFromLabel(fidList, label1)
landmark2ID = self.logic.findIDFromLabel(fidList, label2)
coord = self.logic.calculateMidPointCoord(fidList, landmark1ID, landmark2ID)
fidList.AddFiducial(coord[0],coord[1],coord[2], f'{label1}_{label2}')
fidList.SetNthFiducialSelected(fidList.GetNumberOfMarkups() - 1, False)
# update of the data structure
landmarkDescription = self.logic.decodeJSON(fidList.GetAttribute("landmarkDescription"))
numOfMarkups = fidList.GetNumberOfMarkups()
markupID = fidList.GetNthMarkupID(numOfMarkups - 1)
landmarkDescription[landmark1ID]["midPoint"]["definedByThisMarkup"].append(markupID)
landmarkDescription[landmark2ID]["midPoint"]["definedByThisMarkup"].append(markupID)
landmarkDescription[markupID]["midPoint"]["isMidPoint"] = True
landmarkDescription[markupID]["midPoint"]["Point1"] = landmark1ID
landmarkDescription[markupID]["midPoint"]["Point2"] = landmark2ID
landmarkDescription[markupID]["projection"]["isProjected"] = False
landmarkDescription[markupID]["projection"]["closestPointIndex"] = None
if self.ui.midPointOnSurfaceCheckBox.isChecked():
landmarkDescription[markupID]["projection"]["isProjected"] = True
hardenModel = slicer.app.mrmlScene().GetNodeByID(fidList.GetAttribute("hardenModelID"))
landmarkDescription[markupID]["projection"]["closestPointIndex"] = \
self.logic.projectOnSurface(hardenModel, fidList, markupID)
else:
landmarkDescription[markupID]["projection"]["isProjected"] = False
fidList.SetAttribute("landmarkDescription",self.logic.encodeJSON(landmarkDescription))
self.logic.UpdateInterface()
self.logic.updateLandmarkComboBox(fidList, self.ui.landmarkComboBox, False)
fidList.SetNthFiducialPositionFromArray(numOfMarkups - 1, coord)
def onComputeDistanceClicked(self):
fidList = self.logic.selectedFidList
fidListA = self.ui.fidListComboBoxA.currentNode()
fidListB = self.ui.fidListComboBoxB.currentNode()
nameList = [fidListA.GetName(), fidListB.GetName()]
if not fidList:
self.logic.warningMessage("Please connect a fiducial list to a model.")
return
for fidListIter in list(set(nameList)):
landmarkDescription = slicer.mrmlScene.GetNodesByName(fidListIter).GetItemAsObject(0). \
GetAttribute("landmarkDescription")
if not landmarkDescription:
self.logic.warningMessage(
f'{fidListIter} is not connected to a model. Please use "Add and Move '
'Landmarks" panel to connect the landmarks to a model.')
return
self.ui.distanceLayout.addLayout(self.tableAndExportLayout)
key, args = self.logic.getDistanceArgs(
fidListA=self.ui.fidListComboBoxA.currentNode(),
fidListB=self.ui.fidListComboBoxB.currentNode(),
fidLabelA=self.ui.landmarkComboBoxA.currentText,
fidLabelB=self.ui.landmarkComboBoxB.currentText
)
data = self.logic.computeDistance(*args)
self.logic.updateTable(self.distance_table, key, data)
self.logic.updateTableView(self.distance_table, self.distance_table_view)
def onComputeAnglesClicked(self):
fidList = self.logic.selectedFidList
fidListline1LA = self.ui.fidListComboBoxline1LA.currentNode()
fidListline1LB = self.ui.fidListComboBoxline1LB.currentNode()
fidListline2LA = self.ui.fidListComboBoxline2LA.currentNode()
fidListline2LB = self.ui.fidListComboBoxline2LB.currentNode()
nameList = [fidListline1LA.GetName(), fidListline1LB.GetName(), fidListline2LA.GetName(), fidListline2LB.GetName()]
if not fidList:
self.logic.warningMessage("Please connect a fiducial list to a model.")
return
for fidListIter in list(set(nameList)):
landmarkDescription = slicer.mrmlScene.GetNodesByName(fidListIter).GetItemAsObject(0). \
GetAttribute("landmarkDescription")
if not landmarkDescription:
self.logic.warningMessage(
f'{fidListIter} is not connected to a model. Please use "Add and Move '
'Landmarks" panel to connect the landmarks to a model.')
return
self.ui.angleLayout.addLayout(self.tableAndExportAngleLayout)
key, args = self.logic.getAnglesArgs(
fidlist1A=self.ui.fidListComboBoxline1LA.currentNode(),
fidlist1B=self.ui.fidListComboBoxline1LB.currentNode(),
fidlist2A=self.ui.fidListComboBoxline2LA.currentNode(),
fidlist2B=self.ui.fidListComboBoxline2LB.currentNode(),
fidLabel1A=self.ui.line1LAComboBox.currentText,
fidLabel1B=self.ui.line1LBComboBox.currentText,
fidLabel2A=self.ui.line2LAComboBox.currentText,
fidLabel2B=self.ui.line2LBComboBox.currentText,
yawState=self.ui.yawCheckBox.isChecked(),
pitchState=self.ui.pitchCheckBox.isChecked(),
rollState=self.ui.rollCheckBox.isChecked()
)
data = self.logic.computeAngles(*args)
self.logic.updateTable(self.angles_table, key, data)
self.logic.updateTableView(self.angles_table, self.angles_table_view)
def onComputeLinePointClicked(self):
fidList = self.logic.selectedFidList
if not fidList:
self.logic.warningMessage("Please connect a fiducial list to a model.")
return
fidListlineLA = self.ui.fidListComboBoxlineLA.currentNode()
fidListlineLB = self.ui.fidListComboBoxlineLB.currentNode()
fidListPoint = self.ui.fidListComboBoxlinePoint.currentNode()
nameList = [fidListlineLA.GetName(), fidListlineLB.GetName(), fidListPoint.GetName()]
for fidListIter in list(set(nameList)):
landmarkDescription = slicer.mrmlScene.GetNodesByName(fidListIter).GetItemAsObject(0). \
GetAttribute("landmarkDescription")
if not landmarkDescription:
self.logic.warningMessage(
f'{fidListIter} is not connected to a model. Please use "Add and Move '
'Landmarks" panel to connect the landmarks to a model.')
return
self.ui.LinePointLayout.addLayout(self.tableAndExportLinePointLayout)
key, args = self.logic.getLinePointArgs(
fidListLineA=self.ui.fidListComboBoxlineLA.currentNode(),
fidListLineB=self.ui.fidListComboBoxlineLB.currentNode(),
fidListPoint=self.ui.fidListComboBoxlinePoint.currentNode(),
fidLabelLineA=self.ui.lineLAComboBox.currentText,
fidLabelLineB=self.ui.lineLBComboBox.currentText,
fidLabelPoint=self.ui.linePointComboBox.currentText
)
data = self.logic.computeLinePoint(*args)
self.logic.updateTable(self.line_point_table, key, data)
self.logic.updateTableView(self.line_point_table, self.line_point_table_view)
def onExportButton(self):
self.logic.exportationFunction(
self.directoryExportDistance,
self.filenameExportDistance,
self.distance_table,
'distance'
)
def onExportAngleButton(self):
self.logic.exportationFunction(
self.directoryExportAngle,
self.filenameExportAngle,
self.angles_table,
'angle'
)
def onExportLinePointButton(self):
self.logic.exportationFunction(
self.directoryExportLinePoint,
self.filenameExportLinePoint,
self.line_point_table,
'linePoint'
)
class Q3DCLogic(ScriptedLoadableModuleLogic):
def __init__(self, interface):
self.interface = interface
self.selectedModel = None
self.selectedFidList = None
self.current_suggested_landmarks = None
self.enable_legend_labels = True
self.projectNewPoints = True
self.numberOfDecimals = 3
self.tolerance = 1e-5
system = qt.QLocale().system()
self.decimalPoint = chr(system.decimalPoint())
self.comboboxdict = dict()
@staticmethod
def load_suggested_landmarks(filepath):
suggested_landmarks = defaultdict(list)
try:
with open(filepath, newline='', encoding='utf8') as suggestions_file:
reader = csv.DictReader(suggestions_file)
for row in reader:
region = row['Region'].title()
landmark = row['Landmark']
name = row['Name']
suggested_landmarks[region].append((landmark, name))
return suggested_landmarks
except OSError as e:
slicer.util.delayDisplay('Unable to find/open file.')
logging.info('User attempted to open a landmark legend file.\n' + repr(e))
return None
except csv.Error as e:
slicer.util.delayDisplay('The selected file is not formatted properly.')
logging.info('User attempted to open a landmark legend file.\n' + repr(e))
return None
except KeyError as e:
slicer.util.delayDisplay('The selected file does not have the right column names.')
logging.info('User attempted to open a landmark legend file.\n' + repr(e))
return None
def initComboboxdict(self):
self.comboboxdict[self.interface.landmarkComboBoxA] = None
self.comboboxdict[self.interface.landmarkComboBoxB] = None
self.comboboxdict[self.interface.line1LAComboBox] = None
self.comboboxdict[self.interface.line1LBComboBox] = None
self.comboboxdict[self.interface.line2LAComboBox] = None
self.comboboxdict[self.interface.line2LBComboBox] = None
self.comboboxdict[self.interface.lineLAComboBox] = None
self.comboboxdict[self.interface.lineLBComboBox] = None
self.comboboxdict[self.interface.linePointComboBox] = None
def UpdateThreeDView(self, landmarkLabel):
# Update the 3D view on Slicer
if not self.selectedFidList:
return
if not self.selectedModel:
return
logging.debug("UpdateThreeDView")
active = self.selectedFidList
#deactivate all landmarks
        nodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLMarkupsFiducialNode")
        end = nodes.GetNumberOfItems()
        selectedFidReflID = self.findIDFromLabel(active, landmarkLabel)
        for i in range(end):
            fidList = nodes.GetItemAsObject(i)
logging.info('fidList ID: %s', fidList.GetID())
landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
if landmarkDescription:
for key in landmarkDescription.keys():
markupsIndex = fidList.GetNthControlPointIndexByID(key)
if key != selectedFidReflID:
fidList.SetNthMarkupLocked(markupsIndex, True)
else:
fidList.SetNthMarkupLocked(markupsIndex, False)
fidList.SetNthMarkupLocked(markupsIndex, False)
def createIntermediateHardenModel(self, model):
hardenModel = slicer.mrmlScene.GetNodesByName("SurfaceRegistration_" + model.GetName() + "_hardenCopy_" + str(
slicer.app.applicationPid())).GetItemAsObject(0)
if hardenModel is None:
hardenModel = slicer.vtkMRMLModelNode()
hardenPolyData = vtk.vtkPolyData()
hardenPolyData.DeepCopy(model.GetPolyData())
hardenModel.SetAndObservePolyData(hardenPolyData)
hardenModel.SetName(
"SurfaceRegistration_" + model.GetName() + "_hardenCopy_" + str(slicer.app.applicationPid()))
if model.GetParentTransformNode():
hardenModel.SetAndObserveTransformNodeID(model.GetParentTransformNode().GetID())
hardenModel.HideFromEditorsOn()
slicer.mrmlScene.AddNode(hardenModel)
logic = slicer.vtkSlicerTransformLogic()
logic.hardenTransform(hardenModel)
return hardenModel
def onModelModified(self, obj, event):
#recompute the harden model
hardenModel = self.createIntermediateHardenModel(obj)
obj.SetAttribute("hardenModelID",hardenModel.GetID())
# for each fiducial list
        nodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLMarkupsFiducialNode")
        end = nodes.GetNumberOfItems()
        for i in range(end):
            # If landmarks are projected on the modified model
            fidList = nodes.GetItemAsObject(i)
if fidList.GetAttribute("connectedModelID"):
if fidList.GetAttribute("connectedModelID") == obj.GetID():
#replace the harden model with the new one
fidList.SetAttribute("hardenModelID",hardenModel.GetID())
#reproject the fiducials on the new model
landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
for n in range(fidList.GetNumberOfMarkups()):
markupID = fidList.GetNthMarkupID(n)
if landmarkDescription[markupID]["projection"]["isProjected"] == True:
hardenModel = slicer.app.mrmlScene().GetNodeByID(fidList.GetAttribute("hardenModelID"))
markupsIndex = fidList.GetNthControlPointIndexByID(markupID)
self.replaceLandmark(hardenModel.GetPolyData(), fidList, markupsIndex,
landmarkDescription[markupID]["projection"]["closestPointIndex"])
fidList.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
def ModelChanged(self, inputModelSelector, inputLandmarksSelector):
inputModel = inputModelSelector.currentNode()
# if a Model Node is present
if inputModel:
self.selectedModel = inputModel
hardenModel = self.createIntermediateHardenModel(inputModel)
inputModel.SetAttribute("hardenModelID",hardenModel.GetID())
modelModifieTagEvent = inputModel.AddObserver(inputModel.TransformModifiedEvent, self.onModelModified)
inputModel.SetAttribute("modelModifieTagEvent",self.encodeJSON({'modelModifieTagEvent':modelModifieTagEvent}))
inputLandmarksSelector.setEnabled(True)
# if no model is selected
else:
# Update the fiducial list selector
inputLandmarksSelector.setCurrentNode(None)
inputLandmarksSelector.setEnabled(False)
def isUnderTransform(self, markups):
if markups.GetParentTransformNode():
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(" /!\ WARNING /!\ ")
messageBox.setIcon(messageBox.Warning)
messageBox.setText("Your Markup Fiducial Node is currently modified by a transform,"
"if you choose to continue the program will apply the transform"
"before doing anything else!")
messageBox.setInformativeText("Do you want to continue?")
messageBox.setStandardButtons(messageBox.No | messageBox.Yes)
choice = messageBox.exec_()
if choice == messageBox.Yes:
logic = slicer.vtkSlicerTransformLogic()
logic.hardenTransform(markups)
return False
else:
messageBox.setText(" Node not modified")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.setInformativeText("")
messageBox.exec_()
return True
else:
return False
def connectedModelChangement(self):
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(" /!\ WARNING /!\ ")
messageBox.setIcon(messageBox.Warning)
messageBox.setText("The Markup Fiducial Node selected is curently projected on an"
"other model, if you chose to continue the fiducials will be "
"reprojected, and this could impact the functioning of other modules")
messageBox.setInformativeText("Do you want to continue?")
messageBox.setStandardButtons(messageBox.No | messageBox.Yes)
choice = messageBox.exec_()
if choice == messageBox.Yes:
return True
else:
messageBox.setText(" Node not modified")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.setInformativeText("")
messageBox.exec_()
return False
@staticmethod
def recover_midpoint_provenance(landmarks):
'''
When a new list of fiducials is loaded from a file, we know which are
midpoints, but we don't know from which points those midpoints were
constructed. This function recovers this information.
'''
# Build the data structures we will need.
point_ids = []
points = []
ids_and_midpoints = []
all_ids = []
scratch_array = np.zeros(3)
for n in range(landmarks.GetNumberOfMarkups()):
markupID = landmarks.GetNthMarkupID(n)
is_sel = landmarks.GetNthFiducialSelected(n)
landmarks.GetNthFiducialPosition(n, scratch_array)
markup_pos = np.copy(scratch_array)
if is_sel: # not a midpoint
point_ids.append(markupID)
points.append(markup_pos)
else: # midpoint
ids_and_midpoints.append((markupID, markup_pos))
all_ids.append(markupID)
# This is the structure we want to populate to help build
# landmarkDescription in createNewDataStructure.
midpoint_data = {
point_id: {
'definedByThisMarkup': [],
'isMidPoint': False,
'Point1': None,
'Point2': None,
} for point_id in all_ids
}
# Use a kd-tree to find points that could be the missing endpoint of a
# hypothetical midpoint operation.
points = np.array(points)
n_new_points = len(points)
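        # Iterate until no new endpoints are found: each midpoint whose parents are identified is
        # itself added to the candidate endpoints, so midpoints of midpoints resolve in later passes.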
while n_new_points > 0 and len(ids_and_midpoints) > 0:
kdt = scipy.spatial.KDTree(points)
n_new_points = 0
new_ids_and_midpoints = []
for mp_id, mp in ids_and_midpoints:
provenance_found = False
for p_idx, p in enumerate(points):
# hp for "hypothetical point"
# mp = (hp + p) / 2
hp = 2*mp - p
max_error = np.linalg.norm(mp - p) / 10000.0
distance, kdt_p_idx = kdt.query(
hp, distance_upper_bound=max_error)
# distance = np.inf on failure
if distance < max_error:
ids = (point_ids[p_idx], point_ids[kdt_p_idx])
midpoint_data[mp_id].update({
'isMidPoint': True,
'Point1': ids[0],
'Point2': ids[1],
})
for id_ in ids:
midpoint_data[id_]['definedByThisMarkup'].append(mp_id)
provenance_found = True
point_ids.append(mp_id)
points = np.concatenate((points, mp.reshape((1, 3))))
n_new_points += 1
break
if not provenance_found:
new_ids_and_midpoints.append((mp_id, mp))
ids_and_midpoints = new_ids_and_midpoints
return midpoint_data
def createNewDataStructure(self, landmarks, model, onSurface):
landmarks.SetAttribute("connectedModelID",model.GetID())
landmarks.SetAttribute("hardenModelID",model.GetAttribute("hardenModelID"))
landmarkDescription = dict()
midpoint_data = self.recover_midpoint_provenance(landmarks)
for n in range(landmarks.GetNumberOfMarkups()):
markupID = landmarks.GetNthMarkupID(n)
landmarkDescription[markupID] = {'midPoint': midpoint_data[markupID]}
for n in range(landmarks.GetNumberOfMarkups()):
markupID = landmarks.GetNthMarkupID(n)
landmarkLabel = landmarks.GetNthMarkupLabel(n)
landmarkDescription[markupID]["landmarkLabel"] = landmarkLabel
landmarkDescription[markupID]["projection"] = dict()
if onSurface and not landmarkDescription[markupID]['midPoint']['isMidPoint']:
landmarkDescription[markupID]["projection"]["isProjected"] = True
hardenModel = slicer.app.mrmlScene().GetNodeByID(landmarks.GetAttribute("hardenModelID"))
landmarkDescription[markupID]["projection"]["closestPointIndex"] = \
self.projectOnSurface(hardenModel, landmarks, markupID)
else:
landmarkDescription[markupID]["projection"]["isProjected"] = False
landmarkDescription[markupID]["projection"]["closestPointIndex"] = None
if onSurface:
for n in range(landmarks.GetNumberOfMarkups()):
markupID = landmarks.GetNthMarkupID(n)
nth_midpoint_data = landmarkDescription[markupID]['midPoint']
if nth_midpoint_data['isMidPoint']:
parent_id1 = nth_midpoint_data['Point1']
parent_id2 = nth_midpoint_data['Point2']
coord = self.calculateMidPointCoord(landmarks, parent_id1, parent_id2)
index = landmarks.GetNthControlPointIndexByID(markupID)
landmarks.SetNthFiducialPositionFromArray(index, coord)
landmarks.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
planeDescription = dict()
landmarks.SetAttribute("planeDescription",self.encodeJSON(planeDescription))
landmarks.SetAttribute("isClean",self.encodeJSON({"isClean":False}))
landmarks.SetAttribute("lastTransformID",None)
self.conform_selectedness_to_midpoint_status(landmarks)
def conform_selectedness_to_midpoint_status(self, landmarks):
landmarkDescription = self.decodeJSON(landmarks.GetAttribute("landmarkDescription"))
for n in range(landmarks.GetNumberOfMarkups()):
markupID = landmarks.GetNthMarkupID(n)
isMidPoint = landmarkDescription[markupID]['midPoint']['isMidPoint']
landmarks.SetNthFiducialSelected(n, not isMidPoint)
def changementOfConnectedModel(self, landmarks, model, onSurface):
landmarks.SetAttribute("connectedModelID", model.GetID())
landmarks.SetAttribute("hardenModelID", model.GetAttribute("hardenModelID"))
landmarkDescription = self.decodeJSON(landmarks.GetAttribute("landmarkDescription"))
D = nx.DiGraph()
for n in range(landmarks.GetNumberOfMarkups()):
markupID = landmarks.GetNthMarkupID(n)
D.add_node(markupID)
dbtm = landmarkDescription[markupID]['midPoint']['definedByThisMarkup']
for dependent_point in dbtm:
D.add_edge(markupID, dependent_point)
for markupID in nx.topological_sort(D):
if onSurface:
if landmarkDescription[markupID]["projection"]["isProjected"] == True:
hardenModel = slicer.app.mrmlScene().GetNodeByID(landmarks.GetAttribute("hardenModelID"))
landmarkDescription[markupID]["projection"]["closestPointIndex"] = \
self.projectOnSurface(hardenModel, landmarks, markupID)
elif landmarkDescription[markupID]['midPoint']['isMidPoint']:
parent_id1 = landmarkDescription[markupID]['midPoint']['Point1']
parent_id2 = landmarkDescription[markupID]['midPoint']['Point2']
coord = self.calculateMidPointCoord(landmarks, parent_id1, parent_id2)
index = landmarks.GetNthControlPointIndexByID(markupID)
landmarks.SetNthFiducialPositionFromArray(index, coord)
else:
landmarkDescription[markupID]["projection"]["isProjected"] = False
landmarkDescription[markupID]["projection"]["closestPointIndex"] = None
landmarks.SetAttribute("landmarkDescription", self.encodeJSON(landmarkDescription))
landmarks.SetAttribute("isClean",self.encodeJSON({"isClean":False}))
def connectLandmarks(self, modelSelector, landmarkSelector, onSurface):
model = modelSelector.currentNode()
landmarks = landmarkSelector.currentNode()
self.selectedFidList = landmarks
self.selectedModel = model
if not (model and landmarks):
return
if self.isUnderTransform(landmarks):
landmarkSelector.setCurrentNode(None)
return
connectedModelID = landmarks.GetAttribute("connectedModelID")
try:
tag = self.decodeJSON(landmarks.GetAttribute("PointAddedEventTag"))
landmarks.RemoveObserver(tag["PointAddedEventTag"])
logging.debug("adding observers removed!")
except:
pass
try:
tag = self.decodeJSON(landmarks.GetAttribute("UpdatesLinesEventTag"))
landmarks.RemoveObserver(tag["UpdatesLinesEventTag"])
logging.debug("lines observers removed!")
except:
pass
try:
tag = self.decodeJSON(landmarks.GetAttribute("PointModifiedEventTag"))
landmarks.RemoveObserver(tag["PointModifiedEventTag"])
logging.debug("moving observers removed!")
except:
pass
try:
tag = self.decodeJSON(landmarks.GetAttribute("PointRemovedEventTag"))
landmarks.RemoveObserver(tag["PointRemovedEventTag"])
logging.debug("removing observers removed!")
except:
pass
if connectedModelID:
if connectedModelID != model.GetID():
if self.connectedModelChangement():
self.changementOfConnectedModel(landmarks, model, onSurface)
else:
landmarkSelector.setCurrentNode(None)
return
else:
landmarks.SetAttribute("hardenModelID",model.GetAttribute("hardenModelID"))
# creation of the data structure
else:
self.createNewDataStructure(landmarks, model, onSurface)
#update of the landmark Combo Box
self.updateLandmarkComboBox(landmarks, self.interface.landmarkComboBox, False)
self.updateLandmarkComboBox(landmarks, self.interface.landmarkComboBox1)
self.updateLandmarkComboBox(landmarks, self.interface.landmarkComboBox2)
#adding of listeners
PointAddedEventTag = landmarks.AddObserver(landmarks.PointAddedEvent, self.onPointAddedEvent)
landmarks.SetAttribute("PointAddedEventTag",self.encodeJSON({"PointAddedEventTag":PointAddedEventTag}))
UpdatesLinesEventTag = landmarks.AddObserver(landmarks.PointModifiedEvent, self.updateLinesEvent)
landmarks.SetAttribute("UpdatesLinesEventTag",self.encodeJSON({"UpdatesLinesEventTag":UpdatesLinesEventTag}))
PointModifiedEventTag = landmarks.AddObserver(landmarks.PointModifiedEvent, self.onPointModifiedEvent)
landmarks.SetAttribute("PointModifiedEventTag",self.encodeJSON({"PointModifiedEventTag":PointModifiedEventTag}))
PointRemovedEventTag = landmarks.AddObserver(landmarks.PointRemovedEvent, self.onPointRemovedEvent)
landmarks.SetAttribute("PointRemovedEventTag",self.encodeJSON({"PointRemovedEventTag":PointRemovedEventTag}))
# Called when a landmark is added on a model
def onPointAddedEvent(self, obj, event):
logging.debug("markup adding")
if self.enable_legend_labels:
try:
# Find the index of the last-placed landmark and get the landmark label at that position.
# Ex. if the last-placed landmark was at the 3rd position, we want to use the 3rd landmark label.
n = obj.GetNumberOfMarkups()
label, description = self.current_suggested_landmarks[n - 1]
obj.SetNthMarkupLabel(n - 1, label)
except IndexError:
# If there are more landmarks than suggested labels then fetching the label would fail.
logging.error('Not changing label; wrong number of markups.')
landmarkDescription = self.decodeJSON(obj.GetAttribute("landmarkDescription"))
numOfMarkups = obj.GetNumberOfMarkups()
markupID = obj.GetNthMarkupID(numOfMarkups - 1)
landmarkDescription[markupID] = dict()
landmarkLabel = obj.GetNthMarkupLabel(numOfMarkups - 1)
landmarkDescription[markupID]["landmarkLabel"] = landmarkLabel
landmarkDescription[markupID]["projection"] = dict()
landmarkDescription[markupID]["projection"]["isProjected"] = self.projectNewPoints
# The landmark will be projected by onPointModifiedEvent
landmarkDescription[markupID]["midPoint"] = dict()
landmarkDescription[markupID]["midPoint"]["definedByThisMarkup"] = list()
landmarkDescription[markupID]["midPoint"]["isMidPoint"] = False
landmarkDescription[markupID]["midPoint"]["Point1"] = None
landmarkDescription[markupID]["midPoint"]["Point2"] = None
obj.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
self.updateAllLandmarkComboBox(obj, markupID)
self.UpdateInterface()
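        # Defer to the next event-loop iteration so onPointModifiedEvent can project the newly
        # added point (see the comment above about projection).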
qt.QTimer.singleShot(0, lambda : self.onPointModifiedEvent(obj,None))
def updateLinesEvent(self, obj, event):
if self.interface.line1LAComboBox.currentText != '' and self.interface.line1LBComboBox.currentText != '' \
and self.interface.line1LAComboBox.currentText != self.interface.line1LBComboBox.currentText :
# Clear Lines, then define new ones
if self.interface.renderer1 :
self.interface.renderer1.RemoveActor(self.interface.actor1)
self.interface.renderer1, self.interface.actor1 = \
self.drawLineBetween2Landmark(self.interface.line1LAComboBox.currentText,
self.interface.line1LBComboBox.currentText,
self.interface.fidListComboBoxline1LA.currentNode(),
self.interface.fidListComboBoxline1LB.currentNode())
if self.interface.line2LAComboBox.currentText != '' and self.interface.line2LBComboBox.currentText != '' \
and self.interface.line2LAComboBox.currentText != self.interface.line2LBComboBox.currentText :
if self.interface.renderer2 :
self.interface.renderer2.RemoveActor(self.interface.actor2)
self.interface.renderer2, self.interface.actor2 = \
self.drawLineBetween2Landmark(self.interface.line2LAComboBox.currentText,
self.interface.line2LBComboBox.currentText,
self.interface.fidListComboBoxline2LA.currentNode(),
self.interface.fidListComboBoxline2LB.currentNode())
if self.interface.lineLAComboBox.currentText != '' and self.interface.lineLBComboBox.currentText != '' \
and self.interface.lineLAComboBox.currentText != self.interface.lineLBComboBox.currentText :
if self.interface.renderer3 :
self.interface.renderer3.RemoveActor(self.interface.actor3)
self.interface.renderer3, self.interface.actor3 = \
self.drawLineBetween2Landmark(self.interface.lineLAComboBox.currentText,
self.interface.lineLBComboBox.currentText,
self.interface.fidListComboBoxlineLA.currentNode(),
self.interface.fidListComboBoxlineLB.currentNode())
def updateMidPoint(self, fidList, landmarkID):
landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
for midPointID in landmarkDescription[landmarkID]["midPoint"]["definedByThisMarkup"]:
if landmarkDescription[midPointID]["midPoint"]["isMidPoint"]:
landmark1ID = landmarkDescription[midPointID]["midPoint"]["Point1"]
landmark2ID = landmarkDescription[midPointID]["midPoint"]["Point2"]
coord = self.calculateMidPointCoord(fidList, landmark1ID, landmark2ID)
index = fidList.GetNthControlPointIndexByID(midPointID)
fidList.SetNthControlPointPositionFromArray(index, coord, fidList.PositionPreview)
if landmarkDescription[midPointID]["projection"]["isProjected"]:
hardenModel = slicer.app.mrmlScene().GetNodeByID(fidList.GetAttribute("hardenModelID"))
landmarkDescription[midPointID]["projection"]["closestPointIndex"] = \
self.projectOnSurface(hardenModel, fidList, midPointID)
fidList.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
self.updateMidPoint(fidList, midPointID)
    # Called when a landmark is moved
def onPointModifiedEvent(self, obj, event):
logging.debug("onPointModifiedEvent Q3DC")
landmarkDescription = self.decodeJSON(obj.GetAttribute("landmarkDescription"))
if not landmarkDescription:
return
selectedLandmarkID = self.findIDFromLabel(obj, self.interface.landmarkComboBox.currentText)
        # temporarily remove the observer so this callback is not re-triggered while we update the point
tag = self.decodeJSON(obj.GetAttribute("PointModifiedEventTag"))
obj.RemoveObserver(tag["PointModifiedEventTag"])
if selectedLandmarkID:
activeLandmarkState = landmarkDescription[selectedLandmarkID]
logging.debug('activeLandmarkState: %s', activeLandmarkState)
if activeLandmarkState["projection"]["isProjected"]:
hardenModel = slicer.app.mrmlScene().GetNodeByID(obj.GetAttribute("hardenModelID"))
activeLandmarkState["projection"]["closestPointIndex"] = \
self.projectOnSurface(hardenModel, obj, selectedLandmarkID)
obj.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
self.updateMidPoint(obj,selectedLandmarkID)
time.sleep(0.08)
# Add the observer again
PointModifiedEventTag = obj.AddObserver(obj.PointModifiedEvent, self.onPointModifiedEvent)
obj.SetAttribute("PointModifiedEventTag",self.encodeJSON({"PointModifiedEventTag":PointModifiedEventTag}))
def onPointRemovedEvent(self, obj, event):
logging.debug("markup deleting")
# ensure that onPointModified won't be called
tag = self.decodeJSON(obj.GetAttribute("PointModifiedEventTag"))
logging.info('Modified %r', tag)
obj.RemoveObserver(tag["PointModifiedEventTag"])
landmarkDescription = self.decodeJSON(obj.GetAttribute("landmarkDescription"))
IDs = []
for ID, value in landmarkDescription.items():
isFound = False
for n in range(obj.GetNumberOfMarkups()):
markupID = obj.GetNthMarkupID(n)
if ID == markupID:
isFound = True
if not isFound:
IDs.append(ID)
for ID in IDs:
self.deleteLandmark(obj, landmarkDescription[ID]["landmarkLabel"])
landmarkDescription.pop(ID,None)
obj.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
def addLandmarkToCombox(self, fidList, combobox, markupID):
if not fidList:
return
landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
combobox.addItem(landmarkDescription[markupID]["landmarkLabel"])
def updateAllLandmarkComboBox(self, fidList, markupID):
        # update the comboboxes that are always updated
self.updateLandmarkComboBox(fidList, self.interface.landmarkComboBox, False)
self.addLandmarkToCombox(fidList, self.interface.landmarkComboBox1, markupID)
self.addLandmarkToCombox(fidList, self.interface.landmarkComboBox2, markupID)
        # update the comboboxes that display the fiducial list just modified
for key,value in self.comboboxdict.items():
if value is fidList:
self.addLandmarkToCombox(fidList, key, markupID)
def updateLandmarkComboBox(self, fidList, combobox, displayMidPoint = True):
combobox.blockSignals(True)
combobox.clear()
if not fidList:
return
landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
if not fidList:
return
numOfFid = fidList.GetNumberOfMarkups()
if numOfFid > 0:
for i in range(0, numOfFid):
if displayMidPoint is False:
ID = fidList.GetNthMarkupID(i)
if not landmarkDescription[ID]["midPoint"]["isMidPoint"]:
landmarkLabel = fidList.GetNthMarkupLabel(i)
combobox.addItem(landmarkLabel)
else:
landmarkLabel = fidList.GetNthMarkupLabel(i)
combobox.addItem(landmarkLabel)
combobox.setCurrentIndex(combobox.count - 1)
combobox.blockSignals(False)
def deleteLandmark(self, fidList, label):
        # update the comboboxes that are always updated
self.interface.landmarkComboBox.removeItem(self.interface.landmarkComboBox.findText(label))
self.interface.landmarkComboBox1.removeItem(self.interface.landmarkComboBox1.findText(label))
self.interface.landmarkComboBox2.removeItem(self.interface.landmarkComboBox2.findText(label))
for key,value in self.comboboxdict.items():
if value is fidList:
key.removeItem(key.findText(label))
@staticmethod
def findIDFromLabel(fidList, landmarkLabel):
# find the ID of the markupsNode from the label of a landmark!
for i in range(fidList.GetNumberOfFiducials()):
if landmarkLabel == fidList.GetNthFiducialLabel(i):
return fidList.GetNthMarkupID(i)
return None
def getClosestPointIndex(self, fidNode, inputPolyData, landmarkID):
landmarkCoord = np.zeros(3)
landmarkCoord[1] = 42
fidNode.GetNthFiducialPosition(landmarkID, landmarkCoord)
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(inputPolyData)
pointLocator.AutomaticOn()
pointLocator.BuildLocator()
indexClosestPoint = pointLocator.FindClosestPoint(landmarkCoord)
return indexClosestPoint
def replaceLandmark(self, inputModelPolyData, fidNode, landmarkID, indexClosestPoint):
landmarkCoord = [-1, -1, -1]
inputModelPolyData.GetPoints().GetPoint(indexClosestPoint, landmarkCoord)
logging.debug('ReplaceLandmark Coord: %s', landmarkCoord)
fidNode.SetNthControlPointPositionFromArray(landmarkID, landmarkCoord, fidNode.PositionPreview)
def projectOnSurface(self, modelOnProject, fidNode, selectedFidReflID):
if selectedFidReflID:
markupsIndex = fidNode.GetNthControlPointIndexByID(selectedFidReflID)
indexClosestPoint = self.getClosestPointIndex(fidNode, modelOnProject.GetPolyData(), markupsIndex)
self.replaceLandmark(modelOnProject.GetPolyData(), fidNode, markupsIndex, indexClosestPoint)
return indexClosestPoint
def calculateMidPointCoord(self, fidList, landmark1ID, landmark2ID):
"""Set the midpoint when you know the the mrml nodes"""
landmark1Index = fidList.GetNthControlPointIndexByID(landmark1ID)
landmark2Index = fidList.GetNthControlPointIndexByID(landmark2ID)
coord1 = [-1, -1, -1]
coord2 = [-1, -1, -1]
fidList.GetNthFiducialPosition(landmark1Index, coord1)
fidList.GetNthFiducialPosition(landmark2Index, coord2)
midCoord = [-1, -1, -1]
midCoord[0] = (coord1[0] + coord2[0])/2
midCoord[1] = (coord1[1] + coord2[1])/2
midCoord[2] = (coord1[2] + coord2[2])/2
return midCoord
def removecomponentFromStorage(self, type, element):
if type == 'angles':
element.Yaw = None
element.Roll = None
element.Pitch = None
if type == 'distance':
element.RLComponent = None
element.APComponent = None
element.SIComponent = None
element.ThreeDComponent = None
return element
def round(self, value):
return round(value, self.numberOfDecimals)
def computeDistance(self, point1, point2):
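        # point1, point2: 3-component numpy arrays (RAS); returns the per-axis differences
        # (R-L, A-P, S-I) followed by the 3D distance, each rounded to self.numberOfDecimals.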
delta = point2 - point1
norm = np.linalg.norm(delta)
result = [*delta, norm]
return [self.round(value) for value in result]
def computeAngle(self, line1, line2, axis):
"""
line1: np.array of the first line
line2: np.array of the second line
axis: project the lines onto the plane defined by this axis.
ex. axis=3 (z) would project lines to the 0-1 (x-y) plane
"""
# create a mask which removes the coordinate on axis. this performs the projection
mask = [True] * 3
mask[axis] = False
line1 = line1[mask]
line2 = line2[mask]
norm1 = np.linalg.norm(line1)
norm2 = np.linalg.norm(line2)
if norm1 == 0 or norm2 == 0:
slicer.util.errorDisplay("ERROR, norm of your vector is 0! DEFINE A VECTOR!")
return None
try:
# find the _signed_ angle using the determinant of a 2x2 matrix
# https://en.wikipedia.org/wiki/Determinant#2_%C3%97_2_matrices
# |A| = |u||v|sin(t) where u, v are columns of A and t is the angle between them
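            # e.g. projected vectors (1, 0) and (0, 1): det = 1 and both norms are 1, so the signed angle is +90 degrees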
matrix = np.array([line1, line2])
det = np.linalg.det(matrix)
radians = np.arcsin(det / norm1 / norm2)
return np.degrees(radians)
except np.linalg.LinAlgError:
slicer.util.errorDisplay('ERROR: failed to project vectors. Only able to compute angles in one plane.')
def computeAngles(self, line1, line2, states):
axes = [
2, # axis=S; axial; for yaw
0, # axis=R; saggital; for pitch
1, # axis=A; coronal; for roll
]
result = []
for axis, state in zip(axes, states):
if state:
value = self.computeAngle(line1, line2, axis)
value = self.round(value)
# we want to show the angle and the complementary angle, signed
                sign = np.sign(value)
import os
import sys
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from config import cfg
from utils import load_data
from capsNet import CapsNet
import pdb
import csv
import pandas as pd
def save_to():
if not os.path.exists(cfg.results):
os.mkdir(cfg.results)
if cfg.is_training:
loss = cfg.results + '/loss.csv'
train_acc = cfg.results + '/train_acc.csv'
val_acc = cfg.results + '/val_acc.csv'
if os.path.exists(val_acc):
os.remove(val_acc)
if os.path.exists(loss):
os.remove(loss)
if os.path.exists(train_acc):
os.remove(train_acc)
fd_train_acc = open(train_acc, 'w')
fd_train_acc.write('step,train_acc\n')
fd_loss = open(loss, 'w')
fd_loss.write('step,loss\n')
fd_val_acc = open(val_acc, 'w')
fd_val_acc.write('step,val_acc\n')
return(fd_train_acc, fd_loss, fd_val_acc)
else:
test_acc = cfg.results + '/test_acc.csv'
if os.path.exists(test_acc):
os.remove(test_acc)
fd_test_acc = open(test_acc, 'w')
fd_test_acc.write('test_acc\n')
return(fd_test_acc)
def train(model, supervisor, num_label):
trX, trY, num_tr_batch, valX, valY, num_val_batch = load_data(cfg.dataset, cfg.batch_size, is_training=True)
if cfg.num_batch is not None:
num_tr_batch = cfg.num_batch
Y = valY[:num_val_batch * cfg.batch_size].reshape((-1, 1))
fd_train_acc, fd_loss, fd_val_acc = save_to()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with supervisor.managed_session(config=config) as sess:
print("\nNote: all of results will be saved to directory: " + cfg.results)
for epoch in range(cfg.epoch):
sys.stdout.write('Training for epoch ' + str(epoch) + '/' + str(cfg.epoch) + ':')
sys.stdout.flush()
if supervisor.should_stop():
                print('supervisor stopped!')
break
for step in tqdm(range(num_tr_batch), total=num_tr_batch, ncols=70, leave=False, unit='b'):
start = step * cfg.batch_size
end = start + cfg.batch_size
global_step = epoch * num_tr_batch + step
if global_step % cfg.train_sum_freq == 0:
_, loss, train_acc, summary_str = sess.run([model.train_op, model.total_loss, model.accuracy, model.train_summary])
assert not np.isnan(loss), 'Something wrong! loss is nan...'
supervisor.summary_writer.add_summary(summary_str, global_step)
fd_loss.write(str(global_step) + ',' + str(loss) + "\n")
fd_loss.flush()
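                        # model.accuracy appears to return the count of correct predictions in the
                        # batch, hence the division by cfg.batch_size below.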
fd_train_acc.write(str(global_step) + ',' + str(train_acc / cfg.batch_size) + "\n")
fd_train_acc.flush()
else:
sess.run(model.train_op)
if cfg.val_sum_freq != 0 and (global_step) % cfg.val_sum_freq == 0:
val_acc = 0
for i in range(num_val_batch):
start = i * cfg.batch_size
end = start + cfg.batch_size
acc = sess.run(model.accuracy, {model.X: valX[start:end], model.labels: valY[start:end]})
val_acc += acc
val_acc = val_acc / (cfg.batch_size * num_val_batch)
fd_val_acc.write(str(global_step) + ',' + str(val_acc) + '\n')
fd_val_acc.flush()
if (epoch + 1) % cfg.save_freq == 0:
supervisor.saver.save(sess, cfg.logdir + '/model_epoch_%04d_step_%02d' % (epoch, global_step))
fd_val_acc.close()
fd_train_acc.close()
fd_loss.close()
def evaluation(model, supervisor, num_label):
teX, teY, num_te_batch = load_data(cfg.dataset, cfg.batch_size, is_training=False)
if cfg.num_batch is not None:
num_te_batch = cfg.num_batch
#create the record table
act_table_name = 'routing.csv'
df0 = pd.DataFrame([], columns=['label', 'prediction'] + ['l_'+str(i+1) for i in range(32)] + ['inputId'] + ['rotation'] +\
['cosine_'+str(i+1) for i in range(32)] + ['contribution_'+str(i+1) for i in range(32)] +\
['cosineRef_'+str(i+1) for i in range(32)]
)
df0.to_csv(act_table_name, index=False)
fd_test_acc = save_to()
with supervisor.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
supervisor.saver.restore(sess, tf.train.latest_checkpoint(cfg.logdir))
tf.logging.info('Model restored!')
test_acc = 0
#use the first batch only
start = 0 * cfg.batch_size
end = start + cfg.batch_size
        # first, run without rotation to obtain the reference (unrotated) capsule outputs
radian = np.zeros((cfg.batch_size,))
res = sess.run([model.c_IJ, model.argmax_idx, model.u_hat],
{model.origX: teX[start:end], model.labels: teY[start:end], model.radian: radian})
c_IJ = res[0]
argmax_idx = res[1] #(None,)
u_hat = res[2]
c_I = c_IJ[range(cfg.batch_size), :, argmax_idx].reshape([cfg.batch_size, -1, 32]) #(128, 36, 32)
max_inds = np.argmax(c_I, axis=1) #(None, 32)
temp_inds = np.indices((cfg.batch_size, 32))
inds = (temp_inds[0], max_inds, temp_inds[1])
u_hat_I = u_hat[range(cfg.batch_size), :, argmax_idx].reshape([cfg.batch_size, -1, 32, 16]) #(128, 36, 32, 16)
u_hat_max = u_hat_I[inds]
u_hat_max_ref = u_hat_max
norm_ref = np.linalg.norm(u_hat_max_ref, axis=2)
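# u_hat_max has shape (batch_size, 32, 16): for each input and each of the 32
# primary-capsule channels, the prediction vector taken at the spatial position
# with the largest coupling coefficient; norm_ref (batch_size, 32) keeps its L2
# norm as the zero-rotation reference.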
for i in tqdm(range(num_te_batch), total=num_te_batch, ncols=70, leave=False, unit='b'):
#start = i * cfg.batch_size
#end = start + cfg.batch_size
if i==0:
radian =
|
np.zeros((cfg.batch_size,))
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 00:45:04 2020
@author: <NAME>
"""
import numpy as np
import math
'''Beale'''
def F1(x):
x = np.asarray_chkfinite(x)
x1 = x[0]
x2 = x[1]
s = ((1.5 - x1 + x1 * x2) ** 2.0 + (2.25 - x1 + x1 * x2 ** 2.0) ** 2.0+ (2.625 - x1 + x1 * x2 ** 3.0) ** 2.0)
return s
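# Sanity check: the Beale function has its global minimum at (3, 0.5), where it evaluates to 0.
assert abs(F1([3.0, 0.5])) < 1e-12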
'''Easom'''
def F2(x):
x = np.asarray_chkfinite(x)
term1=-np.cos(x[0])
term2=np.cos(x[1])
term3=np.exp(-1*((np.float_(x[0])-np.pi)**2 + (np.float_(x[1]) - np.pi) ** 2))
s=term1*term2*term3
return s
'''Matyas'''
def F3(x):
#y=0.26*(x(1)^2+x(2)^2)-0.48*x(1)*x(2);
x = np.asarray_chkfinite(x)
x1 = x[0]
x2 = x[1]
s = 0.26 * (np.power(x1,2.0) + np.power(x2,2)) - 0.48 * np.multiply(x1,x2)
#s= 0.26*(np.sum(np.power(x1,2),np.power(x2,2)))-0.48*np.multiply(x1,x2)
return s
'''Powell'''
def F4(x):
x = np.asarray_chkfinite(x)
n = len(x)
n4 = ((n + 3) // 4) * 4
if n < n4:
x = np.append( x, np.zeros( n4 - n ))
x = x.reshape(( 4, -1 )) # 4 rows: x[4i-3] [4i-2] [4i-1] [4i]
term = np.empty_like( x )
term[0] = x[0] + 10 * x[1]
term[1] = np.sqrt(5) * (x[2] - x[3])
term[2] = np.power((x[1] - 2 * x[2]),2)
term[3] = np.sqrt(10) * np.power((x[0] - x[3]),2)
return np.sum( term**2 )
''' Commenting this as it is not Powell function'''
#def F4(x):
# o=np.max(np.abs(x))
# return o
'''Schaffer No.1'''
def F5(x):
x = np.asarray_chkfinite(x)
x1 = x[0]
x2 = x[1]
s = 0.5 + ((np.power(np.sin(np.power(x1,2) - np.power(x2,2)),2) - 0.5))/ (1 + (0.001 * np.power(np.power(x1,2)+ np.power(x2,2),2)))
return s
'''Schaffer No. 3'''
def F6(x):
x = np.asarray_chkfinite(x)
x1 = x[0]
x2 = x[1]
term1 = np.power(np.sin(np.cos(np.power(np.abs(x1),2) - np.power(x2,2))),2) - 0.5
term2 = (1 + 0.001 * (np.power(x1,2) + np.power(x2,2))) **2
s = 0.5 + (term1 / term2)
return s
'''Schaffer No.4 '''
def F7(x):
x = np.asarray_chkfinite(x)
x1 = x[0]
x2 = x[1]
term1 = np.power(np.cos(np.sin(np.power(np.abs(x1),2) - np.power(x2,2))),2) - 0.5
term2 = (1 + 0.001 * (np.power(x1,2) + np.power(x2,2))) **2
s = 0.5 + term1 / term2
return s
'''Zakhrov'''
def F8(x):
x = np.asarray_chkfinite(x)
n = len(x)
term1 = 0
term2 = 0
for i in range(0,n):
term1 = term1 + (np.power(x[i],2))
term2 = term2 + (0.5 * i * x[i])
s = term1 + (np.power(term2,2)) + (np.power(term2,4))
return s
'''Quartic'''
def F9(x):
x = np.asarray_chkfinite(x)
w = [i for i in range(len(x))]
w = np.add(w, 1)  # shift to 1-based weights; np.add returns a new array, so its result must be assigned
s = np.sum(np.multiply(w,np.power(x,4)) + np.random.uniform(0,1))
return s
'''Schwefel 2.21 -To test'''
def F10(x):
x = np.asarray_chkfinite(x)
w = len(x)
max_abs = 0.0
for i in range(0, w):
if abs(x[i]) > max_abs:
max_abs = abs(x[i])
return max_abs
'''Schwefel 2.22 -To test'''
def F11(x):
x = np.asarray_chkfinite(x)
term1 = 0.0
term2 = 1.0
w=len(x)
for i in range(w):
term1 += abs(x[i])
term2 *= abs(x[i])
s=term1 + term2
return s
'''sphere'''
def F12( x ):
s = np.asarray_chkfinite(x)
return np.sum( s**2.0 )
'''step2'''
def F13( x ):
x=np.asarray_chkfinite(x)
s=np.sum(np.floor((x+.5))**2)
return s
'''stepint'''
def F14(x):
x=np.asarray_chkfinite(x)
s = np.sum(np.ceil(x)) + 25
return s
'''sumsquares'''
def F15(x):
x=np.asarray_chkfinite(x)
w=len(x)
p=0
for i in range(0,w):
p=p+ np.multiply(i,np.power(x[i],2))
s=p
return s
'''ackley'''
def F16(x):
term1 = 0.0
term2 = 0.0
x=np.asarray_chkfinite(x)
for c in x:
term1 += np.power(c,2.0)
term2 += np.cos(2.0*np.pi*c)
n = float(len(x))
s= -20.0*np.exp(-0.2*np.sqrt(term1/n)) - np.exp(term2/n) + 20 + np.e
return s
'''Bohachevsky no.2'''
def F17(x):
x1 = x[0]
x2 = x[1]
s= (np.power(x1,2)) + (2*np.power(x2,2)) - (0.2* np.cos(0.3*np.pi*x1))*np.cos(4*np.pi*x2) + 0.3
#for x1, x2 in zip(x[:-1], x[1:])),
return s
'''Bohachevsky no.3'''
def F18(x):
x1 = x[0]
x2 = x[1]
s=np.power(x1,2) + 2*np.power(x2,2) - 0.3*np.cos(3*np.pi*x1+ 4*np.pi*x2)
return s
'''Crossintray'''
def F19(x):
x1 = x[0]
x2 = x[1]
# x1=float(x1)
# x2=float(x2)
a = np.sqrt(np.power(x1, 2) + np.power(x2, 2))
expo = np.exp(np.abs(100 - (a / np.pi)))  # Cross-in-tray uses exp(|100 - sqrt(x1^2 + x2^2)/pi|)
inside = np.fabs(np.sin(x1) * np.sin(x2) * expo) + 1
s = (-0.0001) * np.power(inside, 0.1)
return s
'''Griewank'''
def F20(x):
fr=4000
x = np.asarray_chkfinite(x)
n = len(x)
j = np.arange( 1., n+1 )
temp_sum = sum( np.power(x,2))
p = np.prod(np.cos( x / np.sqrt(j) ))
s=temp_sum/fr - p + 1
return s
'''GoldStein-Price'''
def F21(x):
x1 = x[0]
x2 = x[1]
s = (1+ (x1+x2+1)**2.0* (19- 14*x1+ 3 *x1** 2.0- 14*x2+ 6*x1*x2 + 3 *x2**2.0)) * (30+ (2*x1-3*x2)**2.0* (18-32*x1+12*x1**2.0+ 48*x2- 36 * x1*x2+ 27*x2**2.0))
return s
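# Sanity check: the Goldstein-Price function has its global minimum at (0, -1), where it evaluates to 3.
assert abs(F21([0.0, -1.0]) - 3.0) < 1e-9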
'''<NAME>'''
def F22(x):
x =
|
np.asarray_chkfinite(x)
|
numpy.asarray_chkfinite
|
#
# Created by: <NAME>, September 2002
#
import sys
import subprocess
import time
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
solve, ldl, norm, block_diag, qr, eigh)
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
# generates a random matrix of desired data type of shape
if dtype in COMPLEX_DTYPES:
return (np.random.rand(*shape)
+ np.random.rand(*shape)*1.0j).astype(dtype)
return np.random.rand(*shape).astype(dtype)
def test_lapack_documented():
"""Test that all entries are in the doc."""
if lapack.__doc__ is None: # just in case there is a python -OO
pytest.skip('lapack.__doc__ is None')
names = set(lapack.__doc__.split())
ignore_list = set([
'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
'flapack', 'print_function', 'HAS_ILP64',
])
missing = list()
for name in dir(lapack):
if (not name.startswith('_') and name not in ignore_list and
name not in names):
missing.append(name)
assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
seed(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rand(m, n).astype(dtype)
b1 = rand(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrf_lwork(m=m, n=n)
assert_equal(info, 0)
class TestRegression(object):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(object):
def test_gh_2691(self):
# 'lower' argument of dportf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(object):
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))),
m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
roots = np.array(roots)[::-1]
assert_(not np.any(np.isnan(roots)), "There are NaN roots")
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
class TestTbtrs(object):
@pytest.mark.parametrize('dtype', DTYPES)
def test_nag_example_f07vef_f07vsf(self, dtype):
"""Test real (f07vef) and complex (f07vsf) examples from NAG
Examples available from:
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
"""
if dtype in REAL_DTYPES:
ab = np.array([[-4.16, 4.78, 6.32, 0.16],
[-2.25, 5.86, -4.82, 0]],
dtype=dtype)
b = np.array([[-16.64, -4.16],
[-13.78, -16.59],
[13.10, -4.94],
[-14.14, -9.96]],
dtype=dtype)
x_out = np.array([[4, 1],
[-1, -3],
[3, 2],
[2, -2]],
dtype=dtype)
elif dtype in COMPLEX_DTYPES:
ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
[-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
[1.62+3.68j, -2.77-1.93j, 0, 0]],
dtype=dtype)
b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
[-15.57 - 23.41j, -57.97 + 8.14j],
[-7.63 + 22.78j, 19.09 - 29.51j],
[-14.74 - 2.40j, 19.17 + 21.33j]],
dtype=dtype)
x_out = np.array([[2j, 1 + 5j],
[1 - 3j, -7 - 2j],
[-4.001887 - 4.988417j, 3.026830 + 4.003182j],
[1.996158 - 1.045105j, -6.103357 - 8.986653j]],
dtype=dtype)
else:
raise ValueError(f"Datatype {dtype} not understood.")
tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
x, info = tbtrs(ab=ab, b=b, uplo='L')
assert_equal(info, 0)
assert_allclose(x, x_out, rtol=0, atol=1e-5)
@pytest.mark.parametrize('dtype,trans',
[(dtype, trans)
for dtype in DTYPES for trans in ['N', 'T', 'C']
if not (trans == 'C' and dtype in REAL_DTYPES)])
@pytest.mark.parametrize('uplo', ['U', 'L'])
@pytest.mark.parametrize('diag', ['N', 'U'])
def test_random_matrices(self, dtype, trans, uplo, diag):
seed(1724)
# n, nrhs, kd are used to specify A and b.
# A is of shape n x n with kd super/sub-diagonals
# b is of shape n x nrhs matrix
n, nrhs, kd = 4, 3, 2
tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
is_upper = (uplo == 'U')
ku = kd * is_upper
kl = kd - ku
# Construct the diagonal and kd super/sub diagonals of A with
# the corresponding offsets.
band_offsets = range(ku, -kl - 1, -1)
band_widths = [n - abs(x) for x in band_offsets]
bands = [generate_random_dtype_array((width,), dtype)
for width in band_widths]
if diag == 'U': # A must be unit triangular
bands[ku] = np.ones(n, dtype=dtype)
# Construct the diagonal banded matrix A from the bands and offsets.
a = sps.diags(bands, band_offsets, format='dia')
# Convert A into banded storage form
ab = np.zeros((kd + 1, n), dtype)
for row, k in enumerate(band_offsets):
ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
# The RHS values.
b = generate_random_dtype_array((n, nrhs), dtype)
x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
assert_equal(info, 0)
if trans == 'N':
assert_allclose(a @ x, b, rtol=5e-5)
elif trans == 'T':
assert_allclose(a.T @ x, b, rtol=5e-5)
elif trans == 'C':
assert_allclose(a.H @ x, b, rtol=5e-5)
else:
raise ValueError('Invalid trans argument')
@pytest.mark.parametrize('uplo,trans,diag',
[['U', 'N', 'Invalid'],
['U', 'Invalid', 'N'],
['Invalid', 'N', 'N']])
def test_invalid_argument_raises_exception(self, uplo, trans, diag):
"""Test if invalid values of uplo, trans and diag raise exceptions"""
# Argument checks occur independently of the datatype used.
# This means we do not need to parametrize over all available datatypes.
tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
ab = rand(4, 2)
b = rand(2, 4)
assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
def test_zero_element_in_diagonal(self):
"""Test if a matrix with a zero diagonal element is singular
If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info`
indicating the provided matrix is singular.
Note that ?tbtrs requires the matrix A to be stored in banded form.
In this form the diagonal corresponds to the last row."""
ab = np.ones((3, 4), dtype=float)
b = np.ones(4, dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
ab[-1, 3] = 0
_, info = tbtrs(ab=ab, b=b, uplo='U')
assert_equal(info, 4)
@pytest.mark.parametrize('ldab,n,ldb,nrhs', [
(5, 5, 0, 5),
(5, 5, 3, 5)
])
def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
"""Test ?tbtrs fails correctly if shapes are invalid."""
ab =
|
np.ones((ldab, n), dtype=float)
|
numpy.ones
|
import copy
import math
import numpy as np
from sklearn import metrics
import Levenshtein
from misc import matching
from misc import adapted_utils
from misc.constants import UU_FILTER, MU_FILTER, UNC_FILTER, UNC_MAPPING
from misc.utils import BColors
from misc import utils
##
# evaluation utils
def calc_scores(
event_matcher,
matcher,
matching_kwargs,
labels,
multiclass_strategy,
binary_strategy,
meta=(),
unittest=None,
):
"""Calculates multiclass and binary scores for a given matcher"""
# multiclass evaluation scores
result_multiclass = multiclass_eval(
event_matcher=event_matcher,
matcher=matcher,
matching_kwargs=matching_kwargs,
labels=labels,
strategy=multiclass_strategy,
unittest=unittest,
)
result_binary = []
if len(binary_strategy):
result_binary = binary_eval(
event_matcher=event_matcher,
matcher=matcher,
matching_kwargs=matching_kwargs,
labels=labels,
strategy=binary_strategy,
unittest=unittest,
)
result = [*result_multiclass, *result_binary]
# Add meta data
result = [utils.merge_dicts([*meta, _result]) for _result in result]
return result
def calc_nld(event_matcher, method):
"""Interface for Normalized Levenshtein distance"""
if method in ["sample"]:
# sample level nld
gt = event_matcher.data_gt["evt"].values
pr = event_matcher.data_pr["evt"].values
else:
gt = event_matcher.evt_gt["evt"].values
pr = event_matcher.evt_pr["evt"].values
return _calc_nld(gt, pr)
def _calc_nld(gt, pr):
"""Calculates Normalized Levenshtein distance."""
gt_chr = utils.seq2chr(gt)
pr_chr = utils.seq2chr(pr)
_l = len(gt)
_check = (_l == len(gt_chr), len(pr) == len(pr_chr))
assert all(_check)
nld = Levenshtein.distance(gt_chr, pr_chr) / _l
# nld = np.clip(nld, 0.0, 1.0)
return nld
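# Worked example: once seq2chr has mapped each event sequence to a character
# string, e.g. gt -> "aab" and pr -> "abb", Levenshtein.distance("aab", "abb")
# is 1, so the normalized distance is 1 / 3.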
def calc_multiclass_metrics(gt, pr, labels=None, zero_division=None):
"""Calculates multiclass metrics"""
_metrics = ("accuracy", "accuracy_balanced", "kappa", "mcc")
if len(gt):
c = metrics.confusion_matrix(gt, pr, labels=labels)
scores = (
metrics.accuracy_score(gt, pr),
adapted_utils.balanced_accuracy(c),
adapted_utils.cohen_kappa_score(c, zero_division=zero_division),
adapted_utils.matthews_corrcoef(c, zero_division=zero_division),
)
else:
scores = [None] * len(_metrics)
result = {m: s for m, s in zip(_metrics, scores)}
return result
def calc_binary_metrics(gt, pr, zero_division=None):
"""Calculates binary metrics and explicitly handles cases where metrics are undefined.
Metrics:
Accuracy, Balanced accuracy
Precision, Sensitivity (Recall), Specificity, F1-score
IoU (Jaccard Index)
Cohen's Kappa
MCC
ROC AUC
Normalized Levenshtein distance
"""
c = metrics.confusion_matrix(gt, pr, labels=[0, 1])
tn, fp, fn, tp = c.ravel()
# accuracy
accuracy = (tp + tn) / sum((tn, fp, fn, tp))
accuracy_balanced = adapted_utils.balanced_accuracy(c)
# precision, sensitivity, specificity, f1_score
_denom = tp + fp
precision = tp / _denom if _denom > 0 else zero_division
_denom = tp + fn
sensitivity = tp / _denom if _denom > 0 else zero_division
_denom = tn + fp
specificity = tn / _denom if _denom > 0 else zero_division
_denom = 2 * tp + fp + fn
f1_score = 2 * tp / _denom if _denom > 0 else zero_division
# IoU
_denom = tp + fp + fn
iou = tp / _denom if _denom > 0 else zero_division
# Kappa
kappa = adapted_utils.cohen_kappa_score(c, zero_division=zero_division)
# Matthews Correlation Coefficient
_denom = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
mcc = (tp * tn - fp * fn) / math.sqrt(_denom) if _denom > 0 else zero_division
# ROC AUC
_auc_check = len(
|
np.unique(gt)
|
numpy.unique
|
# MIT License
#
# Copyright (c) 2018-2019 Tskit Developers
# Copyright (C) 2016 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for generalized statistic computation.
"""
import io
import unittest
import random
import collections
import itertools
import functools
import contextlib
import numpy as np
import numpy.testing as nt
import msprime
import tskit
import tskit.exceptions as exceptions
import tests.tsutil as tsutil
import tests.test_wright_fisher as wf
np.random.seed(5)
def subset_combos(*args, p=0.5, min_tests=3):
# We have too many tests, combinatorially; so we will run a random subset
# of them, using this function, below. If we don't set a seed, a different
# random set is run each time. Ensures that at least min_tests are run.
# Uncomment this line to run all tests (takes about an hour):
# p = 1.0
num_tests = 0
skipped_tests = []
# total_tests = 0
for x in itertools.product(*args):
# total_tests = total_tests + 1
if np.random.uniform() < p:
num_tests = num_tests + 1
yield x
elif len(skipped_tests) < min_tests:
skipped_tests.append(x)
elif np.random.uniform() < 0.1:
skipped_tests[np.random.randint(min_tests)] = x
while num_tests < min_tests:
yield skipped_tests.pop()
num_tests = num_tests + 1
# print("tests", num_tests, "/", total_tests)
assert num_tests >= min_tests
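# Example: subset_combos(range(2), range(2), p=0.0, min_tests=3) still yields
# three of the four possible combinations, because the skipped-test fallback
# guarantees at least min_tests cases are produced.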
def naive_general_branch_stats(ts, W, f, windows=None, polarised=False):
n, K = W.shape
if n != ts.num_samples:
raise ValueError("First dimension of W must be number of samples")
# Hack to determine M
M = len(f(W[0]))
total = np.sum(W, axis=0)
sigma = np.zeros((ts.num_trees, M))
for tree in ts.trees():
X = np.zeros((ts.num_nodes, K))
X[ts.samples()] = W
for u in tree.nodes(order="postorder"):
for v in tree.children(u):
X[u] += X[v]
if polarised:
s = sum(tree.branch_length(u) * f(X[u]) for u in tree.nodes())
else:
s = sum(
tree.branch_length(u) * (f(X[u]) + f(total - X[u]))
for u in tree.nodes())
sigma[tree.index] = s * tree.span
if windows is None:
return sigma
else:
bsc = tskit.BranchLengthStatCalculator(ts)
return bsc.windowed_tree_stat(sigma, windows)
def naive_general_site_stats(ts, W, f, windows=None, polarised=False):
n, K = W.shape
if n != ts.num_samples:
raise ValueError("First dimension of W must be number of samples")
# Hack to determine M
M = len(f(W[0]))
sigma = np.zeros((ts.num_sites, M))
for tree in ts.trees():
X = np.zeros((ts.num_nodes, K))
X[ts.samples()] = W
for u in tree.nodes(order="postorder"):
for v in tree.children(u):
X[u] += X[v]
for site in tree.sites():
state_map = collections.defaultdict(functools.partial(np.zeros, K))
state_map[site.ancestral_state] = sum(X[root] for root in tree.roots)
for mutation in site.mutations:
state_map[mutation.derived_state] += X[mutation.node]
if mutation.parent != tskit.NULL:
parent = site.mutations[mutation.parent - site.mutations[0].id]
state_map[parent.derived_state] -= X[mutation.node]
else:
state_map[site.ancestral_state] -= X[mutation.node]
if polarised:
del state_map[site.ancestral_state]
sigma[site.id] += sum(map(f, state_map.values()))
if windows is None:
return sigma
else:
ssc = tskit.SiteStatCalculator(ts)
return ssc.windowed_sitewise_stat(sigma, windows)
def path_length(tr, x, y):
L = 0
if x >= 0 and y >= 0:
mrca = tr.mrca(x, y)
else:
mrca = -1
for u in x, y:
while u != mrca:
L += tr.branch_length(u)
u = tr.parent(u)
return L
@contextlib.contextmanager
def suppress_division_by_zero_warning():
with np.errstate(invalid='ignore', divide='ignore'):
yield
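# Typical use: wrap a computation that may divide by zero, e.g.
# with suppress_division_by_zero_warning():
#     ratio = np.array([1.0, 0.0]) / np.array([0.0, 0.0])
# silences the RuntimeWarnings for the resulting inf and nan entries.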
##############################
# Branch general stat algorithms
##############################
def windowed_tree_stat(ts, stat, windows, span_normalise=True):
shape = list(stat.shape)
shape[0] = len(windows) - 1
A = np.zeros(shape)
tree_breakpoints = np.array(list(ts.breakpoints()))
tree_index = 0
for j in range(len(windows) - 1):
w_left = windows[j]
w_right = windows[j + 1]
while True:
t_left = tree_breakpoints[tree_index]
t_right = tree_breakpoints[tree_index + 1]
left = max(t_left, w_left)
right = min(t_right, w_right)
weight = max(0.0, (right - left) / (t_right - t_left))
A[j] += stat[tree_index] * weight
assert left != right
if t_right <= w_right:
tree_index += 1
# TODO This is inelegant - should include this in the case below
if t_right == w_right:
break
else:
break
if span_normalise:
# re-normalize by window lengths
window_lengths = np.diff(windows)
for j in range(len(windows) - 1):
A[j] /= window_lengths[j]
return A
def naive_branch_general_stat(ts, w, f, windows=None, polarised=False,
span_normalise=True):
if windows is None:
windows = [0.0, ts.sequence_length]
n, k = w.shape
# hack to determine m
m = len(f(w[0]))
total = np.sum(w, axis=0)
sigma = np.zeros((ts.num_trees, m))
for tree in ts.trees():
x = np.zeros((ts.num_nodes, k))
x[ts.samples()] = w
for u in tree.nodes(order="postorder"):
for v in tree.children(u):
x[u] += x[v]
if polarised:
s = sum(tree.branch_length(u) * f(x[u]) for u in tree.nodes())
else:
s = sum(
tree.branch_length(u) * (f(x[u]) + f(total - x[u]))
for u in tree.nodes())
sigma[tree.index] = s * tree.span
if isinstance(windows, str) and windows == "trees":
# need to average across the windows
if span_normalise:
for j, tree in enumerate(ts.trees()):
sigma[j] /= tree.span
return sigma
else:
return windowed_tree_stat(ts, sigma, windows, span_normalise=span_normalise)
def branch_general_stat(ts, sample_weights, summary_func, windows=None,
polarised=False, span_normalise=True):
"""
Efficient implementation of the algorithm used as the basis for the
underlying C version.
"""
n, state_dim = sample_weights.shape
windows = ts.parse_windows(windows)
num_windows = windows.shape[0] - 1
# Determine result_dim
result_dim = len(summary_func(sample_weights[0]))
result = np.zeros((num_windows, result_dim))
state = np.zeros((ts.num_nodes, state_dim))
state[ts.samples()] = sample_weights
total_weight = np.sum(sample_weights, axis=0)
def area_weighted_summary(u):
v = parent[u]
branch_length = 0
if v != -1:
branch_length = time[v] - time[u]
s = summary_func(state[u])
if not polarised:
s += summary_func(total_weight - state[u])
return branch_length * s
tree_index = 0
window_index = 0
time = ts.tables.nodes.time
parent = np.zeros(ts.num_nodes, dtype=np.int32) - 1
running_sum = np.zeros(result_dim)
for (t_left, t_right), edges_out, edges_in in ts.edge_diffs():
for edge in edges_out:
u = edge.child
running_sum -= area_weighted_summary(u)
u = edge.parent
while u != -1:
running_sum -= area_weighted_summary(u)
state[u] -= state[edge.child]
running_sum += area_weighted_summary(u)
u = parent[u]
parent[edge.child] = -1
for edge in edges_in:
parent[edge.child] = edge.parent
u = edge.child
running_sum += area_weighted_summary(u)
u = edge.parent
while u != -1:
running_sum -= area_weighted_summary(u)
state[u] += state[edge.child]
running_sum += area_weighted_summary(u)
u = parent[u]
# Update the windows
assert window_index < num_windows
while windows[window_index] < t_right:
w_left = windows[window_index]
w_right = windows[window_index + 1]
left = max(t_left, w_left)
right = min(t_right, w_right)
weight = right - left
assert weight > 0
result[window_index] += running_sum * weight
if w_right <= t_right:
window_index += 1
else:
# This interval crosses a tree boundary, so we update it again
# for the next tree
break
tree_index += 1
# print("window_index:", window_index, windows.shape)
assert window_index == windows.shape[0] - 1
if span_normalise:
for j in range(num_windows):
result[j] /= windows[j + 1] - windows[j]
return result
##############################
# Site general stat algorithms
##############################
def windowed_sitewise_stat(ts, sigma, windows, span_normalise=True):
M = sigma.shape[1]
A = np.zeros((len(windows) - 1, M))
window = 0
for site in ts.sites():
while windows[window + 1] <= site.position:
window += 1
assert windows[window] <= site.position < windows[window + 1]
A[window] += sigma[site.id]
if span_normalise:
diff = np.zeros((A.shape[0], 1))
diff[:, 0] = np.diff(windows).T
A /= diff
return A
def naive_site_general_stat(ts, W, f, windows=None, polarised=False,
span_normalise=True):
n, K = W.shape
# Hack to determine M
M = len(f(W[0]))
sigma = np.zeros((ts.num_sites, M))
for tree in ts.trees():
X = np.zeros((ts.num_nodes, K))
X[ts.samples()] = W
for u in tree.nodes(order="postorder"):
for v in tree.children(u):
X[u] += X[v]
for site in tree.sites():
state_map = collections.defaultdict(functools.partial(np.zeros, K))
state_map[site.ancestral_state] = sum(X[root] for root in tree.roots)
for mutation in site.mutations:
state_map[mutation.derived_state] += X[mutation.node]
if mutation.parent != tskit.NULL:
parent = site.mutations[mutation.parent - site.mutations[0].id]
state_map[parent.derived_state] -= X[mutation.node]
else:
state_map[site.ancestral_state] -= X[mutation.node]
if polarised:
del state_map[site.ancestral_state]
sigma[site.id] += sum(map(f, state_map.values()))
return windowed_sitewise_stat(
ts, sigma, ts.parse_windows(windows),
span_normalise=span_normalise)
def site_general_stat(ts, sample_weights, summary_func, windows=None, polarised=False,
span_normalise=True):
"""
Problem: 'sites' is different from the other windowing options
because if we output by site we don't want to normalize by the length of the window.
Solution: we pass a "normalize" argument through to the windowing function.
"""
windows = ts.parse_windows(windows)
num_windows = windows.shape[0] - 1
n, state_dim = sample_weights.shape
# Determine result_dim
result_dim, = summary_func(sample_weights[0]).shape
result = np.zeros((num_windows, result_dim))
state = np.zeros((ts.num_nodes, state_dim))
state[ts.samples()] = sample_weights
total_weight = np.sum(sample_weights, axis=0)
site_index = 0
mutation_index = 0
window_index = 0
sites = ts.tables.sites
mutations = ts.tables.mutations
parent = np.zeros(ts.num_nodes, dtype=np.int32) - 1
for (left, right), edges_out, edges_in in ts.edge_diffs():
for edge in edges_out:
u = edge.parent
while u != -1:
state[u] -= state[edge.child]
u = parent[u]
parent[edge.child] = -1
for edge in edges_in:
parent[edge.child] = edge.parent
u = edge.parent
while u != -1:
state[u] += state[edge.child]
u = parent[u]
while site_index < len(sites) and sites.position[site_index] < right:
assert left <= sites.position[site_index]
ancestral_state = sites[site_index].ancestral_state
allele_state = collections.defaultdict(
functools.partial(np.zeros, state_dim))
allele_state[ancestral_state][:] = total_weight
while (
mutation_index < len(mutations)
and mutations[mutation_index].site == site_index):
mutation = mutations[mutation_index]
allele_state[mutation.derived_state] += state[mutation.node]
if mutation.parent != -1:
parent_allele = mutations[mutation.parent].derived_state
allele_state[parent_allele] -= state[mutation.node]
else:
allele_state[ancestral_state] -= state[mutation.node]
mutation_index += 1
if polarised:
del allele_state[ancestral_state]
site_result = np.zeros(result_dim)
for allele, value in allele_state.items():
site_result += summary_func(value)
pos = sites.position[site_index]
while windows[window_index + 1] <= pos:
window_index += 1
assert windows[window_index] <= pos < windows[window_index + 1]
result[window_index] += site_result
site_index += 1
if span_normalise:
for j in range(num_windows):
span = windows[j + 1] - windows[j]
result[j] /= span
return result
##############################
# Node general stat algorithms
##############################
def naive_node_general_stat(ts, W, f, windows=None, polarised=False,
span_normalise=True):
windows = ts.parse_windows(windows)
n, K = W.shape
M = f(W[0]).shape[0]
total = np.sum(W, axis=0)
sigma = np.zeros((ts.num_trees, ts.num_nodes, M))
for tree in ts.trees():
X = np.zeros((ts.num_nodes, K))
X[ts.samples()] = W
for u in tree.nodes(order="postorder"):
for v in tree.children(u):
X[u] += X[v]
s = np.zeros((ts.num_nodes, M))
for u in range(ts.num_nodes):
s[u] = f(X[u])
if not polarised:
s[u] += f(total - X[u])
sigma[tree.index] = s * tree.span
return windowed_tree_stat(ts, sigma, windows, span_normalise=span_normalise)
def node_general_stat(ts, sample_weights, summary_func, windows=None, polarised=False,
span_normalise=True):
"""
Efficient implementation of the algorithm used as the basis for the
underlying C version.
"""
n, state_dim = sample_weights.shape
windows = ts.parse_windows(windows)
num_windows = windows.shape[0] - 1
result_dim = summary_func(sample_weights[0]).shape[0]
result = np.zeros((num_windows, ts.num_nodes, result_dim))
state = np.zeros((ts.num_nodes, state_dim))
state[ts.samples()] = sample_weights
total_weight = np.sum(sample_weights, axis=0)
def node_summary(u):
s = summary_func(state[u])
if not polarised:
s += summary_func(total_weight - state[u])
return s
tree_index = 0
window_index = 0
parent = np.zeros(ts.num_nodes, dtype=np.int32) - 1
# contains summary_func(state[u]) for each node
current_values = np.zeros((ts.num_nodes, result_dim))
for u in range(ts.num_nodes):
current_values[u] = node_summary(u)
# contains the location of the last time we updated the output for a node.
last_update = np.zeros((ts.num_nodes, 1))
for (t_left, t_right), edges_out, edges_in in ts.edge_diffs():
for edge in edges_out:
u = edge.child
v = edge.parent
while v != -1:
result[window_index, v] += (t_left - last_update[v]) * current_values[v]
last_update[v] = t_left
state[v] -= state[u]
current_values[v] = node_summary(v)
v = parent[v]
parent[u] = -1
for edge in edges_in:
u = edge.child
v = edge.parent
parent[u] = v
while v != -1:
result[window_index, v] += (t_left - last_update[v]) * current_values[v]
last_update[v] = t_left
state[v] += state[u]
current_values[v] = node_summary(v)
v = parent[v]
# Update the windows
while window_index < num_windows and windows[window_index + 1] <= t_right:
w_right = windows[window_index + 1]
# Flush the contribution of all nodes to the current window.
for u in range(ts.num_nodes):
result[window_index, u] += (w_right - last_update[u]) * current_values[u]
last_update[u] = w_right
window_index += 1
tree_index += 1
assert window_index == windows.shape[0] - 1
if span_normalise:
for j in range(num_windows):
result[j] /= windows[j + 1] - windows[j]
return result
def general_stat(
ts, sample_weights, summary_func, windows=None, polarised=False,
mode="site", span_normalise=True):
"""
General interface for the algorithms above. Directly corresponds to the interface
for TreeSequence.general_stat.
"""
method_map = {
"site": site_general_stat,
"node": node_general_stat,
"branch": branch_general_stat}
return method_map[mode](
ts, sample_weights, summary_func, windows=windows, polarised=polarised,
span_normalise=span_normalise)
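# Minimal usage sketch (illustrative only, not exercised by the tests below):
#   ts = msprime.simulate(6, random_seed=1)
#   W = np.ones((ts.num_samples, 1))
#   f = lambda x: np.array([x[0]])
#   sigma = general_stat(ts, W, f, windows=[0.0, ts.sequence_length], mode="branch")
# sigma then has shape (num_windows, result_dim) == (1, 1).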
def upper_tri_to_matrix(x):
"""
Given x, a vector of entries of the upper triangle of a matrix
in row-major order, including the diagonal, return the corresponding matrix.
"""
# n^2 + n = 2 u => n = (-1 + sqrt(1 + 8*u))/2
n = int((np.sqrt(1 + 8 * len(x)) - 1)/2.0)
out = np.ones((n, n))
k = 0
for i in range(n):
for j in range(i, n):
out[i, j] = out[j, i] = x[k]
k += 1
return out
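# Example: x = [1, 2, 3] encodes a 2 x 2 symmetric matrix, since
# n = (-1 + sqrt(1 + 8 * 3)) / 2 = 2, and unpacks to [[1, 2], [2, 3]].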
##################################
# Test cases
##################################
class StatsTestCase(unittest.TestCase):
"""
Provides convenience functions.
"""
def assertListAlmostEqual(self, x, y):
self.assertEqual(len(x), len(y))
for a, b in zip(x, y):
self.assertAlmostEqual(a, b)
def assertArrayEqual(self, x, y):
nt.assert_equal(x, y)
def assertArrayAlmostEqual(self, x, y):
nt.assert_array_almost_equal(x, y)
class TopologyExamplesMixin(object):
"""
Defines a set of test cases on different example tree sequence topologies.
Derived classes need to define a 'verify' function which will perform the
actual tests.
"""
def test_single_tree(self):
ts = msprime.simulate(6, random_seed=1)
self.verify(ts)
def test_many_trees(self):
ts = msprime.simulate(6, recombination_rate=2, random_seed=1)
self.assertGreater(ts.num_trees, 2)
self.verify(ts)
def test_many_trees_sequence_length(self):
for L in [0.5, 3.3333]:
ts = msprime.simulate(6, length=L, recombination_rate=2, random_seed=1)
self.verify(ts)
def test_wright_fisher_unsimplified(self):
tables = wf.wf_sim(
4, 5, seed=1, deep_history=True, initial_generation_samples=False,
num_loci=5)
tables.sort()
ts = tables.tree_sequence()
self.verify(ts)
def test_wright_fisher_initial_generation(self):
tables = wf.wf_sim(
6, 5, seed=3, deep_history=True, initial_generation_samples=True,
num_loci=2)
tables.sort()
tables.simplify()
ts = tables.tree_sequence()
self.verify(ts)
def test_wright_fisher_initial_generation_no_deep_history(self):
tables = wf.wf_sim(
6, 15, seed=202, deep_history=False, initial_generation_samples=True,
num_loci=5)
tables.sort()
tables.simplify()
ts = tables.tree_sequence()
self.verify(ts)
def test_wright_fisher_unsimplified_multiple_roots(self):
tables = wf.wf_sim(
6, 5, seed=1, deep_history=False, initial_generation_samples=False,
num_loci=4)
tables.sort()
ts = tables.tree_sequence()
self.verify(ts)
def test_wright_fisher_simplified(self):
tables = wf.wf_sim(
5, 8, seed=1, deep_history=True, initial_generation_samples=False,
num_loci=5)
tables.sort()
ts = tables.tree_sequence().simplify()
self.verify(ts)
def test_wright_fisher_simplified_multiple_roots(self):
tables = wf.wf_sim(
6, 8, seed=1, deep_history=False, initial_generation_samples=False,
num_loci=3)
tables.sort()
ts = tables.tree_sequence().simplify()
self.verify(ts)
def test_empty_ts(self):
tables = tskit.TableCollection(1.0)
tables.nodes.add_row(1, 0)
tables.nodes.add_row(1, 0)
ts = tables.tree_sequence()
self.verify(ts)
class MutatedTopologyExamplesMixin(object):
"""
Defines a set of test cases on different example tree sequence topologies.
Derived classes need to define a 'verify' function which will perform the
actual tests.
"""
def test_single_tree_no_sites(self):
ts = msprime.simulate(6, random_seed=1)
self.assertEqual(ts.num_sites, 0)
self.verify(ts)
def test_single_tree_infinite_sites(self):
ts = msprime.simulate(6, random_seed=1, mutation_rate=1)
self.assertGreater(ts.num_sites, 0)
self.verify(ts)
def test_single_tree_sites_no_mutations(self):
ts = msprime.simulate(6, random_seed=1)
tables = ts.dump_tables()
tables.sites.add_row(0.1, "a")
tables.sites.add_row(0.2, "aaa")
self.verify(tables.tree_sequence())
def test_single_tree_jukes_cantor(self):
ts = msprime.simulate(6, random_seed=1, mutation_rate=1)
ts = tsutil.jukes_cantor(ts, 20, 1, seed=10)
self.verify(ts)
def test_single_tree_multichar_mutations(self):
ts = msprime.simulate(6, random_seed=1, mutation_rate=1)
ts = tsutil.insert_multichar_mutations(ts)
self.verify(ts)
def test_many_trees_infinite_sites(self):
ts = msprime.simulate(6, recombination_rate=2, mutation_rate=2, random_seed=1)
self.assertGreater(ts.num_sites, 0)
self.assertGreater(ts.num_trees, 2)
self.verify(ts)
def test_many_trees_sequence_length_infinite_sites(self):
for L in [0.5, 1.5, 3.3333]:
ts = msprime.simulate(
6, length=L, recombination_rate=2, mutation_rate=1, random_seed=1)
self.verify(ts)
def test_wright_fisher_unsimplified(self):
tables = wf.wf_sim(
4, 5, seed=1, deep_history=True, initial_generation_samples=False,
num_loci=10)
tables.sort()
ts = msprime.mutate(tables.tree_sequence(), rate=0.05, random_seed=234)
self.assertGreater(ts.num_sites, 0)
self.verify(ts)
def test_wright_fisher_initial_generation(self):
tables = wf.wf_sim(
6, 5, seed=3, deep_history=True, initial_generation_samples=True,
num_loci=2)
tables.sort()
tables.simplify()
ts = msprime.mutate(tables.tree_sequence(), rate=0.08, random_seed=2)
self.assertGreater(ts.num_sites, 0)
self.verify(ts)
def test_wright_fisher_initial_generation_no_deep_history(self):
tables = wf.wf_sim(
7, 15, seed=202, deep_history=False, initial_generation_samples=True,
num_loci=5)
tables.sort()
tables.simplify()
ts = msprime.mutate(tables.tree_sequence(), rate=0.01, random_seed=2)
self.assertGreater(ts.num_sites, 0)
self.verify(ts)
def test_wright_fisher_unsimplified_multiple_roots(self):
tables = wf.wf_sim(
8, 15, seed=1, deep_history=False, initial_generation_samples=False,
num_loci=20)
tables.sort()
ts = msprime.mutate(tables.tree_sequence(), rate=0.006, random_seed=2)
self.assertGreater(ts.num_sites, 0)
self.verify(ts)
def test_wright_fisher_simplified(self):
tables = wf.wf_sim(
9, 10, seed=1, deep_history=True, initial_generation_samples=False,
num_loci=5)
tables.sort()
ts = tables.tree_sequence().simplify()
ts = msprime.mutate(ts, rate=0.01, random_seed=1234)
self.assertGreater(ts.num_sites, 0)
self.verify(ts)
def test_empty_ts(self):
tables = tskit.TableCollection(1.0)
for _ in range(10):
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
ts = tables.tree_sequence()
self.verify(ts)
def example_sample_sets(ts, min_size=1):
"""
Generate a series of example sample sets from the specified tree sequence.
"""
samples = ts.samples()
yield [[u] for u in samples]
if ts.num_samples >= min_size:
splits = np.array_split(samples, min_size)
yield splits
yield splits[::-1]
def example_sample_set_index_pairs(sample_sets):
k = len(sample_sets)
if k > 1:
yield [(0, 1)]
yield [(1, 0), (0, 1)]
if k > 2:
yield [(0, 1), (1, 2), (0, 2)]
def example_sample_set_index_triples(sample_sets):
k = len(sample_sets)
if k > 2:
yield [(0, 1, 2)]
yield [(0, 2, 1), (2, 1, 0)]
if k > 3:
yield [(3, 0, 1), (0, 2, 3), (1, 2, 3)]
def example_sample_set_index_quads(sample_sets):
k = len(sample_sets)
if k > 3:
yield [(0, 1, 2, 3)]
yield [(0, 1, 2, 3), (3, 2, 1, 0)]
yield [(0, 1, 2, 3), (3, 2, 1, 0), (1, 2, 3, 0)]
def example_windows(ts):
"""
Generate a series of example windows for the specified tree sequence.
"""
L = ts.sequence_length
yield None
yield "sites"
yield "trees"
yield [0, L]
yield ts.breakpoints(as_array=True)
yield np.linspace(0, L, num=10)
class WeightStatsMixin(object):
"""
Implements the verify method and dispatches it to verify_weighted_stat
for a representative set of sample sets and windows.
"""
def example_weights(self, ts, min_size=1):
"""
Generate a series of example weights from the specified tree sequence.
"""
for k in [min_size, min_size + 1, min_size + 10]:
W = 1.0 + np.zeros((ts.num_samples, k))
W[0, :] = 2.0
yield W
for j in range(k):
W[:, j] = np.random.exponential(1, ts.num_samples)
yield W
for j in range(k):
W[:, j] = np.random.normal(0, 1, ts.num_samples)
yield W
def transform_weights(self, W):
"""
Specific methods will need to transform weights
before passing them to general_stat.
"""
return W
def verify(self, ts):
for W, windows in subset_combos(
self.example_weights(ts), example_windows(ts), p=0.1):
self.verify_weighted_stat(ts, W, windows=windows)
def verify_definition(
self, ts, W, windows, summary_func, ts_method, definition):
# general_stat will need an extra column for p
gW = self.transform_weights(W)
def wrapped_summary_func(x):
with suppress_division_by_zero_warning():
return summary_func(x)
for sn in [True, False]:
sigma1 = ts.general_stat(gW, wrapped_summary_func, windows, mode=self.mode,
span_normalise=sn)
sigma2 = general_stat(ts, gW, wrapped_summary_func, windows, mode=self.mode,
span_normalise=sn)
sigma3 = ts_method(W, windows=windows, mode=self.mode,
span_normalise=sn)
sigma4 = definition(ts, W, windows=windows, mode=self.mode,
span_normalise=sn)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertEqual(sigma1.shape, sigma4.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
self.assertArrayAlmostEqual(sigma1, sigma4)
class SampleSetStatsMixin(object):
"""
Implements the verify method and dispatches it to verify_sample_sets
for a representative set of sample sets and windows.
"""
def verify(self, ts):
for sample_sets, windows in subset_combos(
example_sample_sets(ts), example_windows(ts)):
self.verify_sample_sets(ts, sample_sets, windows=windows)
def verify_definition(
self, ts, sample_sets, windows, summary_func, ts_method, definition):
W = np.array(
[[u in A for A in sample_sets] for u in ts.samples()], dtype=float)
def wrapped_summary_func(x):
with suppress_division_by_zero_warning():
return summary_func(x)
for sn in [True, False]:
sigma1 = ts.general_stat(W, wrapped_summary_func, windows, mode=self.mode,
span_normalise=sn)
sigma2 = general_stat(ts, W, wrapped_summary_func, windows, mode=self.mode,
span_normalise=sn)
sigma3 = ts_method(sample_sets, windows=windows, mode=self.mode,
span_normalise=sn)
sigma4 = definition(ts, sample_sets, windows=windows, mode=self.mode,
span_normalise=sn)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertEqual(sigma1.shape, sigma4.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
self.assertArrayAlmostEqual(sigma1, sigma4)
class KWaySampleSetStatsMixin(SampleSetStatsMixin):
"""
Defines the verify definition method, which compares the results from
several different ways of defining and computing the same statistic.
"""
def verify_definition(
self, ts, sample_sets, indexes, windows, summary_func, ts_method,
definition):
def wrapped_summary_func(x):
with suppress_division_by_zero_warning():
return summary_func(x)
W = np.array(
[[u in A for A in sample_sets] for u in ts.samples()], dtype=float)
sigma1 = ts.general_stat(W, wrapped_summary_func, windows, mode=self.mode)
sigma2 = general_stat(ts, W, wrapped_summary_func, windows, mode=self.mode)
sigma3 = ts_method(
sample_sets, indexes=indexes, windows=windows, mode=self.mode)
sigma4 = definition(
ts, sample_sets, indexes=indexes, windows=windows, mode=self.mode)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertEqual(sigma1.shape, sigma4.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
self.assertArrayAlmostEqual(sigma1, sigma4)
class TwoWaySampleSetStatsMixin(KWaySampleSetStatsMixin):
"""
Implements the verify method and dispatches it to verify_sample_sets_indexes,
which gives a representative sample of sample set indexes.
"""
def verify(self, ts):
for sample_sets, windows in subset_combos(
example_sample_sets(ts, min_size=2), example_windows(ts)):
for indexes in example_sample_set_index_pairs(sample_sets):
self.verify_sample_sets_indexes(ts, sample_sets, indexes, windows)
class ThreeWaySampleSetStatsMixin(KWaySampleSetStatsMixin):
"""
Implements the verify method and dispatches it to verify_sample_sets_indexes,
which gives a representative sample of sample set indexes.
"""
def verify(self, ts):
for sample_sets, windows in subset_combos(
example_sample_sets(ts, min_size=3), example_windows(ts)):
for indexes in example_sample_set_index_triples(sample_sets):
self.verify_sample_sets_indexes(ts, sample_sets, indexes, windows)
class FourWaySampleSetStatsMixin(KWaySampleSetStatsMixin):
"""
Implements the verify method and dispatches it to verify_sample_sets_indexes,
which gives a representative sample of sample set indexes.
"""
def verify(self, ts):
for sample_sets, windows in subset_combos(
example_sample_sets(ts, min_size=4), example_windows(ts)):
for indexes in example_sample_set_index_quads(sample_sets):
self.verify_sample_sets_indexes(ts, sample_sets, indexes, windows)
############################################
# Diversity
############################################
def site_diversity(ts, sample_sets, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(sample_sets)))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
haps = ts.genotype_matrix().T
site_positions = [x.position for x in ts.sites()]
for i, X in enumerate(sample_sets):
S = 0
site_in_window = False
denom = np.float64(len(X) * (len(X) - 1))
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
for x in X:
for y in set(X) - set([x]):
x_index = np.where(samples == x)[0][0]
y_index = np.where(samples == y)[0][0]
if haps[x_index][k] != haps[y_index][k]:
# x|y
S += 1
if site_in_window:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def branch_diversity(ts, sample_sets, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(sample_sets)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, X in enumerate(sample_sets):
S = 0
denom = np.float64(len(X) * (len(X) - 1))
has_trees = False
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
SS = 0
for x in X:
for y in set(X) - set([x]):
SS += path_length(tr, x, y)
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
if has_trees:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def node_diversity(ts, sample_sets, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
K = len(sample_sets)
out = np.zeros((len(windows) - 1, ts.num_nodes, K))
for k in range(K):
X = sample_sets[k]
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
tX = len(X)
denom = np.float64(len(X) * (len(X) - 1))
S = np.zeros(ts.num_nodes)
for tr in ts.trees(tracked_samples=X):
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in tr.nodes():
# count number of pairwise paths going through u
n = tr.num_tracked_samples(u)
SS[u] += 2 * n * (tX - n)
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
with suppress_division_by_zero_warning():
out[j, :, k] = S / denom
if span_normalise:
out[j, :, k] /= (end - begin)
return out
def diversity(ts, sample_sets, windows=None, mode="site", span_normalise=True):
"""
Computes the average pairwise diversity between two random choices from each
sample set, over the windows specified.
"""
method_map = {
"site": site_diversity,
"node": node_diversity,
"branch": branch_diversity}
return method_map[mode](ts, sample_sets, windows=windows,
span_normalise=span_normalise)
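# Illustrative comparison against the library implementation (a sketch; the
# systematic checks are performed by TestDiversity below):
#   ts = msprime.simulate(6, mutation_rate=1, random_seed=1)
#   d_def = diversity(ts, [list(ts.samples())], mode="site")   # shape (1, 1)
#   d_lib = ts.diversity([list(ts.samples())], mode="site")    # shape (1,)
# d_def[0, 0] and d_lib[0] should agree to floating point precision.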
class TestDiversity(StatsTestCase, SampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets(self, ts, sample_sets, windows):
# print("verify", ts, sample_sets, windows)
n = np.array([len(x) for x in sample_sets])
def f(x):
return x * (n - x) / (n * (n - 1))
self.verify_definition(
ts, sample_sets, windows, f, ts.diversity, diversity)
class TestBranchDiversity(TestDiversity, TopologyExamplesMixin):
mode = "branch"
class TestNodeDiversity(TestDiversity, TopologyExamplesMixin):
mode = "node"
class TestSiteDiversity(TestDiversity, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# Segregating sites
############################################
def site_segregating_sites(ts, sample_sets, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(sample_sets)))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
haps = ts.genotype_matrix()
site_positions = [x.position for x in ts.sites()]
for i, X in enumerate(sample_sets):
            X_index = np.where(np.in1d(samples, X))[0]
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
num_alleles = len(set(haps[k, X_index]))
out[j][i] += (num_alleles - 1)
if span_normalise:
out[j][i] /= (end - begin)
return out
def branch_segregating_sites(ts, sample_sets, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(sample_sets)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, X in enumerate(sample_sets):
tX = len(X)
for tr in ts.trees(tracked_samples=X):
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
SS = 0
for u in tr.nodes():
nX = tr.num_tracked_samples(u)
if nX > 0 and nX < tX:
SS += tr.branch_length(u)
out[j][i] += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
if span_normalise:
out[j][i] /= (end - begin)
return out
def node_segregating_sites(ts, sample_sets, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
K = len(sample_sets)
out = np.zeros((len(windows) - 1, ts.num_nodes, K))
for k in range(K):
X = sample_sets[k]
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
tX = len(X)
S = np.zeros(ts.num_nodes)
for tr in ts.trees(tracked_samples=X):
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in tr.nodes():
nX = tr.num_tracked_samples(u)
SS[u] = (nX > 0) and (nX < tX)
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
out[j, :, k] = S
if span_normalise:
out[j, :, k] /= (end - begin)
return out
def segregating_sites(ts, sample_sets, windows=None, mode="site", span_normalise=True):
"""
Computes the density of segregating sites over the window specified.
"""
method_map = {
"site": site_segregating_sites,
"node": node_segregating_sites,
"branch": branch_segregating_sites}
return method_map[mode](ts, sample_sets, windows=windows,
span_normalise=span_normalise)
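# Illustrative sketch (simulation parameters and the helper name are
# arbitrary): with a single sample set containing all samples and
# span_normalise=False, the site-mode statistic above sums
# (number of alleles - 1) over the sites in each window, which for
# infinite-sites mutations should equal the number of segregating sites.
def _example_segregating_sites_count():
    ts = msprime.simulate(8, mutation_rate=2, random_seed=3)
    counts = segregating_sites(
        ts, [list(ts.samples())], windows=[0, ts.sequence_length],
        mode="site", span_normalise=False)
    # counts[0, 0] should equal ts.num_sites for infinite-sites data.
    return counts, ts.num_sites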
class TestSegregatingSites(StatsTestCase, SampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets(self, ts, sample_sets, windows):
n = np.array([len(x) for x in sample_sets])
# this works because sum_{i=1}^k (1-p_i) = k-1
def f(x):
return (x > 0) * (1 - x / n)
self.verify_definition(
ts, sample_sets, windows, f, ts.segregating_sites, segregating_sites)
class TestBranchSegregatingSites(TestSegregatingSites, TopologyExamplesMixin):
mode = "branch"
class TestNodeSegregatingSites(TestSegregatingSites, TopologyExamplesMixin):
mode = "node"
class TestSiteSegregatingSites(TestSegregatingSites, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# Tajima's D
############################################
def site_tajimas_d(ts, sample_sets, windows=None):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(sample_sets)))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
haps = ts.genotype_matrix()
site_positions = [x.position for x in ts.sites()]
n = np.array([len(X) for X in sample_sets])
for i, X in enumerate(sample_sets):
nn = n[i]
S = 0
T = 0
            X_index = np.where(np.in1d(samples, X))[0]
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
hX = haps[k, X_index]
alleles = set(hX)
num_alleles = len(alleles)
n_alleles = [np.sum(hX == a) for a in alleles]
S += (num_alleles - 1)
                    for m in n_alleles:
                        with suppress_division_by_zero_warning():
                            T += m * (nn - m) / (nn * (nn - 1))
with suppress_division_by_zero_warning():
a1 = np.sum(1/np.arange(1, nn)) # this is h in the main version
a2 = np.sum(1/np.arange(1, nn)**2) # this is g
b1 = (nn+1)/(3*(nn-1))
b2 = 2 * (nn**2 + nn + 3) / (9 * nn * (nn-1))
c1 = b1 - 1/a1
c2 = b2 - (nn + 2)/(a1 * nn) + a2 / a1**2
e1 = c1 / a1 # this is a
e2 = c2 / (a1**2 + a2) # this is b
out[j][i] = (T - S/a1) / np.sqrt(e1*S + e2*S*(S-1))
return out
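# For reference, the quantity computed above is Tajima's (1989) D:
#
#     D = (T - S / a1) / sqrt(e1 * S + e2 * S * (S - 1))
#
# where, within each window and for a sample set of size nn, S is the number
# of segregating sites, T is the mean number of pairwise differences, and
# a1, a2, e1, e2 are the usual constants computed from nn as above.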
def tajimas_d(ts, sample_sets, windows=None, mode="site", span_normalise=True):
    # Only the site mode is defined, and site_tajimas_d takes no
    # span_normalise argument (Tajima's D is a ratio and is not
    # span-normalised), so that argument is not forwarded here.
    method_map = {
        "site": site_tajimas_d}
    return method_map[mode](ts, sample_sets, windows=windows)
class TestTajimasD(StatsTestCase, SampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify(self, ts):
# only check per-site
for sample_sets in example_sample_sets(ts, min_size=1):
self.verify_persite_tajimas_d(ts, sample_sets)
def get_windows(self, ts):
yield None
yield "sites"
yield [0, ts.sequence_length]
yield np.arange(0, 1.1, 0.1) * ts.sequence_length
def verify_persite_tajimas_d(self, ts, sample_sets):
for windows in self.get_windows(ts):
sigma1 = ts.Tajimas_D(sample_sets, windows=windows, mode=self.mode)
sigma2 = site_tajimas_d(ts, sample_sets, windows=windows)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
class TestSiteTajimasD(TestTajimasD, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# Y1
############################################
def branch_Y1(ts, sample_sets, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(sample_sets)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, X in enumerate(sample_sets):
S = 0
denom = np.float64(len(X) * (len(X)-1) * (len(X)-2))
has_trees = False
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
this_length = min(end, tr.interval[1]) - max(begin, tr.interval[0])
for x in X:
for y in set(X) - {x}:
for z in set(X) - {x, y}:
xy_mrca = tr.mrca(x, y)
xz_mrca = tr.mrca(x, z)
yz_mrca = tr.mrca(y, z)
if xy_mrca == xz_mrca:
# /\
# / /\
# x y z
S += path_length(tr, x, yz_mrca) * this_length
elif xy_mrca == yz_mrca:
# /\
# / /\
# y x z
S += path_length(tr, x, xz_mrca) * this_length
elif xz_mrca == yz_mrca:
# /\
# / /\
# z x y
S += path_length(tr, x, xy_mrca) * this_length
if has_trees:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def site_Y1(ts, sample_sets, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(sample_sets)))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
haps = ts.genotype_matrix().T
site_positions = [x.position for x in ts.sites()]
for i, X in enumerate(sample_sets):
S = 0
denom = np.float64(len(X) * (len(X)-1) * (len(X)-2))
site_in_window = False
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
for x in X:
x_index = np.where(samples == x)[0][0]
for y in set(X) - {x}:
y_index = np.where(samples == y)[0][0]
for z in set(X) - {x, y}:
z_index = np.where(samples == z)[0][0]
condition = (
haps[x_index, k] != haps[y_index, k] and
haps[x_index, k] != haps[z_index, k])
if condition:
# x|yz
S += 1
if site_in_window:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def node_Y1(ts, sample_sets, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
K = len(sample_sets)
out = np.zeros((len(windows) - 1, ts.num_nodes, K))
for k in range(K):
X = sample_sets[k]
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
tX = len(X)
denom = np.float64(tX * (tX - 1) * (tX - 2))
S = np.zeros(ts.num_nodes)
for tr in ts.trees(tracked_samples=X):
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in tr.nodes():
                    # count the number of x|yz triples (x, y, z in X) that
                    # the branch above u separates
n = tr.num_tracked_samples(u)
SS[u] += (n * (tX - n) * (tX - n - 1)
+ (tX - n) * n * (n - 1))
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
with suppress_division_by_zero_warning():
out[j, :, k] = S / denom
if span_normalise:
out[j, :, k] /= (end - begin)
return out
def Y1(ts, sample_sets, windows=None, mode="site", span_normalise=True):
windows = ts.parse_windows(windows)
method_map = {
"site": site_Y1,
"node": node_Y1,
"branch": branch_Y1}
return method_map[mode](ts, sample_sets, windows=windows,
span_normalise=span_normalise)
class TestY1(StatsTestCase, SampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets(self, ts, sample_sets, windows):
n = np.array([len(x) for x in sample_sets])
denom = n * (n - 1) * (n - 2)
def f(x):
return x * (n - x) * (n - x - 1) / denom
self.verify_definition(ts, sample_sets, windows, f, ts.Y1, Y1)
class TestBranchY1(TestY1, TopologyExamplesMixin):
mode = "branch"
class TestNodeY1(TestY1, TopologyExamplesMixin):
mode = "node"
class TestSiteY1(TestY1, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# Divergence
############################################
def site_divergence(ts, sample_sets, indexes, windows=None, span_normalise=True):
out = np.zeros((len(windows) - 1, len(indexes)))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
haps = ts.genotype_matrix().T
site_positions = [x.position for x in ts.sites()]
for i, (ix, iy) in enumerate(indexes):
X = sample_sets[ix]
Y = sample_sets[iy]
denom = np.float64(len(X) * len(Y))
site_in_window = False
S = 0
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
for x in X:
x_index = np.where(samples == x)[0][0]
for y in Y:
y_index = np.where(samples == y)[0][0]
if haps[x_index][k] != haps[y_index][k]:
# x|y
S += 1
if site_in_window:
with np.errstate(invalid='ignore', divide='ignore'):
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def branch_divergence(ts, sample_sets, indexes, windows=None, span_normalise=True):
out = np.zeros((len(windows) - 1, len(indexes)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (ix, iy) in enumerate(indexes):
X = sample_sets[ix]
Y = sample_sets[iy]
denom = np.float64(len(X) * len(Y))
has_trees = False
S = 0
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
SS = 0
for x in X:
for y in Y:
SS += path_length(tr, x, y)
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
if has_trees:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def node_divergence(ts, sample_sets, indexes, windows=None, span_normalise=True):
out = np.zeros((len(windows) - 1, ts.num_nodes, len(indexes)))
for i, (ix, iy) in enumerate(indexes):
X = sample_sets[ix]
Y = sample_sets[iy]
tX = len(X)
tY = len(Y)
denom = np.float64(len(X) * len(Y))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
S = np.zeros(ts.num_nodes)
for t1, t2 in zip(ts.trees(tracked_samples=X),
ts.trees(tracked_samples=Y)):
if t1.interval[1] <= begin:
continue
if t1.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in t1.nodes():
# count number of pairwise paths going through u
nX = t1.num_tracked_samples(u)
nY = t2.num_tracked_samples(u)
SS[u] += (nX * (tY - nY) + (tX - nX) * nY)
S += SS*(min(end, t1.interval[1]) - max(begin, t1.interval[0]))
with suppress_division_by_zero_warning():
out[j, :, i] = S / denom
if span_normalise:
out[j, :, i] /= (end - begin)
return out
def divergence(ts, sample_sets, indexes=None, windows=None, mode="site",
span_normalise=True):
"""
Computes average pairwise divergence between two random choices from x
over the window specified.
"""
windows = ts.parse_windows(windows)
if indexes is None:
indexes = [(0, 1)]
method_map = {
"site": site_divergence,
"node": node_divergence,
"branch": branch_divergence}
return method_map[mode](ts, sample_sets, indexes=indexes, windows=windows,
span_normalise=span_normalise)
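# Illustrative sketch (simulation parameters and the helper name are
# arbitrary): with the default indexes=[(0, 1)], the definition above should
# match ts.divergence, as TestDivergence below verifies.
def _example_divergence_usage():
    ts = msprime.simulate(6, mutation_rate=2, random_seed=4)
    sample_sets = [[0, 1, 2], [3, 4, 5]]
    windows = [0, ts.sequence_length]
    div_def = divergence(ts, sample_sets, indexes=[(0, 1)], windows=windows)
    div_lib = ts.divergence(sample_sets, indexes=[(0, 1)], windows=windows)
    return div_def, div_lib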
class TestDivergence(StatsTestCase, TwoWaySampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets_indexes(self, ts, sample_sets, indexes, windows):
# print("verify_indexes", ts, sample_sets, indexes, windows)
n = np.array([len(x) for x in sample_sets])
denom = np.array([n[i] * (n[j] - (i == j)) for i, j in indexes])
def f(x):
numer = np.array([(x[i] * (n[j] - x[j])) for i, j in indexes])
return numer / denom
self.verify_definition(
ts, sample_sets, indexes, windows, f, ts.divergence, divergence)
class TestBranchDivergence(TestDivergence, TopologyExamplesMixin):
mode = "branch"
class TestNodeDivergence(TestDivergence, TopologyExamplesMixin):
mode = "node"
class TestSiteDivergence(TestDivergence, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# Fst
############################################
def single_site_Fst(ts, sample_sets, indexes):
"""
Compute single-site Fst, which between two groups with frequencies p and q is
1 - 2 * (p (1-p) + q(1-q)) / ( p(1-p) + q(1-q) + p(1-q) + q(1-p) )
or in the multiallelic case, replacing p(1-p) with the sum over alleles of p(1-p),
and adjusted for sampling without replacement.
"""
# TODO: what to do in this case?
if ts.num_sites == 0:
out = np.array([np.repeat(np.nan, len(indexes))])
return out
out = np.zeros((ts.num_sites, len(indexes)))
samples = ts.samples()
for j, v in enumerate(ts.variants()):
for i, (ix, iy) in enumerate(indexes):
g = v.genotypes
X = sample_sets[ix]
Y = sample_sets[iy]
gX = [a for k, a in zip(samples, g) if k in X]
gY = [a for k, a in zip(samples, g) if k in Y]
nX = len(X)
nY = len(Y)
dX = dY = dXY = 0
for a in set(g):
fX = np.sum(gX == a)
fY = np.sum(gY == a)
with suppress_division_by_zero_warning():
dX += fX * (nX - fX) / (nX * (nX - 1))
dY += fY * (nY - fY) / (nY * (nY - 1))
dXY += (fX * (nY - fY) + (nX - fX) * fY) / (2 * nX * nY)
with suppress_division_by_zero_warning():
out[j][i] = 1 - 2 * (dX + dY) / (dX + dY + 2 * dXY)
return out
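# Worked example of the formula above with hypothetical allele counts (not
# drawn from a real tree sequence): two sample sets of size four fixed for
# different alleles have no within-set diversity (dX = dY = 0), while every
# between-set pair differs (dXY = 1), giving Fst = 1.
def _example_fixed_difference_fst():
    dX = dY = 0.0
    dXY = 1.0
    return 1 - 2 * (dX + dY) / (dX + dY + 2 * dXY)  # == 1.0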
class TestFst(StatsTestCase, TwoWaySampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify(self, ts):
# only check per-site
for sample_sets in example_sample_sets(ts, min_size=2):
for indexes in example_sample_set_index_pairs(sample_sets):
self.verify_persite_Fst(ts, sample_sets, indexes)
def verify_persite_Fst(self, ts, sample_sets, indexes):
sigma1 = ts.Fst(sample_sets, indexes=indexes, windows="sites",
mode=self.mode, span_normalise=False)
sigma2 = single_site_Fst(ts, sample_sets, indexes)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
class TestSiteFst(TestFst, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# Y2
############################################
def branch_Y2(ts, sample_sets, indexes, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(indexes)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (ix, iy) in enumerate(indexes):
X = sample_sets[ix]
Y = sample_sets[iy]
denom = np.float64(len(X) * len(Y) * (len(Y)-1))
has_trees = False
S = 0
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
this_length = min(end, tr.interval[1]) - max(begin, tr.interval[0])
for x in X:
for y in Y:
for z in set(Y) - {y}:
xy_mrca = tr.mrca(x, y)
xz_mrca = tr.mrca(x, z)
yz_mrca = tr.mrca(y, z)
if xy_mrca == xz_mrca:
# /\
# / /\
# x y z
S += path_length(tr, x, yz_mrca) * this_length
elif xy_mrca == yz_mrca:
# /\
# / /\
# y x z
S += path_length(tr, x, xz_mrca) * this_length
elif xz_mrca == yz_mrca:
# /\
# / /\
# z x y
S += path_length(tr, x, xy_mrca) * this_length
if has_trees:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def site_Y2(ts, sample_sets, indexes, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
samples = ts.samples()
out = np.zeros((len(windows) - 1, len(indexes)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
haps = ts.genotype_matrix().T
site_positions = [x.position for x in ts.sites()]
for i, (ix, iy) in enumerate(indexes):
X = sample_sets[ix]
Y = sample_sets[iy]
denom = np.float64(len(X) * len(Y) * (len(Y)-1))
S = 0
site_in_window = False
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
for x in X:
x_index = np.where(samples == x)[0][0]
for y in Y:
y_index = np.where(samples == y)[0][0]
for z in set(Y) - {y}:
z_index = np.where(samples == z)[0][0]
condition = (
haps[x_index, k] != haps[y_index, k] and
haps[x_index, k] != haps[z_index, k])
if condition:
# x|yz
S += 1
if site_in_window:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def node_Y2(ts, sample_sets, indexes, windows=None, span_normalise=True):
out = np.zeros((len(windows) - 1, ts.num_nodes, len(indexes)))
for i, (ix, iy) in enumerate(indexes):
X = sample_sets[ix]
Y = sample_sets[iy]
tX = len(X)
tY = len(Y)
denom = np.float64(tX * tY * (tY - 1))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
S = np.zeros(ts.num_nodes)
for t1, t2 in zip(ts.trees(tracked_samples=X),
ts.trees(tracked_samples=Y)):
if t1.interval[1] <= begin:
continue
if t1.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in t1.nodes():
                    # count x|yz triples (x in X; y, z in Y) split by u
nX = t1.num_tracked_samples(u)
nY = t2.num_tracked_samples(u)
SS[u] += (nX * (tY - nY) * (tY - nY - 1)
+ (tX - nX) * nY * (nY - 1))
S += SS*(min(end, t1.interval[1]) - max(begin, t1.interval[0]))
with suppress_division_by_zero_warning():
out[j, :, i] = S / denom
if span_normalise:
out[j, :, i] /= (end - begin)
return out
def Y2(ts, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True):
    windows = ts.parse_windows(windows)
if indexes is None:
indexes = [(0, 1)]
method_map = {
"site": site_Y2,
"node": node_Y2,
"branch": branch_Y2}
return method_map[mode](ts, sample_sets, indexes=indexes, windows=windows,
span_normalise=span_normalise)
class TestY2(StatsTestCase, TwoWaySampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets_indexes(self, ts, sample_sets, indexes, windows):
n = np.array([len(x) for x in sample_sets])
denom = np.array([n[i] * n[j] * (n[j] - 1) for i, j in indexes])
def f(x):
numer = np.array([
(x[i] * (n[j] - x[j]) * (n[j] - x[j] - 1)) for i, j in indexes])
return numer / denom
self.verify_definition(ts, sample_sets, indexes, windows, f, ts.Y2, Y2)
class TestBranchY2(TestY2, TopologyExamplesMixin):
mode = "branch"
class TestNodeY2(TestY2, TopologyExamplesMixin):
mode = "node"
class TestSiteY2(TestY2, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# Y3
############################################
def branch_Y3(ts, sample_sets, indexes, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(indexes)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (ix, iy, iz) in enumerate(indexes):
S = 0
X = sample_sets[ix]
Y = sample_sets[iy]
Z = sample_sets[iz]
denom = np.float64(len(X) * len(Y) * len(Z))
has_trees = False
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
this_length = min(end, tr.interval[1]) - max(begin, tr.interval[0])
for x in X:
for y in Y:
for z in Z:
xy_mrca = tr.mrca(x, y)
xz_mrca = tr.mrca(x, z)
yz_mrca = tr.mrca(y, z)
if xy_mrca == xz_mrca:
# /\
# / /\
# x y z
S += path_length(tr, x, yz_mrca) * this_length
elif xy_mrca == yz_mrca:
# /\
# / /\
# y x z
S += path_length(tr, x, xz_mrca) * this_length
elif xz_mrca == yz_mrca:
# /\
# / /\
# z x y
S += path_length(tr, x, xy_mrca) * this_length
if has_trees:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def site_Y3(ts, sample_sets, indexes, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(indexes)))
haps = ts.genotype_matrix().T
site_positions = ts.tables.sites.position
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (ix, iy, iz) in enumerate(indexes):
X = sample_sets[ix]
Y = sample_sets[iy]
Z = sample_sets[iz]
denom = np.float64(len(X) * len(Y) * len(Z))
S = 0
site_in_window = False
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
for x in X:
x_index = np.where(samples == x)[0][0]
for y in Y:
y_index = np.where(samples == y)[0][0]
for z in Z:
z_index = np.where(samples == z)[0][0]
                                if ((haps[x_index][k] != haps[y_index][k])
                                        and (haps[x_index][k] != haps[z_index][k])):
                                    # x|yz
                                    S += 1
if site_in_window:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def node_Y3(ts, sample_sets, indexes, windows=None, span_normalise=True):
out = np.zeros((len(windows) - 1, ts.num_nodes, len(indexes)))
for i, (ix, iy, iz) in enumerate(indexes):
X = sample_sets[ix]
Y = sample_sets[iy]
Z = sample_sets[iz]
tX = len(X)
tY = len(Y)
tZ = len(Z)
denom = np.float64(tX * tY * tZ)
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
S = np.zeros(ts.num_nodes)
for t1, t2, t3 in zip(ts.trees(tracked_samples=X),
ts.trees(tracked_samples=Y),
ts.trees(tracked_samples=Z)):
if t1.interval[1] <= begin:
continue
if t1.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in t1.nodes():
                    # count x|yz triples (x in X, y in Y, z in Z) split by u
nX = t1.num_tracked_samples(u)
nY = t2.num_tracked_samples(u)
nZ = t3.num_tracked_samples(u)
SS[u] += (nX * (tY - nY) * (tZ - nZ)
+ (tX - nX) * nY * nZ)
S += SS*(min(end, t1.interval[1]) - max(begin, t1.interval[0]))
with suppress_division_by_zero_warning():
out[j, :, i] = S / denom
if span_normalise:
out[j, :, i] /= (end - begin)
return out
def Y3(ts, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True):
windows = ts.parse_windows(windows)
if indexes is None:
indexes = [(0, 1, 2)]
method_map = {
"site": site_Y3,
"node": node_Y3,
"branch": branch_Y3}
return method_map[mode](ts, sample_sets, indexes=indexes, windows=windows,
span_normalise=span_normalise)
class TestY3(StatsTestCase, ThreeWaySampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets_indexes(self, ts, sample_sets, indexes, windows):
n = np.array([len(x) for x in sample_sets])
denom = np.array([n[i] * n[j] * n[k] for i, j, k in indexes])
def f(x):
numer = np.array(
[x[i] * (n[j] - x[j]) * (n[k] - x[k]) for i, j, k in indexes])
return numer / denom
self.verify_definition(ts, sample_sets, indexes, windows, f, ts.Y3, Y3)
class TestBranchY3(TestY3, TopologyExamplesMixin):
mode = "branch"
class TestNodeY3(TestY3, TopologyExamplesMixin):
mode = "node"
class TestSiteY3(TestY3, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# f2
############################################
def branch_f2(ts, sample_sets, indexes, windows=None, span_normalise=True):
# this is f4(A,B;A,B) but drawing distinct samples from A and B
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(indexes)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (ia, ib) in enumerate(indexes):
A = sample_sets[ia]
B = sample_sets[ib]
denom = np.float64(len(A) * (len(A) - 1) * len(B) * (len(B) - 1))
has_trees = False
S = 0
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
this_length = min(end, tr.interval[1]) - max(begin, tr.interval[0])
SS = 0
for a in A:
for b in B:
for c in set(A) - {a}:
for d in set(B) - {b}:
with suppress_division_by_zero_warning():
SS += path_length(tr, tr.mrca(a, c), tr.mrca(b, d))
SS -= path_length(tr, tr.mrca(a, d), tr.mrca(b, c))
S += SS * this_length
if has_trees:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def site_f2(ts, sample_sets, indexes, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(indexes)))
samples = ts.samples()
haps = ts.genotype_matrix().T
site_positions = ts.tables.sites.position
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (iA, iB) in enumerate(indexes):
A = sample_sets[iA]
B = sample_sets[iB]
denom = np.float64(len(A) * (len(A) - 1) * len(B) * (len(B) - 1))
S = 0
site_in_window = False
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
for a in A:
a_index = np.where(samples == a)[0][0]
for b in B:
b_index = np.where(samples == b)[0][0]
for c in set(A) - {a}:
c_index = np.where(samples == c)[0][0]
for d in set(B) - {b}:
d_index = np.where(samples == d)[0][0]
if ((haps[a_index][k] == haps[c_index][k])
and (haps[a_index][k] != haps[d_index][k])
and (haps[a_index][k] != haps[b_index][k])):
# ac|bd
S += 1
elif ((haps[a_index][k] == haps[d_index][k])
and (haps[a_index][k] != haps[c_index][k])
and (haps[a_index][k] != haps[b_index][k])):
# ad|bc
S -= 1
if site_in_window:
with np.errstate(invalid='ignore', divide='ignore'):
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def node_f2(ts, sample_sets, indexes, windows=None, span_normalise=True):
out = np.zeros((len(windows) - 1, ts.num_nodes, len(indexes)))
for i, (ia, ib) in enumerate(indexes):
A = sample_sets[ia]
B = sample_sets[ib]
tA = len(A)
tB = len(B)
denom = np.float64(tA * (tA - 1) * tB * (tB - 1))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
S = np.zeros(ts.num_nodes)
for t1, t2 in zip(ts.trees(tracked_samples=A),
ts.trees(tracked_samples=B)):
if t1.interval[1] <= begin:
continue
if t1.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in t1.nodes():
                    # count quadruples (a, c in A; b, d in B) split by u
nA = t1.num_tracked_samples(u)
nB = t2.num_tracked_samples(u)
# xy|uv - xv|uy with x,y in A, u, v in B
SS[u] += (nA * (nA - 1) * (tB - nB) * (tB - nB - 1)
+ (tA - nA) * (tA - nA - 1) * nB * (nB - 1))
SS[u] -= 2 * nA * nB * (tA - nA) * (tB - nB)
S += SS*(min(end, t1.interval[1]) - max(begin, t1.interval[0]))
with suppress_division_by_zero_warning():
out[j, :, i] = S / denom
if span_normalise:
out[j, :, i] /= (end - begin)
return out
def f2(ts, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True):
"""
Patterson's f2 statistic definitions.
"""
windows = ts.parse_windows(windows)
if indexes is None:
indexes = [(0, 1)]
method_map = {
"site": site_f2,
"node": node_f2,
"branch": branch_f2}
return method_map[mode](ts, sample_sets, indexes=indexes, windows=windows,
span_normalise=span_normalise)
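# Illustrative sketch (simulation parameters and the helper name are
# arbitrary): the definition above should match ts.f2 for a pair of sample
# sets, as Testf2 below verifies. In frequency terms, f2(A, B) is roughly the
# average over sites of (pA - pB)**2, computed above with unbiased
# sample-size corrections.
def _example_f2_usage():
    ts = msprime.simulate(8, mutation_rate=2, random_seed=5)
    sample_sets = [[0, 1, 2, 3], [4, 5, 6, 7]]
    windows = [0, ts.sequence_length]
    f2_def = f2(ts, sample_sets, indexes=[(0, 1)], windows=windows)
    f2_lib = ts.f2(sample_sets, indexes=[(0, 1)], windows=windows)
    return f2_def, f2_lib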
class Testf2(StatsTestCase, TwoWaySampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets_indexes(self, ts, sample_sets, indexes, windows):
n = np.array([len(x) for x in sample_sets])
denom = np.array([n[i] * (n[i] - 1) * n[j] * (n[j] - 1) for i, j in indexes])
def f(x):
numer = np.array([
x[i] * (x[i] - 1) * (n[j] - x[j]) * (n[j] - x[j] - 1)
- x[i] * (n[i] - x[i]) * (n[j] - x[j]) * x[j]
for i, j in indexes])
return numer / denom
self.verify_definition(ts, sample_sets, indexes, windows, f, ts.f2, f2)
class TestBranchf2(Testf2, TopologyExamplesMixin):
mode = "branch"
class TestNodef2(Testf2, TopologyExamplesMixin):
mode = "node"
class TestSitef2(Testf2, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# f3
############################################
def branch_f3(ts, sample_sets, indexes, windows=None, span_normalise=True):
# this is f4(A,B;A,C) but drawing distinct samples from A
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(indexes)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (ia, ib, ic) in enumerate(indexes):
A = sample_sets[ia]
B = sample_sets[ib]
C = sample_sets[ic]
denom = np.float64(len(A) * (len(A) - 1) * len(B) * len(C))
has_trees = False
S = 0
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
this_length = min(end, tr.interval[1]) - max(begin, tr.interval[0])
SS = 0
for a in A:
for b in B:
for c in set(A) - {a}:
for d in C:
SS += path_length(tr, tr.mrca(a, c), tr.mrca(b, d))
SS -= path_length(tr, tr.mrca(a, d), tr.mrca(b, c))
S += SS * this_length
if has_trees:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def site_f3(ts, sample_sets, indexes, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(indexes)))
samples = ts.samples()
haps = ts.genotype_matrix().T
site_positions = ts.tables.sites.position
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (iA, iB, iC) in enumerate(indexes):
A = sample_sets[iA]
B = sample_sets[iB]
C = sample_sets[iC]
denom = np.float64(len(A) * (len(A) - 1) * len(B) * len(C))
S = 0
site_in_window = False
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
for a in A:
a_index = np.where(samples == a)[0][0]
for b in B:
b_index = np.where(samples == b)[0][0]
for c in set(A) - {a}:
c_index = np.where(samples == c)[0][0]
for d in C:
d_index = np.where(samples == d)[0][0]
if ((haps[a_index][k] == haps[c_index][k])
and (haps[a_index][k] != haps[d_index][k])
and (haps[a_index][k] != haps[b_index][k])):
# ac|bd
S += 1
elif ((haps[a_index][k] == haps[d_index][k])
and (haps[a_index][k] != haps[c_index][k])
and (haps[a_index][k] != haps[b_index][k])):
# ad|bc
S -= 1
if site_in_window:
with np.errstate(invalid='ignore', divide='ignore'):
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def node_f3(ts, sample_sets, indexes, windows=None, span_normalise=True):
out = np.zeros((len(windows) - 1, ts.num_nodes, len(indexes)))
for i, (iA, iB, iC) in enumerate(indexes):
A = sample_sets[iA]
B = sample_sets[iB]
C = sample_sets[iC]
tA = len(A)
tB = len(B)
tC = len(C)
denom = np.float64(tA * (tA - 1) * tB * tC)
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
S = np.zeros(ts.num_nodes)
for t1, t2, t3 in zip(ts.trees(tracked_samples=A),
ts.trees(tracked_samples=B),
ts.trees(tracked_samples=C)):
if t1.interval[1] <= begin:
continue
if t1.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in t1.nodes():
                    # count quadruples (a, c in A; b in B; d in C) split by u
nA = t1.num_tracked_samples(u)
nB = t2.num_tracked_samples(u)
nC = t3.num_tracked_samples(u)
# xy|uv - xv|uy with x,y in A, u in B and v in C
SS[u] += (nA * (nA - 1) * (tB - nB) * (tC - nC)
+ (tA - nA) * (tA - nA - 1) * nB * nC)
SS[u] -= (nA * nC * (tA - nA) * (tB - nB)
+ (tA - nA) * (tC - nC) * nA * nB)
S += SS*(min(end, t1.interval[1]) - max(begin, t1.interval[0]))
with suppress_division_by_zero_warning():
out[j, :, i] = S / denom
if span_normalise:
out[j, :, i] /= (end - begin)
return out
def f3(ts, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True):
"""
Patterson's f3 statistic definitions.
"""
windows = ts.parse_windows(windows)
if indexes is None:
indexes = [(0, 1, 2)]
method_map = {
"site": site_f3,
"node": node_f3,
"branch": branch_f3}
return method_map[mode](ts, sample_sets, indexes=indexes, windows=windows,
span_normalise=span_normalise)
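# Frequency-based sketch of what the site statistic above estimates
# (illustrative only; the helper below is not used by the tests): the textbook
# form is f3(A; B, C) ~= E[(pA - pB) * (pA - pC)] over sites, where p* are the
# allele frequencies in each sample set. The implementations above use the
# corresponding unbiased estimators, hence the len(A) * (len(A) - 1) factors.
def _f3_frequency_sketch(pA, pB, pC):
    pA, pB, pC = (np.asarray(p, dtype=float) for p in (pA, pB, pC))
    return np.mean((pA - pB) * (pA - pC))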
class Testf3(StatsTestCase, ThreeWaySampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets_indexes(self, ts, sample_sets, indexes, windows):
n = np.array([len(x) for x in sample_sets])
denom = np.array([n[i] * (n[i] - 1) * n[j] * n[k] for i, j, k in indexes])
def f(x):
numer = np.array([
x[i] * (x[i] - 1) * (n[j] - x[j]) * (n[k] - x[k])
- x[i] * (n[i] - x[i]) * (n[j] - x[j]) * x[k] for i, j, k in indexes])
return numer / denom
self.verify_definition(ts, sample_sets, indexes, windows, f, ts.f3, f3)
class TestBranchf3(Testf3, TopologyExamplesMixin):
mode = "branch"
class TestNodef3(Testf3, TopologyExamplesMixin):
mode = "node"
class TestSitef3(Testf3, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# f4
############################################
def branch_f4(ts, sample_sets, indexes, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, len(indexes)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (iA, iB, iC, iD) in enumerate(indexes):
A = sample_sets[iA]
B = sample_sets[iB]
C = sample_sets[iC]
D = sample_sets[iD]
denom = np.float64(len(A) * len(B) * len(C) * len(D))
has_trees = False
S = 0
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
this_length = min(end, tr.interval[1]) - max(begin, tr.interval[0])
SS = 0
for a in A:
for b in B:
for c in C:
for d in D:
with suppress_division_by_zero_warning():
SS += path_length(tr, tr.mrca(a, c), tr.mrca(b, d))
SS -= path_length(tr, tr.mrca(a, d), tr.mrca(b, c))
S += SS * this_length
if has_trees:
with suppress_division_by_zero_warning():
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def site_f4(ts, sample_sets, indexes, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
samples = ts.samples()
haps = ts.genotype_matrix().T
site_positions = ts.tables.sites.position
out = np.zeros((len(windows) - 1, len(indexes)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i, (iA, iB, iC, iD) in enumerate(indexes):
A = sample_sets[iA]
B = sample_sets[iB]
C = sample_sets[iC]
D = sample_sets[iD]
denom = np.float64(len(A) * len(B) * len(C) * len(D))
S = 0
site_in_window = False
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
for a in A:
a_index = np.where(samples == a)[0][0]
for b in B:
b_index = np.where(samples == b)[0][0]
for c in C:
c_index = np.where(samples == c)[0][0]
for d in D:
d_index = np.where(samples == d)[0][0]
if ((haps[a_index][k] == haps[c_index][k])
and (haps[a_index][k] != haps[d_index][k])
and (haps[a_index][k] != haps[b_index][k])):
# ac|bd
S += 1
elif ((haps[a_index][k] == haps[d_index][k])
and (haps[a_index][k] != haps[c_index][k])
and (haps[a_index][k] != haps[b_index][k])):
# ad|bc
S -= 1
if site_in_window:
with np.errstate(invalid='ignore', divide='ignore'):
out[j][i] = S / denom
if span_normalise:
out[j][i] /= (end - begin)
return out
def node_f4(ts, sample_sets, indexes, windows=None, span_normalise=True):
windows = ts.parse_windows(windows)
out = np.zeros((len(windows) - 1, ts.num_nodes, len(indexes)))
for i, (iA, iB, iC, iD) in enumerate(indexes):
A = sample_sets[iA]
B = sample_sets[iB]
C = sample_sets[iC]
D = sample_sets[iD]
tA = len(A)
tB = len(B)
tC = len(C)
tD = len(D)
denom = np.float64(tA * tB * tC * tD)
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
S = np.zeros(ts.num_nodes)
for t1, t2, t3, t4 in zip(ts.trees(tracked_samples=A),
ts.trees(tracked_samples=B),
ts.trees(tracked_samples=C),
ts.trees(tracked_samples=D)):
if t1.interval[1] <= begin:
continue
if t1.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in t1.nodes():
                    # count quadruples (a in A, b in B, c in C, d in D)
                    # split by u
nA = t1.num_tracked_samples(u)
nB = t2.num_tracked_samples(u)
nC = t3.num_tracked_samples(u)
nD = t4.num_tracked_samples(u)
# ac|bd - ad|bc
SS[u] += (nA * nC * (tB - nB) * (tD - nD)
+ (tA - nA) * (tC - nC) * nB * nD)
SS[u] -= (nA * nD * (tB - nB) * (tC - nC)
+ (tA - nA) * (tD - nD) * nB * nC)
S += SS*(min(end, t1.interval[1]) - max(begin, t1.interval[0]))
with suppress_division_by_zero_warning():
out[j, :, i] = S / denom
if span_normalise:
out[j, :, i] /= (end - begin)
return out
def f4(ts, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True):
"""
Patterson's f4 statistic definitions.
"""
if indexes is None:
indexes = [(0, 1, 2, 3)]
method_map = {
"site": site_f4,
"node": node_f4,
"branch": branch_f4}
return method_map[mode](ts, sample_sets, indexes=indexes, windows=windows,
span_normalise=span_normalise)
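# Frequency-based sketch of the site statistic above (illustrative only; not
# used by the tests): the textbook form is
# f4(A, B; C, D) ~= E[(pA - pB) * (pC - pD)] over sites, with p* the allele
# frequencies in each sample set. For biallelic sites, swapping the allele
# labels flips the sign of both factors, so polarisation does not matter.
def _f4_frequency_sketch(pA, pB, pC, pD):
    pA, pB, pC, pD = (np.asarray(p, dtype=float) for p in (pA, pB, pC, pD))
    return np.mean((pA - pB) * (pC - pD))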
class Testf4(StatsTestCase, FourWaySampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets_indexes(self, ts, sample_sets, indexes, windows):
n = np.array([len(x) for x in sample_sets])
denom = np.array([n[i] * n[j] * n[k] * n[l] for i, j, k, l in indexes])
def f(x):
numer = np.array([
x[i] * x[k] * (n[j] - x[j]) * (n[l] - x[l])
- x[i] * x[l] * (n[j] - x[j]) * (n[k] - x[k]) for i, j, k, l in indexes])
return numer / denom
self.verify_definition(ts, sample_sets, indexes, windows, f, ts.f4, f4)
class TestBranchf4(Testf4, TopologyExamplesMixin):
mode = "branch"
class TestNodef4(Testf4, TopologyExamplesMixin):
mode = "node"
class TestSitef4(Testf4, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# Site frequency spectrum
############################################
def naive_branch_sample_frequency_spectrum(ts, sample_sets, windows=None):
# Draft of the 'site frequency spectrum' definition for different
# sample sets. Take the middle dimension as the max of sizes of the
# sample sets, and the last dimension as the different sample sets. This
# makes it easy to drop the last dimension in the default case of all
# samples. (But, we could definitely do it the other way around, with
    # the middle dimension being the sample set index.)
#
# The other difference with older versions is that we're outputting
# sfs[j] as the total branch length over j members of the set, including
# sfs[0] for zero members. Other versions were using sfs[j - 1] for
# total branch_length over j, and not tracking the branch length over
    # 0. The current approach seems more natural to me.
windows = ts.parse_windows(windows)
n_out = 1 + max(len(sample_set) for sample_set in sample_sets)
out = np.zeros((len(windows) - 1, n_out, len(sample_sets)))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for set_index, sample_set in enumerate(sample_sets):
S = np.zeros((n_out))
for t in ts.trees(tracked_samples=sample_set, sample_counts=True):
tr_len = min(end, t.interval[1]) - max(begin, t.interval[0])
if tr_len > 0:
for node in t.nodes():
x = t.num_tracked_samples(node)
S[x] += t.branch_length(node) * tr_len
out[j, :, set_index] = S / (end - begin)
return out
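# Illustrative sketch of the output layout described in the comment above
# (simulation parameters and the helper name are arbitrary): with sample sets
# of sizes 3 and 5 and the default single window, the result has shape
# (1, 1 + max(3, 5), 2), and entry [0, j, k] is the total branch length
# subtending exactly j members of sample set k, averaged over the window.
def _example_naive_branch_sfs_shape():
    ts = msprime.simulate(8, random_seed=1)
    sfs = naive_branch_sample_frequency_spectrum(
        ts, [[0, 1, 2], [3, 4, 5, 6, 7]])
    assert sfs.shape == (1, 6, 2)
    return sfs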
def naive_sample_frequency_spectrum(ts, sample_sets, windows=None, mode="site"):
"""
Naive definition of the generalised site frequency spectrum.
"""
method_map = {
# "site": naive_site_sample_frequency_spectrum,
"branch": naive_branch_sample_frequency_spectrum}
return method_map[mode](ts, sample_sets, windows=windows)
def branch_sample_frequency_spectrum(ts, sample_sets, windows):
"""
Efficient implementation of the algorithm used as the basis for the
underlying C version.
"""
num_sample_sets = len(sample_sets)
n_out = 1 + max(len(sample_set) for sample_set in sample_sets)
windows = ts.parse_windows(windows)
num_windows = windows.shape[0] - 1
result = np.zeros((num_windows, n_out, num_sample_sets))
state = np.zeros((ts.num_nodes, num_sample_sets), dtype=np.uint32)
for j in range(num_sample_sets):
state[sample_sets[j], j] = 1
def area_weighted_summary(u):
v = parent[u]
branch_length = 0
s = np.zeros((n_out, num_sample_sets))
if v != -1:
branch_length = time[v] - time[u]
if branch_length > 0:
count = state[u]
for j in range(num_sample_sets):
s[count[j], j] += branch_length
return s
tree_index = 0
window_index = 0
time = ts.tables.nodes.time
parent = np.zeros(ts.num_nodes, dtype=np.int32) - 1
running_sum = np.zeros((n_out, num_sample_sets))
for (t_left, t_right), edges_out, edges_in in ts.edge_diffs():
for edge in edges_out:
u = edge.child
running_sum -= area_weighted_summary(u)
u = edge.parent
while u != -1:
running_sum -= area_weighted_summary(u)
state[u] -= state[edge.child]
running_sum += area_weighted_summary(u)
u = parent[u]
parent[edge.child] = -1
for edge in edges_in:
parent[edge.child] = edge.parent
u = edge.child
running_sum += area_weighted_summary(u)
u = edge.parent
while u != -1:
running_sum -= area_weighted_summary(u)
state[u] += state[edge.child]
running_sum += area_weighted_summary(u)
u = parent[u]
# Update the windows
assert window_index < num_windows
while windows[window_index] < t_right:
w_left = windows[window_index]
w_right = windows[window_index + 1]
left = max(t_left, w_left)
right = min(t_right, w_right)
weight = right - left
assert weight > 0
result[window_index] += running_sum * weight
if w_right <= t_right:
window_index += 1
else:
                # This interval crosses a tree boundary, so we update it
                # again when processing the next tree.
break
tree_index += 1
# print("window_index:", window_index, windows.shape)
assert window_index == windows.shape[0] - 1
for j in range(num_windows):
result[j] /= windows[j + 1] - windows[j]
return result
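# Illustrative sketch (mirroring what TestBranchSampleFrequencySpectrum checks
# below; simulation parameters and the helper name are arbitrary): the
# incremental edge-diff implementation above should agree with the naive
# per-tree version in branch mode, at least for fully coalesced trees.
def _example_branch_sfs_agreement():
    ts = msprime.simulate(6, recombination_rate=1, random_seed=1)
    sample_sets = [[0, 1, 2], [3, 4, 5]]
    windows = [0, ts.sequence_length]
    sfs_naive = naive_sample_frequency_spectrum(
        ts, sample_sets, windows, mode="branch")
    sfs_fast = branch_sample_frequency_spectrum(ts, sample_sets, windows)
    return np.allclose(sfs_naive, sfs_fast)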
def sample_frequency_spectrum(ts, sample_sets, windows=None, mode="site"):
"""
Generalised site frequency spectrum.
"""
method_map = {
# "site": site_sample_frequency_spectrum,
"branch": branch_sample_frequency_spectrum}
return method_map[mode](ts, sample_sets, windows=windows)
class TestSampleFrequencySpectrum(StatsTestCase, SampleSetStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def verify_sample_sets(self, ts, sample_sets, windows):
# print("Verify", sample_sets, windows)
sfs1 = naive_sample_frequency_spectrum(ts, sample_sets, windows, mode=self.mode)
sfs2 = sample_frequency_spectrum(ts, sample_sets, windows, mode=self.mode)
windows = ts.parse_windows(windows)
self.assertEqual(sfs1.shape[0], len(windows) - 1)
self.assertEqual(sfs1.shape, sfs2.shape)
# print(sfs1)
# print(sfs2)
self.assertArrayAlmostEqual(sfs1, sfs2)
# print(sfs2.shape)
class TestBranchSampleFrequencySpectrum(
TestSampleFrequencySpectrum, TopologyExamplesMixin):
mode = "branch"
def test_simple_example(self):
ts = msprime.simulate(6, random_seed=1)
self.verify_sample_sets(ts, [[0, 1, 2], [3, 4, 5]], [0, 1])
@unittest.skip("Mismatch when multiple roots")
def test_wright_fisher_simplified_multiple_roots(self):
pass
@unittest.skip("Mismatch when multiple roots")
def test_wright_fisher_unsimplified_multiple_roots(self):
pass
@unittest.skip("Not working yet")
class TestSiteSampleFrequencySpectrum(
TestSampleFrequencySpectrum, MutatedTopologyExamplesMixin):
mode = "site"
############################################
# End of specific stats tests.
############################################
class TestWindowedTreeStat(StatsTestCase):
"""
Tests that the treewise windowing function defined here has the correct
behaviour.
"""
# TODO add more tests here covering the various windowing possibilities.
def get_tree_sequence(self):
ts = msprime.simulate(10, recombination_rate=2, random_seed=1)
self.assertGreater(ts.num_trees, 3)
return ts
def test_all_trees(self):
ts = self.get_tree_sequence()
A1 = np.ones((ts.num_trees, 1))
windows = np.array(list(ts.breakpoints()))
A2 = windowed_tree_stat(ts, A1, windows)
# print("breakpoints = ", windows)
# print(A2)
self.assertEqual(A1.shape, A2.shape)
# JK: I don't understand what we're computing here, this normalisation
# seems pretty weird.
# for tree in ts.trees():
# self.assertAlmostEqual(A2[tree.index, 0], tree.span / ts.sequence_length)
def test_single_interval(self):
ts = self.get_tree_sequence()
A1 = np.ones((ts.num_trees, 1))
windows = np.array([0, ts.sequence_length])
A2 = windowed_tree_stat(ts, A1, windows)
self.assertEqual(A2.shape, (1, 1))
# TODO: Test output
class TestSampleSets(StatsTestCase):
"""
Tests that passing sample sets in various ways gets interpreted correctly.
"""
def get_example_ts(self):
ts = msprime.simulate(10, mutation_rate=1, recombination_rate=1, random_seed=2)
assert ts.num_mutations > 0
return ts
def test_duplicate_samples(self):
ts = self.get_example_ts()
for bad_set in [[1, 1], [1, 2, 1], list(range(10)) + [9]]:
with self.assertRaises(exceptions.LibraryError):
ts.diversity([bad_set])
with self.assertRaises(exceptions.LibraryError):
ts.divergence([[0, 1], bad_set])
with self.assertRaises(ValueError):
ts.sample_count_stat([bad_set], lambda x: x)
def test_empty_sample_set(self):
ts = self.get_example_ts()
with self.assertRaises(ValueError):
ts.diversity([[]])
for bad_sample_sets in [[[], []], [[1], []], [[1, 2], [1], []]]:
with self.assertRaises(ValueError):
ts.diversity(bad_sample_sets)
with self.assertRaises(ValueError):
ts.divergence(bad_sample_sets)
with self.assertRaises(ValueError):
ts.sample_count_stat(bad_sample_sets, lambda x: x)
def test_non_samples(self):
ts = self.get_example_ts()
with self.assertRaises(exceptions.LibraryError):
ts.diversity([[10]])
with self.assertRaises(exceptions.LibraryError):
ts.divergence([[10], [1, 2]])
with self.assertRaises(ValueError):
ts.sample_count_stat([[10]], lambda x: x)
def test_span_normalise(self):
ts = self.get_example_ts()
sample_sets = [[0, 1], [2, 3, 4], [5, 6]]
windows = ts.sequence_length * np.random.uniform(size=10)
windows.sort()
windows[0] = 0.0
windows[-1] = ts.sequence_length
def f(x):
return x
for mode in ('site', 'branch', 'node'):
sigma1 = ts.sample_count_stat(sample_sets, f, windows=windows)
sigma2 = ts.sample_count_stat(sample_sets, f, windows=windows,
span_normalise=True)
sigma3 = ts.sample_count_stat(sample_sets, f, windows=windows,
span_normalise=False)
denom = np.diff(windows)[:, np.newaxis]
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3 / denom)
class TestSampleSetIndexes(StatsTestCase):
"""
Tests that we get the correct behaviour from the indexes argument to
k-way stats functions.
"""
def get_example_ts(self):
ts = msprime.simulate(10, mutation_rate=1, random_seed=1)
self.assertGreater(ts.num_mutations, 0)
return ts
def test_2_way_default(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 2)
S1 = ts.divergence(sample_sets)
S2 = divergence(ts, sample_sets)
S3 = ts.divergence(sample_sets, [[0, 1]])
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
self.assertArrayAlmostEqual(S1, S3)
def test_3_way_default(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 3)
S1 = ts.f3(sample_sets)
S2 = f3(ts, sample_sets)
S3 = ts.f3(sample_sets, [[0, 1, 2]])
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
self.assertArrayAlmostEqual(S1, S3)
def test_4_way_default(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 4)
S1 = ts.f4(sample_sets)
S2 = f4(ts, sample_sets)
S3 = ts.f4(sample_sets, [[0, 1, 2, 3]])
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
self.assertArrayAlmostEqual(S1, S3)
def test_2_way_combinations(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 4)
pairs = list(itertools.combinations(range(4), 2))
for k in range(1, len(pairs)):
S1 = ts.divergence(sample_sets, pairs[:k])
S2 = divergence(ts, sample_sets, pairs[:k])
self.assertEqual(S1.shape[-1], k)
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
def test_3_way_combinations(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 5)
triples = list(itertools.combinations(range(5), 3))
for k in range(1, len(triples)):
S1 = ts.Y3(sample_sets, triples[:k])
S2 = Y3(ts, sample_sets, triples[:k])
self.assertEqual(S1.shape[-1], k)
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
def test_4_way_combinations(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 5)
quads = list(itertools.combinations(range(5), 4))
for k in range(1, len(quads)):
S1 = ts.f4(sample_sets, quads[:k])
S2 = f4(ts, sample_sets, quads[:k])
self.assertEqual(S1.shape[-1], k)
self.assertEqual(S1.shape, S2.shape)
self.assertArrayAlmostEqual(S1, S2)
def test_errors(self):
ts = self.get_example_ts()
sample_sets = np.array_split(ts.samples(), 2)
with self.assertRaises(ValueError):
ts.divergence(sample_sets, indexes=[])
with self.assertRaises(ValueError):
ts.divergence(sample_sets, indexes=[(1, 1, 1)])
with self.assertRaises(exceptions.LibraryError):
ts.divergence(sample_sets, indexes=[(1, 2)])
class TestGeneralStatInterface(StatsTestCase):
"""
Tests for the basic interface for general_stats.
"""
def get_tree_sequence(self):
ts = msprime.simulate(10, recombination_rate=2,
mutation_rate=2, random_seed=1)
return ts
def test_default_mode(self):
ts = msprime.simulate(10, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
sigma1 = ts.general_stat(W, lambda x: x)
sigma2 = ts.general_stat(W, lambda x: x, mode="site")
self.assertArrayEqual(sigma1, sigma2)
def test_bad_mode(self):
ts = msprime.simulate(10, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
for bad_mode in ["", "MODE", "x" * 8192]:
with self.assertRaises(ValueError):
ts.general_stat(W, lambda x: x, mode=bad_mode)
def test_bad_window_strings(self):
ts = self.get_tree_sequence()
with self.assertRaises(ValueError):
ts.diversity([list(ts.samples())], mode="site", windows="abc")
with self.assertRaises(ValueError):
ts.diversity([list(ts.samples())], mode="site", windows="")
with self.assertRaises(ValueError):
ts.diversity([list(ts.samples())], mode="tree", windows="abc")
class TestGeneralBranchStats(StatsTestCase):
"""
Tests for general branch stats (using functions and arbitrary weights)
"""
def compare_general_stat(self, ts, W, f, windows=None, polarised=False):
sigma1 = naive_branch_general_stat(ts, W, f, windows, polarised=polarised)
sigma2 = ts.general_stat(W, f, windows, polarised=polarised, mode="branch")
sigma3 = branch_general_stat(ts, W, f, windows, polarised=polarised)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
return sigma1
def test_simple_identity_f_w_zeros(self):
ts = msprime.simulate(12, recombination_rate=3, random_seed=2)
W = np.zeros((ts.num_samples, 3))
for polarised in [True, False]:
sigma = self.compare_general_stat(ts, W, lambda x: x, windows="trees",
polarised=polarised)
self.assertEqual(sigma.shape, (ts.num_trees, W.shape[1]))
self.assertTrue(np.all(sigma == 0))
def test_simple_identity_f_w_ones(self):
ts = msprime.simulate(10, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
sigma = self.compare_general_stat(ts, W, lambda x: x, windows="trees",
polarised=True)
self.assertEqual(sigma.shape, (ts.num_trees, W.shape[1]))
# A W of 1 for every node and identity f counts the samples in the subtree
# if polarised is True.
for tree in ts.trees():
s = sum(tree.num_samples(u) * tree.branch_length(u) for u in tree.nodes())
self.assertTrue(np.allclose(sigma[tree.index], s))
def test_simple_cumsum_f_w_ones(self):
ts = msprime.simulate(13, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 8))
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, lambda x: np.cumsum(x), windows="trees", polarised=polarised)
self.assertEqual(sigma.shape, (ts.num_trees, W.shape[1]))
def test_simple_cumsum_f_w_ones_many_windows(self):
ts = msprime.simulate(15, recombination_rate=3, random_seed=3)
self.assertGreater(ts.num_trees, 3)
windows = np.linspace(0, ts.sequence_length, num=ts.num_trees * 10)
W = np.ones((ts.num_samples, 3))
sigma = self.compare_general_stat(ts, W, lambda x: np.cumsum(x), windows=windows)
self.assertEqual(sigma.shape, (windows.shape[0] - 1, W.shape[1]))
def test_windows_equal_to_ts_breakpoints(self):
ts = msprime.simulate(14, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 1))
for polarised in [True, False]:
sigma_no_windows = self.compare_general_stat(
ts, W, lambda x: np.cumsum(x), windows="trees", polarised=polarised)
self.assertEqual(sigma_no_windows.shape, (ts.num_trees, W.shape[1]))
sigma_windows = self.compare_general_stat(
ts, W, lambda x: np.cumsum(x), windows=ts.breakpoints(as_array=True),
polarised=polarised)
self.assertEqual(sigma_windows.shape, sigma_no_windows.shape)
            self.assertTrue(np.allclose(sigma_windows, sigma_no_windows))
def test_single_tree_windows(self):
ts = msprime.simulate(15, random_seed=2, length=100)
W = np.ones((ts.num_samples, 2))
# for num_windows in range(1, 10):
for num_windows in [2]:
windows = np.linspace(0, ts.sequence_length, num=num_windows + 1)
sigma = self.compare_general_stat(ts, W, lambda x: np.array([np.sum(x)]),
windows)
self.assertEqual(sigma.shape, (num_windows, 1))
def test_simple_identity_f_w_zeros_windows(self):
ts = msprime.simulate(15, recombination_rate=3, random_seed=2)
W = np.zeros((ts.num_samples, 3))
windows = np.linspace(0, ts.sequence_length, num=11)
for polarised in [True, False]:
sigma = self.compare_general_stat(ts, W, lambda x: x, windows,
polarised=polarised)
self.assertEqual(sigma.shape, (10, W.shape[1]))
self.assertTrue(np.all(sigma == 0))
class TestGeneralSiteStats(StatsTestCase):
"""
Tests for general site stats (using functions and arbitrary weights)
"""
def compare_general_stat(self, ts, W, f, windows=None, polarised=False):
py_ssc = PythonSiteStatCalculator(ts)
sigma1 = py_ssc.naive_general_stat(W, f, windows, polarised=polarised)
sigma2 = ts.general_stat(W, f, windows, polarised=polarised, mode="site")
sigma3 = site_general_stat(ts, W, f, windows, polarised=polarised)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
return sigma1
def test_identity_f_W_0_multiple_alleles(self):
ts = msprime.simulate(20, recombination_rate=0, random_seed=2)
ts = tsutil.jukes_cantor(ts, 20, 1, seed=10)
W = np.zeros((ts.num_samples, 3))
for polarised in [True, False]:
sigma = self.compare_general_stat(ts, W, lambda x: x, windows="sites",
polarised=polarised)
self.assertEqual(sigma.shape, (ts.num_sites, W.shape[1]))
self.assertTrue(np.all(sigma == 0))
def test_identity_f_W_0_multiple_alleles_windows(self):
ts = msprime.simulate(34, recombination_rate=0, random_seed=2)
ts = tsutil.jukes_cantor(ts, 20, 1, seed=10)
W = np.zeros((ts.num_samples, 3))
windows = np.linspace(0, 1, num=11)
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, lambda x: x, windows=windows, polarised=polarised)
self.assertEqual(sigma.shape, (windows.shape[0] - 1, W.shape[1]))
self.assertTrue(np.all(sigma == 0))
def test_cumsum_f_W_1_multiple_alleles(self):
ts = msprime.simulate(3, recombination_rate=2, random_seed=2)
ts = tsutil.jukes_cantor(ts, 20, 1, seed=10)
W = np.ones((ts.num_samples, 3))
for polarised in [True, False]:
sigma = self.compare_general_stat(ts, W, lambda x: np.cumsum(x),
windows="sites", polarised=polarised)
self.assertEqual(sigma.shape, (ts.num_sites, W.shape[1]))
def test_cumsum_f_W_1_two_alleles(self):
ts = msprime.simulate(33, recombination_rate=1, mutation_rate=2, random_seed=1)
W = np.ones((ts.num_samples, 5))
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, lambda x: np.cumsum(x), windows="sites", polarised=polarised)
self.assertEqual(sigma.shape, (ts.num_sites, W.shape[1]))
class TestGeneralNodeStats(StatsTestCase):
"""
Tests for general node stats (using functions and arbitrary weights)
"""
def compare_general_stat(self, ts, W, f, windows=None, polarised=False):
sigma1 = naive_node_general_stat(ts, W, f, windows, polarised=polarised)
sigma2 = ts.general_stat(W, f, windows, polarised=polarised, mode="node")
sigma3 = node_general_stat(ts, W, f, windows, polarised=polarised)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
return sigma1
def test_simple_sum_f_w_zeros(self):
ts = msprime.simulate(12, recombination_rate=3, random_seed=2)
W = np.zeros((ts.num_samples, 3))
for polarised in [True, False]:
sigma = self.compare_general_stat(
ts, W, lambda x: x, windows="trees", polarised=polarised)
self.assertEqual(sigma.shape, (ts.num_trees, ts.num_nodes, 3))
self.assertTrue(np.all(sigma == 0))
def test_simple_sum_f_w_ones(self):
ts = msprime.simulate(44, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
sigma = self.compare_general_stat(
ts, W, lambda x: np.array([sum(x)]), windows="trees", polarised=True)
self.assertEqual(sigma.shape, (ts.num_trees, ts.num_nodes, 1))
# Drop the last dimension
sigma = sigma.reshape((ts.num_trees, ts.num_nodes))
# A W of 1 for every node and f(x)=sum(x) counts the samples in the subtree
# times 2 if polarised is True.
for tree in ts.trees():
s = np.array([tree.num_samples(u) for u in range(ts.num_nodes)])
self.assertArrayAlmostEqual(sigma[tree.index], 2*s)
def test_small_tree_windows_polarised(self):
ts = msprime.simulate(4, recombination_rate=0.5, random_seed=2)
self.assertGreater(ts.num_trees, 1)
W = np.ones((ts.num_samples, 1))
sigma = self.compare_general_stat(
ts, W, lambda x: np.cumsum(x), windows=ts.breakpoints(as_array=True),
polarised=True)
self.assertEqual(sigma.shape, (ts.num_trees, ts.num_nodes, 1))
def test_one_window_polarised(self):
ts = msprime.simulate(4, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 1))
sigma = self.compare_general_stat(
ts, W, lambda x: np.cumsum(x), windows=[0, ts.sequence_length],
polarised=True)
self.assertEqual(sigma.shape, (1, ts.num_nodes, W.shape[1]))
@unittest.skip("Funny things happening for unpolarised")
def test_one_window_unpolarised(self):
ts = msprime.simulate(4, recombination_rate=1, random_seed=2)
W = np.ones((ts.num_samples, 2))
sigma = self.compare_general_stat(
ts, W, lambda x: np.cumsum(x), windows=[0, ts.sequence_length],
polarised=False)
self.assertEqual(sigma.shape, (1, ts.num_nodes, 2))
def test_many_windows(self):
ts = msprime.simulate(24, recombination_rate=3, random_seed=2)
W = np.ones((ts.num_samples, 3))
for k in [1, ts.num_trees // 2, ts.num_trees, ts.num_trees * 2]:
windows = np.linspace(0, 1, num=k + 1)
for polarised in [True]:
sigma = self.compare_general_stat(
ts, W, lambda x: np.cumsum(x), windows=windows, polarised=polarised)
self.assertEqual(sigma.shape, (k, ts.num_nodes, 3))
def test_one_tree(self):
ts = msprime.simulate(10, random_seed=3)
W = np.ones((ts.num_samples, 2))
sigma = self.compare_general_stat(
ts, W, lambda x: np.array([sum(x), sum(x)]), windows=[0, 1], polarised=True)
self.assertEqual(sigma.shape, (1, ts.num_nodes, 2))
# A W of 1 for every node and f(x)=sum(x) counts the samples in the subtree
# times 2 if polarised is True.
tree = ts.first()
s = np.array([tree.num_samples(u) for u in range(ts.num_nodes)])
self.assertArrayAlmostEqual(sigma[tree.index, :, 0], 2 * s)
self.assertArrayAlmostEqual(sigma[tree.index, :, 1], 2 * s)
##############################
# Trait covariance
##############################
def covsq(x, y):
cov = np.dot(x - np.mean(x), y - np.mean(y)) / (len(x) - 1)
return cov * cov
def corsq(x, y):
vx = covsq(x, x)
vy = covsq(y, y)
# sqrt is because vx and vy are *squared* variances
return covsq(x, y) / np.sqrt(vx * vy)
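def _corsq_example():
    # Hedged sanity check (a hypothetical helper added for illustration, not part
    # of the original test suite; assumes the module-level numpy import `np` used
    # above): covsq is the squared sample covariance and sqrt(vx * vy) recovers
    # var(x) * var(y), so corsq(x, y) should equal the squared Pearson correlation.
    rng = np.random.default_rng(1)
    x = rng.normal(size=20)
    y = 0.5 * x + rng.normal(size=20)
    r = np.corrcoef(x, y)[0, 1]
    assert np.isclose(corsq(x, y), r ** 2)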
def site_trait_covariance(ts, W, windows=None, span_normalise=True):
"""
For each site, computes the covariance between the columns of W and the genotypes.
"""
windows = ts.parse_windows(windows)
n, K = W.shape
assert(n == ts.num_samples)
out = np.zeros((len(windows) - 1, K))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
haps = ts.genotype_matrix()
site_positions = [x.position for x in ts.sites()]
for i in range(K):
w = W[:, i].copy()
w -= np.mean(w)
S = 0
site_in_window = False
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
hX = haps[k]
alleles = set(hX)
for a in alleles:
S += covsq(w, hX == a) / 2
if site_in_window:
out[j, i] = S
if span_normalise:
out[j, i] /= (end - begin)
return out
def branch_trait_covariance(ts, W, windows=None, span_normalise=True):
"""
For each branch, computes the covariance between the columns of W and the split
induced by the branch, multiplied by the length of the branch.
"""
windows = ts.parse_windows(windows)
n, K = W.shape
assert(n == ts.num_samples)
out = np.zeros((len(windows) - 1, K))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i in range(K):
w = W[:, i].copy()
w -= np.mean(w)
S = 0
has_trees = False
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
SS = 0
for u in range(ts.num_nodes):
below = np.in1d(samples, list(tr.samples(u)))
branch_length = tr.branch_length(u)
SS += covsq(w, below) * branch_length
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
if has_trees:
out[j, i] = S
if span_normalise:
out[j, i] /= (end - begin)
return out
def node_trait_covariance(ts, W, windows=None, span_normalise=True):
"""
For each node, computes the covariance between the columns of W and the split
induced by above/below the node.
"""
windows = ts.parse_windows(windows)
n, K = W.shape
assert(n == ts.num_samples)
out = np.zeros((len(windows) - 1, ts.num_nodes, K))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i in range(K):
w = W[:, i].copy()
w -= np.mean(w)
S = np.zeros(ts.num_nodes)
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in range(ts.num_nodes):
below = np.in1d(samples, list(tr.samples(u)))
SS[u] += covsq(w, below)
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
out[j, :, i] = S
if span_normalise:
out[j, :, i] /= (end - begin)
return out
def trait_covariance(ts, W, windows=None, mode="site", span_normalise=True):
method_map = {
"site": site_trait_covariance,
"node": node_trait_covariance,
"branch": branch_trait_covariance}
return method_map[mode](ts, W, windows=windows,
span_normalise=span_normalise)
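def _trait_covariance_example():
    # Minimal usage sketch (a hypothetical helper, not part of the test classes
    # below; assumes the module-level msprime and np imports used throughout this
    # file): each column of W is mean-centred inside the computation, so adding a
    # constant to the weights should leave the statistic unchanged.
    ts = msprime.simulate(10, mutation_rate=1, random_seed=3)
    W = np.random.default_rng(3).normal(size=(ts.num_samples, 2))
    assert np.allclose(trait_covariance(ts, W, mode="site"),
                       trait_covariance(ts, W + 7.0, mode="site"))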
class TestTraitCovariance(StatsTestCase, WeightStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def get_example_ts(self):
ts = msprime.simulate(10, mutation_rate=1, recombination_rate=2, random_seed=1)
self.assertGreater(ts.num_mutations, 0)
return ts
def transform_weights(self, W):
"""
Need centered weights to compare to general stats.
"""
W -= np.mean(W, axis=0)
return W
def verify_weighted_stat(self, ts, W, windows):
n = W.shape[0]
def f(x):
return (x ** 2) / (2 * (n - 1) * (n - 1))
self.verify_definition(
ts, W, windows, f, ts.trait_covariance, trait_covariance)
def verify_interface(self, ts, ts_method):
W = np.array([np.arange(ts.num_samples)]).T
sigma1 = ts_method(W, mode=self.mode)
sigma2 = ts_method(W, windows=None, mode=self.mode)
sigma3 = ts_method(W, windows=[0.0, ts.sequence_length], mode=self.mode)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
def verify_centering(self, ts, method, ts_method):
# Since weights are mean-centered, adding a constant shouldn't change anything.
ts = self.get_example_ts()
for W, windows in subset_combos(
self.example_weights(ts), example_windows(ts), p=0.1):
shift = np.arange(1, W.shape[1] + 1)
sigma1 = ts_method(W, windows=windows, mode=self.mode)
sigma2 = ts_method(W + shift, windows=windows, mode=self.mode)
sigma3 = method(ts, W, windows=windows, mode=self.mode)
sigma4 = method(ts, W + shift, windows=windows, mode=self.mode)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertEqual(sigma1.shape, sigma3.shape)
self.assertEqual(sigma1.shape, sigma4.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
self.assertArrayAlmostEqual(sigma1, sigma4)
class TraitCovarianceMixin(object):
def test_interface(self):
ts = self.get_example_ts()
self.verify_interface(ts, ts.trait_covariance)
def test_normalisation(self):
ts = self.get_example_ts()
self.verify_centering(ts, trait_covariance, ts.trait_covariance)
def test_errors(self):
ts = self.get_example_ts()
W = np.ones((ts.num_samples, 2))
# W must have the right number of rows
self.assertRaises(ValueError, ts.trait_correlation, W[1:, :])
class TestBranchTraitCovariance(
TestTraitCovariance, TopologyExamplesMixin, TraitCovarianceMixin):
mode = "branch"
class TestNodeTraitCovariance(
TestTraitCovariance, TopologyExamplesMixin, TraitCovarianceMixin):
mode = "node"
class TestSiteTraitCovariance(
TestTraitCovariance, MutatedTopologyExamplesMixin,
TraitCovarianceMixin):
mode = "site"
##############################
# Trait correlation
##############################
def site_trait_correlation(ts, W, windows=None, span_normalise=True):
"""
For each site, computes the correlation between the columns of W and the genotypes.
"""
windows = ts.parse_windows(windows)
n, K = W.shape
assert(n == ts.num_samples)
out = np.zeros((len(windows) - 1, K))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
haps = ts.genotype_matrix()
site_positions = [x.position for x in ts.sites()]
for i in range(K):
w = W[:, i].copy()
w -= np.mean(w)
w /= np.std(w) * np.sqrt(len(w) / (len(w) - 1))
S = 0
site_in_window = False
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
hX = haps[k]
alleles = set(hX)
for a in alleles:
p = np.mean(hX == a)
if p > 0 and p < 1:
# S += sum(w[hX == a])**2 / (2 * (p * (1 - p)))
S += corsq(w, hX == a) / 2
if site_in_window:
out[j, i] = S
if span_normalise:
out[j, i] /= (end - begin)
return out
def branch_trait_correlation(ts, W, windows=None, span_normalise=True):
"""
For each branch, computes the correlation between the columns of W and the split
induced by the branch, multiplied by the length of the branch.
"""
windows = ts.parse_windows(windows)
n, K = W.shape
assert(n == ts.num_samples)
out = np.zeros((len(windows) - 1, K))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i in range(K):
w = W[:, i].copy()
w -= np.mean(w)
w /= np.std(w) * np.sqrt(len(w) / (len(w) - 1))
S = 0
has_trees = False
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
SS = 0
for u in range(ts.num_nodes):
below = np.in1d(samples, list(tr.samples(u)))
p = np.mean(below)
if p > 0 and p < 1:
branch_length = tr.branch_length(u)
# SS += ((sum(w[below])**2 +
# sum(w[np.logical_not(below)])**2) * branch_length
# / (2 * (p * (1 - p))))
SS += corsq(w, below) * branch_length
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
if has_trees:
out[j, i] = S
if span_normalise:
out[j, i] /= (end - begin)
return out
def node_trait_correlation(ts, W, windows=None, span_normalise=True):
"""
For each node, computes the correlation between the columns of W and the split
induced by above/below the node.
"""
windows = ts.parse_windows(windows)
n, K = W.shape
assert(n == ts.num_samples)
out = np.zeros((len(windows) - 1, ts.num_nodes, K))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i in range(K):
w = W[:, i].copy()
w -= np.mean(w)
w /= np.std(w) * np.sqrt(len(w) / (len(w) - 1))
S = np.zeros(ts.num_nodes)
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in range(ts.num_nodes):
below = np.in1d(samples, list(tr.samples(u)))
p = np.mean(below)
if p > 0 and p < 1:
# SS[u] += sum(w[below])**2 / 2
# SS[u] += sum(w[np.logical_not(below)])**2 / 2
# SS[u] /= (p * (1 - p))
SS[u] += corsq(w, below)
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
out[j, :, i] = S
if span_normalise:
out[j, :, i] /= (end - begin)
return out
def trait_correlation(ts, W, windows=None, mode="site", span_normalise=True):
method_map = {
"site": site_trait_correlation,
"node": node_trait_correlation,
"branch": branch_trait_correlation}
return method_map[mode](ts, W, windows=windows,
span_normalise=span_normalise)
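def _trait_correlation_example():
    # Minimal usage sketch (a hypothetical helper, same assumptions as the
    # covariance example above): the weights are standardised inside the
    # computation, so rescaling a column by a positive constant should not
    # change the result.
    ts = msprime.simulate(10, mutation_rate=1, random_seed=4)
    W = np.random.default_rng(4).normal(size=(ts.num_samples, 2))
    assert np.allclose(trait_correlation(ts, W, mode="site"),
                       trait_correlation(ts, 3.0 * W, mode="site"))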
class TestTraitCorrelation(TestTraitCovariance):
# Derived classes define this to get a specific stats mode.
mode = None
def transform_weights(self, W):
"""
Need standardised weights to compare to general stats,
and also an extra column to compute allele frequencies.
"""
W -= np.mean(W, axis=0)
n = W.shape[0]
with suppress_division_by_zero_warning():
W /= np.std(W, axis=0) * np.sqrt(n / (n - 1))
return np.column_stack((W, np.ones(W.shape[0])/W.shape[0]))
def verify_weighted_stat(self, ts, W, windows):
n = W.shape[0]
def f(x):
p = x[-1]
if p > 0 and p < 1:
return (x[:-1] ** 2) / (2 * (p * (1 - p)) * n * (n - 1))
else:
return x[:-1] * 0.0
self.verify_definition(
ts, W, windows, f, ts.trait_correlation, trait_correlation)
def test_errors(self):
ts = self.get_example_ts()
# columns of W must have positive SD
W = np.ones((ts.num_samples, 2))
self.assertRaises(ValueError, ts.trait_correlation, W)
# W must have the right number of rows
self.assertRaises(ValueError, ts.trait_correlation, W[1:, :])
def verify_standardising(self, ts, method, ts_method):
"""
Since weights are standardised, multiplying by a constant shouldn't
change anything.
"""
for W, windows in subset_combos(
self.example_weights(ts), example_windows(ts), p=0.1):
scale = np.arange(1, W.shape[1] + 1)
sigma1 = ts_method(W, windows=windows, mode=self.mode)
sigma2 = ts_method(W * scale, windows=windows, mode=self.mode)
sigma3 = method(ts, W, windows=windows, mode=self.mode)
sigma4 = method(ts, W * scale, windows=windows, mode=self.mode)
self.assertEqual(sigma1.shape, sigma2.shape)
self.assertArrayAlmostEqual(sigma1, sigma2)
self.assertArrayAlmostEqual(sigma1, sigma3)
self.assertArrayAlmostEqual(sigma1, sigma4)
class TraitCorrelationMixin(object):
def test_interface(self):
ts = self.get_example_ts()
self.verify_interface(ts, ts.trait_correlation)
def test_normalisation(self):
ts = self.get_example_ts()
self.verify_centering(ts, trait_correlation, ts.trait_correlation)
self.verify_standardising(
ts, trait_correlation, ts.trait_correlation)
class TestBranchTraitCorrelation(
TestTraitCorrelation, TopologyExamplesMixin, TraitCorrelationMixin):
mode = "branch"
class TestNodeTraitCorrelation(
TestTraitCorrelation, TopologyExamplesMixin, TraitCorrelationMixin):
mode = "node"
class TestSiteTraitCorrelation(
TestTraitCorrelation, MutatedTopologyExamplesMixin,
TraitCorrelationMixin):
mode = "site"
##############################
# Trait regression
##############################
def regression(y, x, z):
"""
Returns the squared coefficient of x in the least-squares linear regression
: y ~ x + z
where x and y are vectors and z is a matrix.
    Note that if z is None then the output is the square of
    cor(x, y) * sd(y) / sd(x) = cov(x, y) / (sd(x) ** 2) .
"""
# add the constant vector to z
if z is None:
z = np.ones((len(x), 1))
else:
xz = np.column_stack([z, np.ones((len(x), 1))])
if np.linalg.matrix_rank(xz) == xz.shape[1]:
z = xz
xz = np.column_stack([x, z])
if np.linalg.matrix_rank(xz) < xz.shape[1]:
return 0.0
else:
coefs, _, _, _ = np.linalg.lstsq(xz, y, rcond=None)
return coefs[0] * coefs[0]
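def _regression_example():
    # Illustrative check (a hypothetical helper under stated assumptions, not from
    # the original tests): with z=None the value returned by regression() should be
    # the square of the simple-regression slope, i.e. (cov(x, y) / var(x)) ** 2.
    rng = np.random.default_rng(2)
    x = rng.normal(size=30)
    y = 2.0 * x + rng.normal(size=30)
    slope = np.cov(x, y)[0, 1] / np.var(x, ddof=1)
    assert np.isclose(regression(y, x, None), slope ** 2)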
def site_trait_regression(ts, W, Z, windows=None, span_normalise=True):
"""
For each site, and for each trait w (column of W), computes the coefficient
of site in the linear regression:
w ~ site + Z
"""
windows = ts.parse_windows(windows)
n, K = W.shape
assert(n == ts.num_samples)
out = np.zeros((len(windows) - 1, K))
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
haps = ts.genotype_matrix()
site_positions = [x.position for x in ts.sites()]
for i in range(K):
w = W[:, i]
S = 0
site_in_window = False
for k in range(ts.num_sites):
if (site_positions[k] >= begin) and (site_positions[k] < end):
site_in_window = True
hX = haps[k]
alleles = set(hX)
for a in alleles:
p = np.mean(hX == a)
if p > 0 and p < 1:
S += regression(w, hX == a, Z) / 2
if site_in_window:
out[j, i] = S
if span_normalise:
out[j, i] /= (end - begin)
return out
def branch_trait_regression(ts, W, Z, windows=None, span_normalise=True):
"""
For each branch, computes the regression of each column of W onto the split
induced by the branch and the covariates Z, multiplied by the length of the branch,
returning the squared coefficient of the column of W.
"""
windows = ts.parse_windows(windows)
n, K = W.shape
assert(n == ts.num_samples)
out = np.zeros((len(windows) - 1, K))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i in range(K):
w = W[:, i]
S = 0
has_trees = False
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
if tr.total_branch_length > 0:
has_trees = True
SS = 0
for u in range(ts.num_nodes):
below = np.in1d(samples, list(tr.samples(u)))
branch_length = tr.branch_length(u)
SS += regression(w, below, Z) * branch_length
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
if has_trees:
out[j, i] = S
if span_normalise:
out[j, i] /= (end - begin)
return out
def node_trait_regression(ts, W, Z, windows=None, span_normalise=True):
"""
    For each node, computes the regression of each column of W on the split
induced by above/below the node and the covariates Z, returning the squared
coefficient of the column of W.
"""
windows = ts.parse_windows(windows)
n, K = W.shape
assert(n == ts.num_samples)
out = np.zeros((len(windows) - 1, ts.num_nodes, K))
samples = ts.samples()
for j in range(len(windows) - 1):
begin = windows[j]
end = windows[j + 1]
for i in range(K):
w = W[:, i]
S = np.zeros(ts.num_nodes)
for tr in ts.trees():
if tr.interval[1] <= begin:
continue
if tr.interval[0] >= end:
break
SS = np.zeros(ts.num_nodes)
for u in range(ts.num_nodes):
below = np.in1d(samples, list(tr.samples(u)))
SS[u] += regression(w, below, Z)
S += SS*(min(end, tr.interval[1]) - max(begin, tr.interval[0]))
out[j, :, i] = S
if span_normalise:
out[j, :, i] /= (end - begin)
return out
def trait_regression(ts, W, Z, windows=None, mode="site", span_normalise=True):
method_map = {
"site": site_trait_regression,
"node": node_trait_regression,
"branch": branch_trait_regression}
return method_map[mode](ts, W, Z, windows=windows,
span_normalise=span_normalise)
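def _trait_regression_example():
    # Minimal usage sketch (a hypothetical helper, same assumptions as the examples
    # above): one covariate column plus the intercept added internally; with the
    # default single window the output has shape (1, number of traits).
    ts = msprime.simulate(10, mutation_rate=1, random_seed=5)
    rng = np.random.default_rng(5)
    W = rng.normal(size=(ts.num_samples, 2))
    Z = rng.normal(size=(ts.num_samples, 1))
    assert trait_regression(ts, W, Z, mode="site").shape == (1, W.shape[1])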
class TestTraitRegression(StatsTestCase, WeightStatsMixin):
# Derived classes define this to get a specific stats mode.
mode = None
def get_example_ts(self):
ts = msprime.simulate(10, mutation_rate=1, recombination_rate=2, random_seed=1)
self.assertGreater(ts.num_mutations, 0)
return ts
def example_covariates(self, ts):
N = ts.num_samples
for k in [1, 2, 5]:
k = min(k, ts.num_samples)
Z = np.ones((N, k))
Z[1, :] = np.arange(k, 2*k)
yield Z
for j in range(k):
Z[:, j] = np.random.normal(0, 1, N)
yield Z
def transform_weights(self, W, Z):
n = W.shape[0]
return np.column_stack([W, Z, np.ones((n, 1))])
def transform_covariates(self, Z):
tZ = np.column_stack([Z, np.ones((Z.shape[0], 1))])
if np.linalg.matrix_rank(tZ) == tZ.shape[1]:
Z = tZ
assert(np.linalg.matrix_rank(Z) == Z.shape[1])
K = np.linalg.cholesky(np.matmul(Z.T, Z)).T
Z = np.matmul(Z,
|
np.linalg.inv(K)
|
numpy.linalg.inv
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import numpy as np
import mxnet as mx
import random
import itertools
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
import unittest
def test_box_nms_op():
def test_box_nms_forward(data, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1, cid=0,
force=False, in_format='corner', out_format='corner'):
for dtype in ['float16', 'float32', 'float64']:
data = mx.nd.array(data, dtype=dtype)
out = mx.contrib.nd.box_nms(data, overlap_thresh=thresh, valid_thresh=valid, topk=topk,
coord_start=coord, score_index=score, id_index=cid,
force_suppress=force, in_format=in_format, out_format=out_format)
assert_almost_equal(out.asnumpy(), expected.astype(dtype), rtol=1e-3, atol=1e-3)
def test_box_nms_backward(data, grad, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1,
cid=0, force=False, in_format='corner', out_format='corner'):
in_var = mx.sym.Variable('data')
arr_data = mx.nd.array(data)
arr_grad = mx.nd.empty(arr_data.shape)
op = mx.contrib.sym.box_nms(in_var, overlap_thresh=thresh, valid_thresh=valid, topk=topk,
coord_start=coord, score_index=score, id_index=cid,
force_suppress=force, in_format=in_format, out_format=out_format)
exe = op.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
exe.forward(is_train=True)
exe.backward(mx.nd.array(grad))
assert_almost_equal(arr_grad.asnumpy(), expected)
def corner_to_center(data):
out = np.reshape(data, (-1, 6)).copy()
out[:, 2] = (data[:, 2] + data[:, 4]) / 2.0
out[:, 3] = (data[:, 3] + data[:, 5]) / 2.0
out[:, 4] = data[:, 4] - data[:, 2]
out[:, 5] = data[:, 5] - data[:, 3]
invalid = np.where(data[:, 0] < 0)[0]
out[invalid, :] = -1
return out
def center_to_corner(data):
        data = np.reshape(data, (-1, 6)).copy()
        out = data.copy()  # output buffer
        out[:, 2] = data[:, 2] - data[:, 4] / 2.0
out[:, 3] = data[:, 3] - data[:, 5] / 2.0
out[:, 4] = data[:, 2] + data[:, 4] / 2.0
out[:, 5] = data[:, 3] + data[:, 5] / 2.0
invalid = np.where(data[:, 0] < 0)[0]
out[invalid, :] = -1
return out
def swap_position(data, expected, coord=2, score=1, cid=0, new_col=0):
data = np.reshape(data, (-1, 6))
expected = np.reshape(expected, (-1, 6))
new_coord = random.randint(0, 6 + new_col - 4)
others = list(range(new_coord)) + list(range(new_coord + 4, 6 + new_col))
random.shuffle(others)
new_score = others[0]
new_cid = others[1]
new_data = np.full((data.shape[0], data.shape[1] + new_col), -1.0)
new_expected = np.full((expected.shape[0], expected.shape[1] + new_col), -1.0)
new_data[:, new_coord:new_coord+4] = data[:, coord:coord+4]
new_data[:, new_score] = data[:, score]
new_data[:, new_cid] = data[:, cid]
new_expected[:, new_coord:new_coord+4] = expected[:, coord:coord+4]
new_expected[:, new_score] = expected[:, score]
new_expected[:, new_cid] = expected[:, cid]
return new_data, new_expected, new_coord, new_score, new_cid
# manually set up test cases
boxes = [[0, 0.5, 0.1, 0.1, 0.2, 0.2], [1, 0.4, 0.1, 0.1, 0.2, 0.2],
[0, 0.3, 0.1, 0.1, 0.14, 0.14], [2, 0.6, 0.5, 0.5, 0.7, 0.8]]
# case1
force=True
thresh=0.5
expected = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
[0, 0.3, 0.1, 0.1, 0.14, 0.14], [-1, -1, -1, -1, -1, -1]]
grad = np.random.rand(4, 6)
expected_in_grad = grad[(1, 3, 2, 0), :]
expected_in_grad[1, :] = 0
test_box_nms_forward(np.array(boxes), np.array(expected), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes), grad, expected_in_grad, force=force, thresh=thresh)
# case2: multi batch
boxes2 = [boxes] * 3
expected2 = [expected] * 3
grad2 = np.array([grad.tolist()] * 3)
expected_in_grad2 = np.array([expected_in_grad.tolist()] * 3)
test_box_nms_forward(np.array(boxes2),
|
np.array(expected2)
|
numpy.array
|
import os
import numpy as np
import torch
from .metric_computer import MetricComputer
from ..common_util.image import get_gt_frame, get_comp_frame
from ..models.i3d.pytorch_i3d import InceptionI3d
from ..fid import calculate_frechet_distance
from ..fid.util import extract_video_features
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJ_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..'))
class VFIDComputer(MetricComputer):
def compute_metric(self):
num_work_items = 2 * len(self.opts.video_names) if self.opts.gt_vfid_feats_path is None \
else len(self.opts.video_names)
self.send_work_count_msg(num_work_items)
model = InceptionI3d(400, in_channels=3)
weights = torch.load(os.path.join(PROJ_DIR, 'pretrained_models', 'rgb_imagenet.pt'))
model.load_state_dict(weights)
model.cuda()
model.eval()
torch.set_grad_enabled(False)
def get_gt_frame_wrapper(video_name, t):
return get_gt_frame(self.opts.gt_root, video_name, t)
def get_comp_frame_wrapper(video_name, t):
return get_comp_frame(self.opts.gt_root, self.opts.pred_root, video_name, t)
def update_progress_cb():
self.send_update_msg(1)
if self.opts.gt_vfid_clips_feats_path is None:
gt_clip_features = extract_video_features(model, get_gt_frame_wrapper, self.opts.video_names,
self.opts.video_frame_counts, update_progress_cb)
else:
gt_clip_features = np.load(self.opts.gt_vfid_feats_path)
gt_mu = np.mean(gt_clip_features, axis=0)
gt_sigma =
|
np.cov(gt_clip_features, rowvar=False)
|
numpy.cov
|
from utils.env_utils import minigrid_render, minigrid_get_env
import os, time
import numpy as np
import argparse
import random
import matplotlib.pyplot as plt
import pickle5 as pickle
from imitation.data import rollout
from imitation.util import logger, util
from imitation.algorithms import bc
import gym
import gym_minigrid
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy
from imitation.data.types import Trajectory
import dataclasses
import logging
import os
from typing import Callable, Dict, Iterable, Mapping, Optional, Type, Union
import gym
import numpy as np
import torch as th
import torch.nn as nn
import torch.utils.data as th_data
import torch.utils.tensorboard as thboard
from tqdm import tqdm
from stable_baselines3.common import on_policy_algorithm, preprocessing, vec_env
from imitation.data import buffer, types, wrappers
from imitation.rewards import common as rew_common
from imitation.rewards import discrim_nets, reward_nets
from imitation.util import logger, reward_wrapper, util
from imitation.data import rollout
import itertools
import copy
class BaCRNN:
def __init__(
self,
train_env,
bac_classifier,
expert_data,
bc_trainer=None,
not_expert_data=None,
nepochs: int = 10,
batch_size: int = 10,
):
self.train_env = train_env # pass an instance of the environment
self.bc_trainer = (
bc_trainer # pass an instance of imitation.bc with a trained policy.
)
self.bac_classifier = bac_classifier
self.not_expert_data = not_expert_data
self.expert_data = expert_data
# taken from imitation.algorithms.adversarial
self.expert_dataloader = util.endless_iter(expert_data)
self.provided_not_expert_dataloader = None
if not_expert_data:
self.provided_not_expert_dataloader = util.endless_iter(not_expert_data)
self.bac_optimizer = th.optim.AdamW(self.bac_classifier.parameters())
self.bac_loss = nn.BCEWithLogitsLoss()
self.batch_size = batch_size
self.nepochs = nepochs
self.collect_max = 150
def train_bac_classifier(self):
if self.bc_trainer != None:
self.collect_not_expert_from_bc(filter = False)
self.collect_not_expert(filter = False)
self.collect_not_expert_from_expert(filter = False)
for i in tqdm(range(self.nepochs)):
full_loss = 0
if i%25 == 0 and i > 20:
if self.bc_trainer != None:
self.collect_not_expert_from_bc(filter = True)
self.collect_not_expert(filter = True)
self.collect_not_expert_from_expert(filter = True)
self.bac_classifier.train()
for j in range(10):
batch = [next(self.expert_dataloader) for i in range(self.batch_size)]
exp_labels = np.ones(self.batch_size, dtype=int)
if i % 2 == 0:
if self.bc_trainer != None:
batch.extend([next(self.not_expert_from_bc_dataloader) for i in range(self.batch_size)])
else:
batch.extend([next(self.not_expert_dataloader) for i in range(self.batch_size)])
else:
batch.extend([next(self.not_expert_from_expert_dataloader) for i in range(self.batch_size)])
label = self._torchify_array(
np.concatenate([exp_labels, np.zeros(self.batch_size, dtype=int),])
)
logits = self.bac_classifier(batch)
loss = self.bac_loss(logits, label.float())
self.bac_optimizer.zero_grad()
loss.backward()
self.bac_optimizer.step()
full_loss += loss.data
print(full_loss / 10)
if self.earlystopping():
break
print("bac training done")
def expert_warmstart(self):
self.bac_classifier.train()
print(f"warm start for {20} epochs with batch size {5}")
for i in tqdm(range(10)):
full_loss = 0
for j in range(2):
batch = [next(self.expert_dataloader) for i in range(5)]
label = self._torchify_array(np.ones(5, dtype=int))
logits = self.bac_classifier(batch)
loss = self.bac_loss(logits, label.float())
self.bac_optimizer.zero_grad()
loss.backward()
self.bac_optimizer.step()
full_loss += loss.item()
print("expert only:", full_loss / 10)
expert_probs_avg = 0
for traj in self.expert_data:
expert_probs_avg += self.predict(traj).item()
print(f"expert probs sanity check {expert_probs_avg/len(self.expert_data)}")
print("bac warmstart done")
def save(self, save_path, save_name):
os.chdir(save_path)
th.save(self.bac_classifier.state_dict(),save_name)
def _torchify_array(self, ndarray: np.ndarray, **kwargs) -> th.Tensor:
return th.as_tensor(ndarray, device=self.bac_classifier.device(), **kwargs)
def _torchify_with_space(
self, ndarray: np.ndarray, space: gym.Space, **kwargs
) -> th.Tensor:
tensor = th.as_tensor(ndarray, device=self.bac_classifier.device(), **kwargs)
preprocessed = preprocessing.preprocess_obs(
tensor, space, normalize_images=False,
)
return preprocessed
def predict(self, traj, return_logit=False):
"""
        predicts and returns either the logit or the probability
"""
self.bac_classifier.eval()
logit = self.bac_classifier(traj)
if return_logit:
return logit
else:
probs = th.sigmoid(logit) # no need for -logit as expert is 1
return probs
def collect_not_expert(self, filter = False, cutoff = 0.9):
self.not_expert_dataset = []
for _ in range(self.collect_max):
obs_list = []
action_list = []
obs = self.train_env.reset()
obs_list.append(obs[0])
for i in range(4):
# action = self.train_env.action_space.sample()
action = random.sample([0, 1, 2], 1)[0]
# print(action)
obs, _, done, _ = self.train_env.step([action])
action_list.append(action)
obs_list.append(obs[0])
if done:
break
if len(action_list) >= 1:
collected_traj = Trajectory(
obs=np.array(obs_list),
acts=np.array(action_list),
infos=np.array([{} for i in action_list]),
)
if filter:
if self.predict(collected_traj) < cutoff:
self.not_expert_dataset.append(collected_traj)
else:
self.not_expert_dataset.append(collected_traj)
print(f"not expert dataset size: {len(self.not_expert_dataset)}")
self.not_expert_dataloader = util.endless_iter(self.not_expert_dataset)
def collect_not_expert_from_bc(self, filter = False, cutoff = 0.9):
assert self.bc_trainer != None, "Need a trained BC"
self.not_expert_from_bc_dataset = []
for _ in range(self.collect_max):
obs_list = []
action_list = []
ok_flag = True
obs = self.train_env.reset()
obs_list.append(obs[0])
#bc rollout
for j in range(random.sample(list(range(1)), 1)[0]):
action, _ = self.bc_trainer.policy.predict(obs, deterministic=True)
obs, _, done, _ = self.train_env.step(action)
action_list.append(action[0])
obs_list.append(obs[0])
if done:
ok_flag = False
break
#continue with random actions
for i in range(2):
if not ok_flag:
break
action = random.sample([0, 1, 2], 1)[0]
obs, _, done, _ = self.train_env.step([action])
action_list.append(action)
obs_list.append(obs[0])
if done:
ok_flag = False
break
if len(action_list) >= 1 and ok_flag:
collected_traj = Trajectory(
obs=np.array(obs_list),
acts=np.array(action_list),
infos=np.array([{} for i in action_list]),
)
if filter:
if self.predict(collected_traj) < cutoff:
self.not_expert_from_bc_dataset.append(collected_traj)
else:
self.not_expert_from_bc_dataset.append(collected_traj)
print(f"not expert from bc dataset size: {len(self.not_expert_from_bc_dataset)}")
self.not_expert_from_bc_dataloader = util.endless_iter(self.not_expert_from_bc_dataset)
def collect_not_expert_from_expert(self, filter = False, cutoff = 0.9):
self.not_expert_from_expert_dataset = []
for _ in range(int(self.collect_max)):
expert_traj = copy.deepcopy(next(self.expert_dataloader))
obs_list = expert_traj.obs.tolist()
act_list = expert_traj.acts.tolist()
if len(act_list) < 5:
continue
for _ in range(random.sample(list(range(3)),1)[0]):
del obs_list[-1]
del act_list[-1]
if len(act_list) < 2:
break
if len(act_list) >= 1:
collected_traj = Trajectory(
obs=
|
np.array(obs_list)
|
numpy.array
|
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import map
from builtins import str
from builtins import range
from past.utils import old_div
from future.utils import raise_
import numpy as num
import unittest
import tempfile
import os
import sys
from anuga.file.netcdf import NetCDFFile
from anuga.utilities.system_tools import get_pathname_from_package
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.coordinate_transforms.redfearn import redfearn
from anuga.utilities.numerical_tools import ensure_numeric
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.file.sts import create_sts_boundary
from anuga.file.csv_file import load_csv_as_dict, load_csv_as_array
from anuga.shallow_water.shallow_water_domain import Domain
# boundary functions
from anuga.shallow_water.boundaries import Reflective_boundary, \
Field_boundary, Transmissive_momentum_set_stage_boundary, \
Transmissive_stage_zero_momentum_boundary
from anuga.abstract_2d_finite_volumes.generic_boundary_conditions\
import Transmissive_boundary, Dirichlet_boundary, \
Time_boundary, File_boundary, AWI_boundary
from anuga.pmesh.mesh_interface import create_mesh_from_regions
from anuga.file_conversion.urs2sts import urs2sts
# Allow us to use helper methods from this test.
from anuga.file.tests.test_mux import Test_Mux
class Test_Urs2Sts(Test_Mux):
""" A suite of tests to test urs2sts file conversion functions.
These tests are quite coarse-grained: converting a file
and checking that its headers and some of its contents
are correct.
"""
def tearDown(self):
for file in ['domain.sww', 'urs_test_mesh.tsh' ]:
try:
os.remove(file)
except:
pass
def test_urs2sts0(self):
"""
Test single source
"""
tide=0
time_step_count = 3
time_step = 2
lat_long_points =[(-21.5,114.5),(-21,114.5),(-21.5,115), (-21.,115.)]
n=len(lat_long_points)
first_tstep=num.ones(n,num.int)
first_tstep[0]+=1
first_tstep[2]+=1
last_tstep=(time_step_count)*num.ones(n,num.int)
last_tstep[0]-=1
gauge_depth=20*num.ones(n,num.float)
ha=2*num.ones((n,time_step_count),num.float)
ha[0]=num.arange(0,time_step_count)
ha[1]=num.arange(time_step_count,2*time_step_count)
ha[2]=num.arange(2*time_step_count,3*time_step_count)
ha[3]=num.arange(3*time_step_count,4*time_step_count)
ua=5*num.ones((n,time_step_count),num.float)
va=-10*num.ones((n,time_step_count),num.float)
base_name, files = self.write_mux2(lat_long_points,
time_step_count, time_step,
first_tstep, last_tstep,
depth=gauge_depth,
ha=ha,
ua=ua,
va=va)
sts_file = base_name + '.sts'
urs2sts(base_name,
basename_out=sts_file,
mean_stage=tide,verbose=False)
        # Let's interrogate the sts file
        # Note, the sts info is not gridded. It is point data.
fid = NetCDFFile(sts_file)
# Make x and y absolute
x = fid.variables['x'][:]
y = fid.variables['y'][:]
geo_reference = Geo_reference(NetCDFObject=fid)
points = geo_reference.get_absolute(list(zip(x, y)))
points = ensure_numeric(points)
x = points[:,0]
y = points[:,1]
#Check that first coordinate is correctly represented
#Work out the UTM coordinates for first point
for i in range(4):
zone, e, n = redfearn(lat_long_points[i][0], lat_long_points[i][1])
assert num.allclose([x[i],y[i]], [e,n])
#Check the time vector
times = fid.variables['time'][:]
times_actual = []
for i in range(time_step_count):
times_actual.append(time_step * i)
assert num.allclose(ensure_numeric(times),
ensure_numeric(times_actual))
#Check first value
stage = fid.variables['stage'][:]
xmomentum = fid.variables['xmomentum'][:]
ymomentum = fid.variables['ymomentum'][:]
elevation = fid.variables['elevation'][:]
# Set original data used to write mux file to be zero when gauges are
        # not recording
        ha[0][0] = 0.0
        ha[0][time_step_count-1] = 0.0
        ha[2][0] = 0.0
        ua[0][0] = 0.0
        ua[0][time_step_count-1] = 0.0
        ua[2][0] = 0.0
        va[0][0] = 0.0
        va[0][time_step_count-1] = 0.0
        va[2][0] = 0.0
assert num.allclose(num.transpose(ha),stage) #Meters
#Check the momentums - ua
#momentum = velocity*(stage-elevation)
# elevation = - depth
#momentum = velocity_ua *(stage+depth)
depth=num.zeros((len(lat_long_points),time_step_count),num.float)
for i in range(len(lat_long_points)):
depth[i]=gauge_depth[i]+tide+ha[i]
assert num.allclose(num.transpose(ua*depth),xmomentum)
#Check the momentums - va
#momentum = velocity*(stage-elevation)
# elevation = - depth
#momentum = velocity_va *(stage+depth)
assert num.allclose(num.transpose(va*depth),ymomentum)
# check the elevation values.
        # -ve since urs measures depth, sww measures height,
assert num.allclose(-elevation, gauge_depth) #Meters
fid.close()
self.delete_mux(files)
os.remove(sts_file)
def test_urs2sts_nonstandard_meridian(self):
"""
Test single source using the meridian from zone 50 as a nonstandard meridian
"""
tide=0
time_step_count = 3
time_step = 2
lat_long_points =[(-21.,114.5),(-21.,113.5),(-21.,114.), (-21.,115.)]
n=len(lat_long_points)
first_tstep=num.ones(n,num.int)
first_tstep[0]+=1
first_tstep[2]+=1
last_tstep=(time_step_count)*
|
num.ones(n,num.int)
|
numpy.ones
|
"""
Test module to make sure that simple representations of
hyperbolic errors are equivalent to more generalized
expressions.
"""
import numpy as N
from scipy.stats import chi2
from ..display.plot.cov_types.regressions import hyperbola
from ..orientation.test_pca import random_pca
from ..orientation.pca import augment as augment_matrix
from .conics import Conic, conic
from ..display.hyperbola import hyperbolic_errors
from ..error.axes import sampling_axes
from .util import vector, plane, dot
def simple_hyperbola(cov, xvals, n=1, level=1):
"""
Simple hyperbolic error bounds for 2d errors
using quadratic formulation.
Returns tuple of
( distance from center of distribution,
width of error bar)
in unrotated coordinate space
"""
assert len(cov) == 2
a = cov[0]
# Plot hyperbola
b = N.sqrt(cov[-1])
def y(x):
return level*b*N.sqrt(x**2/(a*n)+1/n)
# Top values of error bar only
t = N.array([xvals,y(xvals)])
return t
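def _simple_hyperbola_example():
    # Hedged usage sketch (a hypothetical helper with illustrative values, not part
    # of the original tests): a two-component covariance and a coarse x grid give a
    # (2, npts) array whose rows are distance from the centre and the top of the
    # error bar.
    trace = simple_hyperbola(N.array([4.0, 1.0]), N.linspace(-10, 10, 5), n=10, level=1)
    assert trace.shape == (2, 5)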
# Create a basic fit to test against
# (we could probably speed this up)
fit = random_pca()
sv = fit.singular_values
n = len(fit.arr)
covariance = sv**2/(n-1)
xvals = N.linspace(-100,100,100)
level = N.sqrt(chi2.ppf(0.95,n-3))
def test_sampling_covariance():
"""
Test the creation of hyperbolic errors
along direction of maximum angular variability
"""
# use only direction of maximum angular
# variation
cov = covariance[1:]
res1 = simple_hyperbola(cov,xvals, n, level)
res2 = hyperbola(
cov,
N.identity(2), # rotation
N.array([0,0]), # mean
xvals,
n=n,
level=level)
# In axis-aligned frame, magnitude of top and bottom
# of error bars should be the same
assert N.allclose(
N.abs(res2[1]),
res2[2])
# Get only top values (bottom will be the same
# implicitly)
res2 = (res2[0],res2[-1])
for a,b in zip(res1,res2):
assert N.allclose(a,b)
def test_hyperbolic_simple():
"""
Convert to hyperbolic axes before projection into
plane of maximum angular variability
"""
# Integrate error level at first
hyp_axes = N.copy(covariance)
hyp_axes[-1]*=level**2/n
hyp_axes = hyp_axes[1:]
cov = covariance[1:]
res1 = simple_hyperbola(cov,xvals, n, level)
res2 = simple_hyperbola(hyp_axes,xvals)
for a,b in zip(res1,res2):
assert N.allclose(a,b)
def test_hyperbolic_projection():
"""
Fully projective mechanism to get hyperbolic error
bounds in a generalized way along any axes associated with
the plane.
"""
# Convert covariance into hyperbolic axes
# using assumptions of normal vectorization
hyp_axes = N.copy(covariance)
hyp_axes[-1]*=level**2/n
d = 1/hyp_axes
#d[-1] *= -1
ndim = len(d)
arr =
|
N.identity(ndim+1)
|
numpy.identity
|
#-*- coding: utf-8 -*-
'''utility functions for causality analysis'''
import math
import numpy as np
def _tsdata_to_var(X, p):
"""
    Calculate the coefficients, residual covariance and noise covariance for
    the optimized model order.
ref: http://users.sussex.ac.uk/~lionelb/MVGC/html/tsdata_to_var.html
Parameters
----------
X: narray, shape (n_sources, n_times, n_epochs)
The data to estimate the model order for.
p: int, the optimized model order.
Returns
----------
A: array, coefficients of the specified model
    SIG: array, residual covariance of this model
E: array, noise covariance of this model
"""
assert p >= 1, "The model order must be greater or equal to 1."
n, m, N = X.shape
p1 = p + 1
q1n = p1 * n
I = np.eye(n)
XX =
|
np.zeros((n, p1, m + p, N))
|
numpy.zeros
|
""" Plot full light curves, one panel per band
Advice on aesthetic from <NAME>
this is Fig 3 in the paper """
import matplotlib.pyplot as plt
plt.rc("font", family="serif")
plt.rc("text", usetex=True)
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import numpy as np
from astropy.table import Table
from astropy.cosmology import Planck15
import glob
import extinction
from uv_lc import get_uv_lc
zp = 2458370.6473
def plot_inset():
# zoomed-in window showing the earliest non-detection and detection
axins = inset_axes(
ax, 2, 1, loc=1,
bbox_to_anchor=(0.87,0.98),
bbox_transform=ax.transAxes)
choose = np.logical_and(det, band)
axins.errorbar(
dt[choose]*24, mag[choose], emag[choose], fmt='s', ms=6,
mec=rcol, mfc=rcol, c=rcol, label='r', zorder=9)
choose = np.logical_and(nondet, band)
axins.arrow(
2458370.6408-zp, 19.97, 0, 0.5, length_includes_head=True,
head_width=0.2, head_length=0.3, fc='k', ec='k')
band = filt=='g'
choose = np.logical_and(np.logical_and(det, band), dt*24 < 3)
axins.errorbar(
dt[choose]*24, mag[choose], emag[choose],
fmt='o', ms=5, mec='#57106e', mfc='white', c='#57106e', label='g')
# fit a line to this early g-band data
out = np.polyfit(dt[choose]*24, mag[choose], deg=1, w=1/emag[choose])
m,b = out
dt_plt = np.linspace(-1,3)
y_plt = m*dt_plt + b
axins.plot(dt_plt, y_plt, ls='--', c='k', lw=0.5)
axins.text(0.5, 0.5, "31.2 mag/day", fontsize=12, transform=axins.transAxes,
verticalalignment='top')
axins.set_xlim(-0.1,3)
axins.set_ylim(18,21)
axins.tick_params(axis='both', labelsize=12)
axins.set_xlabel(r"Hours since $t_0$", fontsize=12)
axins.invert_yaxis()
ax.plot([-1, -1], [21, 18], c='k', lw=0.5)
ax.plot([1, 1], [21, 18], c='k', lw=0.5)
ax.plot([-1, 1], [18, 18], c='k', lw=0.5)
ax.plot([-1, 1], [21, 21], c='k', lw=0.5)
#mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
def get_lc():
# get optical light curves
DATA_DIR = "/Users/annaho/Dropbox/Projects/Research/ZTF18abukavn/data/phot"
f = DATA_DIR + "/ZTF18abukavn_opt_phot.dat"
dat = np.loadtxt(f, dtype=str, delimiter=' ')
instr = dat[:,0]
jd = dat[:,1].astype(float)
filt = dat[:,2]
mag = dat[:,3].astype(float)
emag = dat[:,4].astype(float)
dt = jd-zp
# add the UV light curves
add_dt, add_filt, fnu_mjy, efnu_mjy = get_uv_lc()
# convert to AB mag
add_mag = -2.5 * np.log10(fnu_mjy*1E-3) + 8.90
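    # Worked example (sanity check, not from the original script): fnu_mjy = 1 mJy
    # gives -2.5 * log10(1e-3) + 8.90 = 16.4 AB mag.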
add_emag = (efnu_mjy/fnu_mjy) # I think it's just the ratio
choose = add_emag < 50
dt = np.append(dt, add_dt[choose])
filt = np.append(filt, add_filt[choose])
mag =
|
np.append(mag, add_mag[choose])
|
numpy.append
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 01 11:05:31 2018
@author: willie
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
import time
import _pickle as pickle
from random import sample
import sklearn.metrics as mt
class FlagError(Exception):
def __init__(self, message):
self.message = message
def split(row_indcs, folds):
k, m = divmod(len(row_indcs), folds)
return (row_indcs[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(folds))
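# Worked example (illustrative, not from the original script):
# list(split(list(range(10)), 3)) yields index chunks of sizes 4, 3 and 3,
# since divmod(10, 3) == (3, 1).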
def multi_calc_model_stats(predictions, answers):
'''args: predictions: the list of predictions shape(76,2) for each lesion
answers: the list of answers for each lesion
returns: acc: list of accuracy for the three states
sens: list of sensitivities for the three states
spec: list of specificities for the three states
tots: list of overall acc in [0] and f1 score in [1]
'''
#Hyp == 1 Ser ==2 Aden ==3
#1 is true positive, true negative, false positive, false negative
confu_matrix = mt.confusion_matrix(answers,predictions, labels = np.array([1,2,3]))
total = np.sum(confu_matrix)
print('Confusion Matrix: ')
print(confu_matrix)
total = np.sum(confu_matrix)
tots = [(confu_matrix[0,0]+confu_matrix[1,1]+confu_matrix[2,2])/total,
mt.fbeta_score(answers,predictions,labels=[0,1,2],beta=2,average='micro')]
Hyp = [0,0,0,0]
Ser = [0,0,0,0]
Aden = [0,0,0,0]
Stats = [Hyp,Ser,Aden]
classes = [0,1,2]
for predic, ans in zip(predictions, answers):
p,a = (int(predic),int(ans))
if p == a:
Stats[p-1][0] += 1
for cl in classes:
if cl is not p:
Stats[cl-1][1] += 1
else:
Stats[p-1][2] += 1
Stats[a-1][3] += 1
acc = [(c[0]+c[1])/sum(c) for c in Stats]
sens = [c[0]/(c[0]+c[3]) for c in Stats]
spec = [c[1]/(c[1]+c[2]) for c in Stats]
return acc,sens,spec,tots
def binary_calc_model_stats(predictions, answers):
'''args: predictions: the list of predictions shape(76,2) for each lesion
answers: the list of answers for each lesion
returns: acc: accuracy for the two states
sens: sensitivity for the two states
spec: specificity for the two states
f1: f1 score of the classifier
'''
#makes a confusion matrix
#[0-actual 0 , 1-actual 0]
#[0-actual 1 , 1 - actual 1]
confu_matrix = mt.confusion_matrix(answers,predictions, labels = np.array([0,1]))
total =
|
np.sum(confu_matrix)
|
numpy.sum
|
"""
utility.py
pytracer package
Defines global constants and
utility functions.
Created by Jiayao on Aug 13, 2017
"""
from __future__ import absolute_import
import numpy as np
from pytracer import (FLOAT, INT, UINT, EPS)
__all__ = ['progress_reporter','logging','feq', 'eq_unity', 'ne_unity',
'ftoi', 'ctoi', 'rtoi', 'lerp', 'round_pow_2', 'next_pow_2', 'is_pow_2', 'ufunc_lerp', 'clip', 'is_black']
# Global Functions
def logging(tp: str, msg: str):
print("[{}] {}".format(tp.upper(), msg))
def progress_reporter(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
:params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
# print()
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filled_length = int(length * iteration // total)
bar = fill * filled_length + '-' * (length - filled_length)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
# Print New Line on Complete
if iteration == total:
percent = ("{0:." + str(decimals) + "f}").format(100)
bar = fill * length
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix))
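def _progress_reporter_example():
    # Hedged usage sketch (a hypothetical helper, not part of the original module):
    # drive the bar through a short loop; on the final iteration the completed bar
    # is printed on its own line.
    total = 5
    for i in range(1, total + 1):
        progress_reporter(i, total, prefix='Progress:', suffix='done', length=20)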
def feq(x: (float, FLOAT), y: (float, FLOAT)) -> bool:
"""Equality test for floats."""
return np.isclose(x, y, atol=EPS)
def eq_unity(x: (float, FLOAT)) -> bool:
"""Equality test with unity."""
return (x > 1. - EPS) and (x < 1. + EPS)
def ne_unity(x: (float, FLOAT)) -> bool:
"""Inequality test with unity."""
return x < 1. - EPS or x > 1. + EPS
def is_zero(x: (float, FLOAT)) -> bool:
"""Equality test with zero."""
return x > -EPS and x < EPS
def not_zero(x: (float, FLOAT)) -> bool:
"""Inequality test with zero"""
return x < -EPS or x > EPS
def ftoi(x: (float, FLOAT)) -> INT:
"""Floor to integer"""
return INT(np.floor(x))
def ctoi(x: (float, FLOAT)) -> INT:
"""Ceiling to integer"""
return INT(np.ceil(x))
def rtoi(x: (float, FLOAT)) -> INT:
"""Round to integer"""
return INT(np.round(x))
def lerp(t: (float, FLOAT), v1: (float, FLOAT), v2: (float, FLOAT)) -> (float, FLOAT):
"""Linear interpolation between `v1` and `v2`"""
return (1. - t) * v1 + t * v2
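# e.g. lerp(0.25, 0.0, 8.0) == 2.0 (illustrative check, not part of the original module)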
def round_pow_2(x: INT) -> INT:
"""Round to nearest power of 2"""
return INT(2 ** np.round(np.log2(x)))
def next_pow_2(x: INT) -> INT:
"""Round to next(or current) power of 2"""
return INT(2 ** np.ceil(np.log2(x)))
def is_pow_2(x: INT) -> bool:
"""Test whether is power of 2"""
return x & (x-1) == 0
# return True if x == 0 else (np.log2(x) % 1) == 0.
def clip(x, min=0., max=np.inf):
from pytracer import Spectrum
if isinstance(x, (float, FLOAT, int, INT, UINT)):
return
|
np.clip(x, min, max)
|
numpy.clip
|
# Copyright (c) 2019-2021, <NAME>, <NAME>, <NAME>, and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import collections.abc
import numpy
import vector.backends.object_
from vector.methods import (
Azimuthal,
AzimuthalRhoPhi,
AzimuthalXY,
Coordinates,
Longitudinal,
LongitudinalEta,
LongitudinalTheta,
LongitudinalZ,
Lorentz,
LorentzMomentum,
Planar,
PlanarMomentum,
Spatial,
SpatialMomentum,
Temporal,
TemporalT,
TemporalTau,
Vector,
Vector2D,
Vector3D,
Vector4D,
_aztype,
_coordinate_class_to_names,
_coordinate_order,
_handler,
_ltype,
_repr_generic_to_momentum,
_repr_momentum_to_generic,
_ttype,
)
def _array_from_columns(columns):
if len(columns) == 0:
raise ValueError("no columns have been provided")
names = list(columns.keys())
names.sort(
key=lambda x: _coordinate_order.index(x)
if x in _coordinate_order
else float("inf")
)
dtype = []
shape = None
for x in names:
if hasattr(columns[x], "dtype"):
thisdtype = (x, columns[x].dtype)
else:
thisdtype = (x, numpy.float64)
if hasattr(columns[x], "shape"):
thisshape = columns[x].shape
elif isinstance(columns[x], collections.abc.Sized):
thisshape = (len(columns[x]),)
else:
raise TypeError(f"column {repr(x)} has no length")
dtype.append(thisdtype)
if shape is None:
shape = thisshape
elif shape != thisshape:
raise ValueError(f"column {repr(x)} has a different shape than the others")
array = numpy.empty(shape, dtype)
for x in names:
array[x] = columns[x]
return array
def _setitem(array, where, what, is_momentum):
if isinstance(where, str):
if is_momentum:
where = _repr_momentum_to_generic.get(where, where)
array.view(numpy.ndarray)[where] = what
else:
if hasattr(what, "dtype") and what.dtype.names is not None:
tofill = array[where]
for name in what.dtype.names:
if is_momentum:
generic = _repr_momentum_to_generic.get(name, name)
tofill[generic] = what[name]
else:
raise TypeError(
"right-hand side of assignment must be a structured array with "
"the same fields as " + type(array).__name__
)
def _getitem(array, where, is_momentum):
if isinstance(where, str):
if is_momentum:
where = _repr_momentum_to_generic.get(where, where)
return array.view(numpy.ndarray)[where]
else:
out = numpy.ndarray.__getitem__(array, where)
if isinstance(out, numpy.void):
azimuthal, longitudinal, temporal = None, None, None
if hasattr(array, "_azimuthal_type"):
azimuthal = array._azimuthal_type.ObjectClass(
*[out[x] for x in _coordinate_class_to_names[_aztype(array)]]
)
if hasattr(array, "_longitudinal_type"):
longitudinal = array._longitudinal_type.ObjectClass(
*[out[x] for x in _coordinate_class_to_names[_ltype(array)]]
)
if hasattr(array, "_temporal_type"):
temporal = array._temporal_type.ObjectClass(
*[out[x] for x in _coordinate_class_to_names[_ttype(array)]]
)
if temporal is not None:
return array.ObjectClass(azimuthal, longitudinal, temporal)
elif longitudinal is not None:
return array.ObjectClass(azimuthal, longitudinal)
elif azimuthal is not None:
return array.ObjectClass(azimuthal)
else:
return array.ObjectClass(*out.view(numpy.ndarray))
else:
return out
def _array_repr(array, is_momentum):
name = type(array).__name__
array = array.view(numpy.ndarray)
if is_momentum:
array = array.view(
[
(_repr_generic_to_momentum.get(x, x), array.dtype[x])
for x in array.dtype.names
]
)
return name + repr(array)[5:].replace("\n ", "\n" + " " * len(name))
def _has(array, names):
dtype_names = array.dtype.names
if dtype_names is None:
dtype_names = ()
return all(x in dtype_names for x in names)
def _toarrays(result):
istuple = True
if not isinstance(result, tuple):
istuple = False
result = (result,)
result = tuple(
x if isinstance(x, numpy.ndarray) else numpy.array([x], numpy.float64)
for x in result
)
if istuple:
return result
else:
return result[0]
def _shape_of(result):
if not isinstance(result, tuple):
result = (result,)
shape = None
for x in result:
if hasattr(x, "shape"):
thisshape = list(x.shape)
elif isinstance(x, collections.abc.Sized):
thisshape = [len(x)]
if shape is None or thisshape[0] > shape[0]:
shape = thisshape
return tuple(shape)
class CoordinatesNumpy:
lib = numpy
class AzimuthalNumpy(CoordinatesNumpy):
pass
class LongitudinalNumpy(CoordinatesNumpy):
pass
class TemporalNumpy(CoordinatesNumpy):
pass
class VectorNumpy:
def allclose(self, other, rtol=1e-05, atol=1e-08, equal_nan=False):
return self.isclose(other, rtol=rtol, atol=atol, equal_nan=equal_nan).all()
def __eq__(self, other):
return numpy.equal(self, other)
def __ne__(self, other):
return numpy.not_equal(self, other)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if not isinstance(_handler(inputs), VectorNumpy):
# Let the array-of-vectors object handle it.
return NotImplemented
if isinstance(self, Vector2D):
from vector.compute.planar import add, dot, equal, not_equal
from vector.compute.planar import rho as absolute
from vector.compute.planar import rho2 as absolute2
from vector.compute.planar import scale, subtract
elif isinstance(self, Vector3D):
from vector.compute.spatial import add, dot, equal
from vector.compute.spatial import mag as absolute
from vector.compute.spatial import mag2 as absolute2
from vector.compute.spatial import not_equal, scale, subtract
elif isinstance(self, Vector4D):
from vector.compute.lorentz import (
add,
dot,
equal,
not_equal,
scale,
subtract,
)
from vector.compute.lorentz import tau as absolute
from vector.compute.lorentz import tau2 as absolute2
outputs = kwargs.get("out", ())
if any(not isinstance(x, VectorNumpy) for x in outputs):
raise TypeError(
"ufunc operating on VectorNumpys can only fill another VectorNumpy "
"with 'out' keyword"
)
if ufunc is numpy.absolute and len(inputs) == 1:
result = absolute.dispatch(inputs[0])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.add and len(inputs) == 2:
result = add.dispatch(inputs[0], inputs[1])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.subtract and len(inputs) == 2:
result = subtract.dispatch(inputs[0], inputs[1])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif (
ufunc is numpy.multiply
and not isinstance(inputs[0], (Vector, Coordinates))
and len(inputs) == 2
):
result = scale.dispatch(inputs[0], inputs[1])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif (
ufunc is numpy.multiply
and not isinstance(inputs[1], (Vector, Coordinates))
and len(inputs) == 2
):
result = scale.dispatch(inputs[1], inputs[0])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.negative and len(inputs) == 1:
result = scale.dispatch(-1, inputs[0])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.positive and len(inputs) == 1:
result = inputs[0]
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif (
ufunc is numpy.true_divide
and not isinstance(inputs[1], (Vector, Coordinates))
and len(inputs) == 2
):
result = scale.dispatch(1 / inputs[1], inputs[0])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif (
ufunc is numpy.power
and not isinstance(inputs[1], (Vector, Coordinates))
and len(inputs) == 2
):
result = absolute.dispatch(inputs[0]) ** inputs[1]
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.square and len(inputs) == 1:
result = absolute2.dispatch(inputs[0])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.sqrt and len(inputs) == 1:
result = numpy.sqrt(absolute.dispatch(inputs[0]))
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.cbrt and len(inputs) == 1:
result = numpy.cbrt(absolute.dispatch(inputs[0]))
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.matmul and len(inputs) == 2:
result = dot.dispatch(inputs[0], inputs[1])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.equal and len(inputs) == 2:
result = equal.dispatch(inputs[0], inputs[1])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
elif ufunc is numpy.not_equal and len(inputs) == 2:
result = not_equal.dispatch(inputs[0], inputs[1])
for output in outputs:
for name in output.dtype.names:
output[name] = result[name]
return result
else:
return NotImplemented
def __array_function__(self, func, types, args, kwargs):
if func is numpy.isclose:
return type(self).isclose(*args, **kwargs)
elif func is numpy.allclose:
return type(self).allclose(*args, **kwargs)
else:
return NotImplemented
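# Illustrative only: a minimal sketch of how the dispatch hooks above are exercised,
# assuming the library's public `vector.array` constructor; this helper is not part
# of the original module.
def _example_ufunc_dispatch():
    import numpy
    import vector
    v = vector.array({"x": [3.0, 0.0], "y": [4.0, 1.0]})
    # numpy.absolute is routed through __array_ufunc__ to the planar rho computation.
    assert numpy.allclose(numpy.absolute(v), [5.0, 1.0])
    # numpy.add is routed to component-wise vector addition; taking the magnitude
    # of the doubled vectors doubles rho as well.
    assert numpy.allclose(numpy.absolute(numpy.add(v, v)), [10.0, 2.0])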
class AzimuthalNumpyXY(AzimuthalNumpy, AzimuthalXY, numpy.ndarray):
ObjectClass = vector.backends.object_.AzimuthalObjectXY
def __new__(cls, *args, **kwargs):
return numpy.array(*args, **kwargs).view(cls)
def __array_finalize__(self, obj):
if not _has(self, ("x", "y")):
raise TypeError(
f"{type(self).__name__} must have a structured dtype containing "
'fields ("x", "y")'
)
@property
def elements(self):
return (self["x"], self["y"])
@property
def x(self):
return self["x"]
@property
def y(self):
return self["y"]
def __getitem__(self, where):
return _getitem(self, where, False)
class AzimuthalNumpyRhoPhi(AzimuthalNumpy, AzimuthalRhoPhi, numpy.ndarray):
ObjectClass = vector.backends.object_.AzimuthalObjectRhoPhi
def __new__(cls, *args, **kwargs):
return numpy.array(*args, **kwargs).view(cls)
def __array_finalize__(self, obj):
if not _has(self, ("rho", "phi")):
raise TypeError(
f"{type(self).__name__} must have a structured dtype containing "
'fields ("rho", "phi")'
)
@property
def elements(self):
return (self["rho"], self["phi"])
@property
def rho(self):
return self["rho"]
@property
def phi(self):
return self["phi"]
def __getitem__(self, where):
return _getitem(self, where, False)
class LongitudinalNumpyZ(LongitudinalNumpy, LongitudinalZ, numpy.ndarray):
ObjectClass = vector.backends.object_.LongitudinalObjectZ
def __new__(cls, *args, **kwargs):
return numpy.array(*args, **kwargs).view(cls)
def __array_finalize__(self, obj):
if not _has(self, ("z",)):
raise TypeError(
f"{type(self).__name__} must have a structured dtype containing "
'field "z"'
)
@property
def elements(self):
return (self["z"],)
@property
def z(self):
return self["z"]
def __getitem__(self, where):
return _getitem(self, where, False)
class LongitudinalNumpyTheta(LongitudinalNumpy, LongitudinalTheta, numpy.ndarray):
ObjectClass = vector.backends.object_.LongitudinalObjectTheta
def __new__(cls, *args, **kwargs):
return numpy.array(*args, **kwargs).view(cls)
def __array_finalize__(self, obj):
if not _has(self, ("theta",)):
raise TypeError(
f"{type(self).__name__} must have a structured dtype containing "
'field "theta"'
)
@property
def elements(self):
return (self["theta"],)
@property
def theta(self):
return self["theta"]
def __getitem__(self, where):
return _getitem(self, where, False)
class LongitudinalNumpyEta(LongitudinalNumpy, LongitudinalEta, numpy.ndarray):
ObjectClass = vector.backends.object_.LongitudinalObjectEta
def __new__(cls, *args, **kwargs):
return numpy.array(*args, **kwargs).view(cls)
def __array_finalize__(self, obj):
if not _has(self, ("eta",)):
raise TypeError(
f"{type(self).__name__} must have a structured dtype containing "
'field "eta"'
)
@property
def elements(self):
return (self["eta"],)
@property
def eta(self):
return self["eta"]
def __getitem__(self, where):
return _getitem(self, where, False)
class TemporalNumpyT(TemporalNumpy, TemporalT, numpy.ndarray):
ObjectClass = vector.backends.object_.TemporalObjectT
def __new__(cls, *args, **kwargs):
        return numpy.array(*args, **kwargs).view(cls)
from abc import ABC, abstractmethod
import numpy as np
from scipy.special import gammaln
import numbers
from .constants import MIN_POSTERIOR_VARIANCE
__all__ = ['Distribution',
'Gamma',
'Uniform',
'Normal',
'LogNormal',
'Horseshoe']
class Distribution(ABC):
@abstractmethod
def negative_log_density(self, F, x):
"""
Negative log density, computed in MXNet. lower and upper limits are
ignored. If x is not a scalar, the distribution is i.i.d. over all
entries.
"""
pass
class Gamma(Distribution):
"""
Gamma(mean, alpha):
p(x) = C(alpha, beta) x^{alpha - 1} exp( -beta x), beta = alpha / mean,
C(alpha, beta) = beta^alpha / Gamma(alpha)
"""
def __init__(self, mean, alpha):
self._assert_positive_number(mean, 'mean')
self._assert_positive_number(alpha, 'alpha')
self.mean = np.maximum(mean, MIN_POSTERIOR_VARIANCE)
self.alpha = np.maximum(alpha, MIN_POSTERIOR_VARIANCE)
self.beta = self.alpha / self.mean
        self.log_const = gammaln(self.alpha) - self.alpha * np.log(self.beta)
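# Illustrative only: a standalone numerical check of the density written in the
# Gamma docstring above, compared against SciPy's gamma distribution; the function
# name and the sample values are assumptions, not part of the original module.
def _example_gamma_negative_log_density(x=2.0, mean=1.5, alpha=3.0):
    import numpy as np
    from scipy.special import gammaln
    from scipy.stats import gamma as gamma_dist
    beta = alpha / mean
    log_const = gammaln(alpha) - alpha * np.log(beta)
    # -log p(x) = log_const - (alpha - 1) * log(x) + beta * x
    neg_log_density = log_const - (alpha - 1.0) * np.log(x) + beta * x
    assert np.isclose(neg_log_density, -gamma_dist.logpdf(x, a=alpha, scale=1.0 / beta))
    return neg_log_density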
from constants import WHOLEBODY_KEYPOINTS, WHOLEBODY_SKELETON, WHOLEBODY_SCORE_WEIGHTS, \
WHOLEBODY_STANDING_POSE
from constants import SKELETON_CONNECT, CAR_KEYPOINTS, CAR_SKELETON, CAR_SCORE_WEIGHTS, \
CAR_POSE, HFLIP_ids
import networkx as nx
import numpy as np
import json
import pandas as pd
import os
def get_normalized_weights(centrality_measure, kps):
list_centralities = []
for i in range(len(kps)):
list_centralities.append(centrality_measure[i])
w = np.array(list_centralities)
w = w/np.sum(w) * len(kps)
return w
def inverse_normalize(weights, kps):
w = 1/weights
w = w/np.sum(w) * len(kps)
return w.tolist()
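# Illustrative only: a small worked example of the two helpers above on a
# hypothetical two-keypoint skeleton; the numbers are chosen to be easy to verify.
def _example_weight_normalization():
    # Raw centralities 1 and 3 are rescaled so the weights sum to len(kps) = 2.
    w = get_normalized_weights({0: 1.0, 1: 3.0}, kps=["a", "b"])  # -> [0.5, 1.5]
    # Inverting and re-normalizing favours the less central keypoint.
    return inverse_normalize(w, kps=["a", "b"])  # -> [1.5, 0.5]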
def harmonic_centrality_local_radius(G_w, radius, kps, distance="euclidean_dist"):
weights = []
for node in G_w.nodes:
# print(node)
subgraph = nx.generators.ego.ego_graph(G_w, n=node, radius=radius)
centr = nx.harmonic_centrality(subgraph, distance="euclidean_dist")
weights.append(centr[node]/(len(subgraph.nodes())-1))
w = np.array(weights)
w = w/np.sum(w) * len(kps)
return w
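# Illustrative only: a toy sanity check of harmonic_centrality_local_radius on a
# four-node path graph with unit edge lengths; every node then receives the same
# normalized weight of 1.0. The toy graph is an assumption, not project data.
def _example_local_harmonic_centrality():
    G_toy = nx.path_graph(4)
    nx.set_edge_attributes(G_toy, 1.0, "euclidean_dist")
    return harmonic_centrality_local_radius(G_toy, radius=1, kps=range(4))  # ~[1., 1., 1., 1.]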
def draw_ann(ann, *, keypoint_painter, filename=None, margin=0.5, aspect=None, **kwargs):
from openpifpaf import show # pylint: disable=import-outside-toplevel
bbox = ann.bbox()
xlim = bbox[0] - margin, bbox[0] + bbox[2] + margin
ylim = bbox[1] - margin, bbox[1] + bbox[3] + margin
if aspect == 'equal':
fig_w = 5.0
else:
fig_w = 5.0 / (ylim[1] - ylim[0]) * (xlim[1] - xlim[0]) + 0.5
with show.canvas(filename, figsize=(fig_w, 5), nomargin=True, **kwargs) as ax:
ax.set_axis_off()
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
if aspect is not None:
ax.set_aspect(aspect)
keypoint_painter.annotation(ax, ann)
def draw_skeletons_wb(pose, weights, prefix=""):
from openpifpaf.annotation import Annotation # pylint: disable=import-outside-toplevel
from painters import KeypointPainter
scale = 1.0/25
KeypointPainter.show_joint_scales = True
keypoint_painter = KeypointPainter(line_width=1,
monocolor_connections=False)
ann = Annotation(keypoints=WHOLEBODY_KEYPOINTS,
skeleton=WHOLEBODY_SKELETON,
score_weights=WHOLEBODY_SCORE_WEIGHTS)
ann.set(pose, np.array(weights) * scale)
draw_ann(ann, filename='./'+prefix+'_skeleton_wholebody.png',
keypoint_painter=keypoint_painter)
def draw_skeletons_apollo(pose, weights, prefix=""):
from openpifpaf.annotation import Annotation # pylint: disable=import-outside-toplevel
from painters import KeypointPainter
scale = 1.0/25
KeypointPainter.show_joint_scales = True
keypoint_painter = KeypointPainter(line_width=1,
monocolor_connections=False)
ann = Annotation(keypoints=CAR_KEYPOINTS,
skeleton=CAR_SKELETON,
score_weights=CAR_SCORE_WEIGHTS)
ann.set(pose, np.array(weights) * scale)
draw_ann(ann, filename='./'+prefix+'_skeleton_apollocar.png',
keypoint_painter=keypoint_painter)
def rotate(pose, angle=45, axis=2):
sin = np.sin(np.radians(angle))
cos = np.cos(np.radians(angle))
pose_copy = np.copy(pose)
pose_copy[:, 2] = pose_copy[:, 2]
if axis == 0:
rot_mat = np.array([[1, 0, 0],
[0, cos, -sin],
[0, sin, cos]])
elif axis == 1:
rot_mat = np.array([[cos, 0, sin],
[0, 1, 0],
[-sin, 0, cos]])
elif axis == 2:
rot_mat = np.array([[cos, -sin, 0],
[sin, cos, 0],
[0, 0, 1]])
else:
raise Exception("Axis must be 0,1 or 2 (corresponding to x,y,z).")
rotated_pose = np.transpose(np.matmul(rot_mat, np.transpose(pose_copy)))
rotated_pose[:, 2] = rotated_pose[:, 2] + 4
return rotated_pose
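# Illustrative only: rotating the unit x-axis by 90 degrees about the z-axis sends
# it to the y-axis, and rotate() additionally shifts the z-coordinate by +4.
def _example_rotate_x_axis():
    p = np.array([[1.0, 0.0, 0.0]])
    return rotate(p, angle=90, axis=2)  # ~[[0., 1., 4.]]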
def create_weights_wholebody():
name = "wb"
skel = [(bone[0]-1, bone[1]-1) for bone in WHOLEBODY_SKELETON]
kps = WHOLEBODY_KEYPOINTS
G = nx.Graph()
G.add_nodes_from(range(len(kps)))
G.add_edges_from(skel)
with open("Edge_weights_train_wb.json", 'r') as f:
edge_weights = json.load(f)
G_w = nx.Graph()
G_w.add_nodes_from(range(len(kps)))
for bone_id, bone in enumerate(skel):
G_w.add_edge(bone[0], bone[1], euclidean_dist=edge_weights[bone_id])
G_w.add_edge(bone[0], bone[1], euclidean_dist_inverse=1/edge_weights[bone_id])
G_synthetic = nx.Graph()
G_synthetic.add_nodes_from(range(len(kps)))
for bone_id, bone in enumerate(skel):
dist_bone = np.linalg.norm(WHOLEBODY_STANDING_POSE[bone[0], :2] -
WHOLEBODY_STANDING_POSE[bone[1], :2])
G_synthetic.add_edge(bone[0], bone[1], euclidean_dist=dist_bone)
w_cl = get_normalized_weights(nx.closeness_centrality(G), kps=kps)
w_cl_euclid = get_normalized_weights(nx.closeness_centrality(G_w, distance="euclidean_dist"),
kps=kps)
w_harm_cl = get_normalized_weights(nx.harmonic_centrality(G), kps=kps)
w_harm_cl_euclid = get_normalized_weights(
nx.harmonic_centrality(G_w, distance="euclidean_dist"), kps=kps)
w_harm_euclid_radius_1 = get_normalized_weights(
harmonic_centrality_local_radius(G_w, radius=1, kps=kps,
distance="euclidean_dist"), kps=kps)
w_harm_euclid_radius_2 = get_normalized_weights(
harmonic_centrality_local_radius(G_w, radius=2, kps=kps,
distance="euclidean_dist"), kps=kps)
w_harm_euclid_radius_3 = get_normalized_weights(
harmonic_centrality_local_radius(G_w, radius=3, kps=kps,
distance="euclidean_dist"), kps=kps)
w_harm_euclid_radius_3_synthetic = get_normalized_weights(
harmonic_centrality_local_radius(G_synthetic, radius=3, kps=kps,
distance="euclidean_dist"),
kps=kps)
w_harm_cl_euclid_synthetic = get_normalized_weights(
nx.harmonic_centrality(G_synthetic, distance="euclidean_dist"), kps=kps)
hand_crafted = np.array(23*[3.0] + 110*[1.0])
hand_crafted = hand_crafted/np.sum(hand_crafted) * len(hand_crafted) # normalize
results = {"keypoints": kps,
"centrality_closeness_inverse": inverse_normalize(w_cl, kps=kps),
"centrality_closeness_euclid_inverse": inverse_normalize(w_cl_euclid, kps=kps),
"centrality_harmonic_inverse": inverse_normalize(w_harm_cl, kps=kps),
"centrality_harmonic_euclid_inverse": inverse_normalize(w_harm_cl_euclid, kps=kps),
"w_harm_cl_euclid_synthetic": inverse_normalize(w_harm_cl_euclid_synthetic,
kps=kps),
"w_harm_euclid_radius_1": inverse_normalize(w_harm_euclid_radius_1, kps=kps),
"w_harm_euclid_radius_2": inverse_normalize(w_harm_euclid_radius_2, kps=kps),
"w_harm_euclid_radius_3": inverse_normalize(w_harm_euclid_radius_3, kps=kps),
"w_harm_euclid_radius_3_synthetic": inverse_normalize(
w_harm_euclid_radius_3_synthetic, kps=kps),
"hand_crafted": list(hand_crafted),
}
if not os.path.isdir("docs_wb"):
os.makedirs("docs_wb")
WHOLEBODY_STANDING_POSE[:, 2] = 1.0
draw_skeletons_wb(WHOLEBODY_STANDING_POSE, inverse_normalize(w_harm_cl_euclid, kps=kps),
prefix="docs_wb/centrality_harmonic_euclid_global_inverse")
draw_skeletons_wb(WHOLEBODY_STANDING_POSE, inverse_normalize(w_harm_euclid_radius_3, kps=kps),
prefix="docs_wb/w_harm_euclid_radius_3")
draw_skeletons_wb(WHOLEBODY_STANDING_POSE, inverse_normalize(w_harm_euclid_radius_1, kps=kps),
prefix="docs_wb/w_harm_euclid_radius_1")
draw_skeletons_wb(WHOLEBODY_STANDING_POSE, inverse_normalize(w_harm_euclid_radius_2, kps=kps),
prefix="docs_wb/w_harm_euclid_radius_2")
draw_skeletons_wb(WHOLEBODY_STANDING_POSE, inverse_normalize(w_harm_cl_euclid_synthetic,
kps=kps),
prefix="docs_wb/w_harm_cl_euclid_synthetic")
draw_skeletons_wb(WHOLEBODY_STANDING_POSE, inverse_normalize(w_harm_euclid_radius_3_synthetic,
kps=kps),
prefix="docs_wb/w_harm_euclid_radius_3_synthetic")
with open("Weights_"+name+".json", 'w') as f:
json.dump(results, f)
df = pd.read_json("Weights_"+name+".json")
df.to_csv("Weights_"+name+".csv", index=None, header=True)
print("Compututed weights written to: Weights_"+name+".csv")
def create_weights_apollo():
name = "apollocar"
skel = [(bone[0]-1, bone[1]-1) for bone in CAR_SKELETON]
kps = CAR_KEYPOINTS
G = nx.Graph()
G.add_nodes_from(range(len(kps)))
G.add_edges_from(skel)
with open("Edge_weights_train_apollocar.json", 'r') as f:
edge_weights = json.load(f)
G_w = nx.Graph()
G_w.add_nodes_from(range(len(kps)))
for bone_id, bone in enumerate(skel):
G_w.add_edge(bone[0], bone[1], euclidean_dist=edge_weights[bone_id])
G_w.add_edge(bone[0], bone[1], euclidean_dist_inverse=1/edge_weights[bone_id])
G_synthetic = nx.Graph()
G_synthetic.add_nodes_from(range(len(kps)))
for bone_id, bone in enumerate(skel):
        dist_bone = np.linalg.norm(CAR_POSE[bone[0]]-CAR_POSE[bone[1]])
# -*- coding: utf-8 -*-
import numpy as np
import os
from math import ceil
from scipy.signal import cont2discrete
import json
class Acutator(object):
def __init__(self, linear):
self.linear = linear
self.controller = None
def initiate_model(self):
        raise Exception('The initiate_model() function needs to be overwritten in '+
                        'actuator subclasses')
def initiate_controller(self, controlmodule):
try:
exec('from controllers.%s import Controller' % controlmodule)
self.controller = Controller()
except:
raise Exception(('Could not load the controller in '+
'initiate_controller in %s. Please check that the '+
'specified controller file "%s" exists in the '+
'/controllers directory, and that it is '+
'syntactically correct.') % (str(self), controlmodule))
    def _reference_generator(self):
        raise Exception('The _reference_generator() function needs to be overwritten in '+
                        'actuator subclasses')
    def _step_model(self):
        raise Exception('The _step_model() function needs to be overwritten in '+
                        'actuator subclasses')
    def _step_controller(self, ref):
        raise Exception('The _step_controller() function needs to be '+
                        'overwritten in actuator subclasses')
def __str__(self):
return 'Acutator object'
def simulate(self, tf, simtype):
# Computes reference signal
N = int(ceil(float(tf)/self.h))
ref = self._reference_generator(tf, simtype)
        # Sets up empty data arrays
u = np.zeros((self.nControl, N))
x = np.zeros((self.nStates, N))
y = np.zeros((self.nMeasurements, N))
# Simulates the system
for ii in range(0, N-1):
# Computes the control signal using the generated controller
if not self.controller:
u[:, ii] = self._step_controller(ref[:, ii]) # Simple response test
else:
u[:, ii] = self.controller(ref[0, ii], x[0, ii])
            # Computes the system response to the control signal
x[:, ii + 1], y[:, ii] = self._step_model(x[:, ii], u[0:1, ii])
return ref, u, x, y
class DC_Motor(Acutator):
def __init__(self, configfile=None, controlmodule=None):
"""
        Calls the Acutator constructor, sets the configuration and
controller files and initializes the model with the specified
configuration.
ARGS:
configfile (str) - The filename of the configuration file which is
to be used, set to None by default (then uses the default
settings)
controller (str) - The filename of the controller file which is
to be used, set to None by default. The system cannot be
simulated unless a controller or a control signal sequence is
specified.
RETURNS:
None
"""
Acutator.__init__(self, linear=True)
self.configfile = configfile
self.controlmodule = controlmodule
self.initiate_model(configfile)
self.initiate_controller(controlmodule)
def _reference_generator(self, tf, simtype):
N = int(ceil(float(tf)/self.h))
u = np.zeros((3, N))
if simtype == 'steps':
            # Alternating steps in the theta reference (±1) with thetadot and i
            # set to 0; the step period is set by T below
T = 10. # [s]
for ii in range(N):
if ceil(ii*(tf/T)/float(N)) % 2:
u[0, ii] = 1.
else:
u[0, ii] = -1.
return u
def initiate_model(self, configfile):
# Load parameters from configuration file
J = 0.01
b = 0.1
        K = 0.01
R = 1.
L = 0.5
# Time step
self.h = 0.1
self.nStates = 3
self.nMeasurements = 1
self.nControl = 1
# Continuous time DC model
A = np.array([[0., 1., 0.],[0., -b/J, K/J],[0., -K/L, -R/L]])
B = np.array([[0.],[0.],[1./L]])
        C = np.array([[0.,1.,0.]])
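# Illustrative only: the excerpt above stops right after C, before the model is
# discretized. A minimal sketch of that step, assuming a zero-order hold and a
# direct-feedthrough matrix D of zeros; this is not the original implementation.
def _example_discretize_dc_motor(h=0.1, J=0.01, b=0.1, K=0.01, R=1., L=0.5):
    A = np.array([[0., 1., 0.], [0., -b/J, K/J], [0., -K/L, -R/L]])
    B = np.array([[0.], [0.], [1./L]])
    C = np.array([[0., 1., 0.]])
    D = np.zeros((1, 1))
    # cont2discrete returns the discretized (Ad, Bd, Cd, Dd) plus the time step.
    Ad, Bd, Cd, Dd, _ = cont2discrete((A, B, C, D), h, method='zoh')
    return Ad, Bd, Cd, Dd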
import unittest
import jax
import numpy as np
from fe import estimator_abfe
from timemachine.lib import LangevinIntegrator, potentials, MonteCarloBarostat
from parallel.client import CUDAPoolClient
from md.barostat.utils import get_bond_list, get_group_indices
def get_harmonic_bond(n_atoms, n_bonds):
    atom_idxs = np.arange(n_atoms)
# Python code to run the ABC-SMC algorithm to parametrise the exponential model with cell generations
# making use of OT-I T cells data.
# Reference: "Approximate Bayesian Computation scheme for parameter inference and model selection
# in dynamical systems" by <NAME>. et al. (2008).
# Import the required modules.
import numpy as np
from scipy.linalg import expm
G = 11 # Total number of generations.
dist_gen = 6 # Used to define generation 5+.
n_pars = 4 # Number of parameters in the exponential model.
N0 = 1 # Number of stages in generation 0.
N1 = 1 # Number of stages all generations but 0.
# Reading the data.
data = np.loadtxt("data_OTI.txt")
std_dev = np.loadtxt("std_dev_OTI.txt")
# Define the time points (unit of hours).
t2 = np.array([72,96,120,144,168])
# Define the exponential model with cell generations.
def diag(g,N,l,m):
if g < 5:
return(np.diag([-(l[g]+m[g])]*N) + np.diag([l[g]]*(N-1),-1))
else:
return(np.diag([-(l[5]+m[5])]*N) + np.diag([l[5]]*(N-1),-1))
def matrix(N0,N1,l,m):
M = np.zeros((N0+(G-1)*N1, N0+(G-1)*N1))
M[0:N0,0:N0] = diag(0,N0,l,m)
for i in range(1,G):
M[N0+(i-1)*N1:N0+i*N1,N0+(i-1)*N1:N0+i*N1] = diag(i,N1,l,m)
M[N0,N0-1] = 2*l[0]
for i in range(1,G-1):
if i < 5:
M[N0+i*N1,N0+i*N1-1] = 2*l[i]
else:
M[N0+i*N1,N0+i*N1-1] = 2*l[5]
return(M)
def exp_matrix(N0,N1,inits,times,l,m):
output = np.zeros((len(times),N0+N1*(G-1)))
A = matrix(N0,N1,l,m)
for i in range(len(times)):
sol = np.dot(expm(A*times[i]),inits)
output[i] = sol
return output.T
# Define the functions to use in the ABC-SMC algorithm to generate the first epsilon, to run the first iteration
# and to run all the other iterations.
# As it may be difficult to decide on a reasonably large value of epsilon to use at the first iteration,
# we defined the function below to generate it.
def generate_eps1(nn,rr):
# Empty array to store the distance.
results = np.empty((0))
# Empty array to store the accepted parameters.
params = np.empty((0,n_pars))
for run in range(nn*rr):
# Sample the parameters from uniform prior distributions.
l0, lambd = 10**np.random.uniform(-3,1,2)
l = np.array([lambd for _ in range(dist_gen)])
l[0] = l0
alpha = 10**np.random.uniform(-5,-1)
m = np.zeros(dist_gen)
for i in range(dist_gen):
m[i] = alpha*i
C0 = 10**np.random.uniform(4,6)
inits = np.zeros((N0+(G-1)*N1))
inits[0] = C0
# Run the model to compute the expected number of cells in each generation.
generations = [[] for _ in range(dist_gen)]
modelexp = exp_matrix(N0,N1,inits,t2,l,m)
s0 = sum(modelexp[0:N0])
generations[0].append(s0)
for i in range(1,dist_gen):
if i < 5:
s = sum(modelexp[N0+(i-1)*N1:N0+i*N1])
generations[i].append(s)
else:
s = sum(modelexp[N0+(i-1)*N1:N0+(G-1)*N1])
generations[i].append(s)
# Compute the distance between the model predictions and the experimental data.
generationsravel = np.ravel(generations)
dataravel = np.ravel(data)
std_ravel = np.ravel(std_dev)
distance = np.sqrt(np.sum(((generationsravel-dataravel)/std_ravel)**2))
results = np.hstack((results, distance))
params = np.vstack((params, np.hstack((C0,l0,lambd,alpha))))
# Compute epsilon to use at the first iteration.
epsilon = np.median(results)
return epsilon
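# Illustrative only: the same weighted-Euclidean distance is recomputed inside
# generate_eps1 above and the iteration functions below; a minimal standalone
# version, followed by how the pieces are typically chained (the sample sizes
# are assumptions, not values from the original study).
def _example_distance(model_generations, data, std_dev):
    model = np.ravel(model_generations)
    return np.sqrt(np.sum(((model - np.ravel(data)) / np.ravel(std_dev)) ** 2))
# eps1 = generate_eps1(nn=1000, rr=5)
# ABC_runs = [iteration1(nn=1000)[0]]
# ABC_runs.append(other_iterations(nn=1000, it=0)[0])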
# Define the function for the first iteration of ABC-SMC in which the parameters are sampled
# from the uniform prior distributions.
def iteration1(nn):
# Empty array to store the distance.
results = np.empty((0,1))
# Empty array to store the accepted parameters.
params = np.empty((0,n_pars))
number = 0 # Counter for the sample size.
truns = 0 # Counter for the total number of runs.
while number < nn:
truns+=1
# Sample the parameters from uniform prior distributions.
l0, lambd = 10**np.random.uniform(-3,1,2)
l = np.array([lambd for _ in range(dist_gen)])
l[0] = l0
alpha = 10**np.random.uniform(-5,-1)
m = np.zeros(dist_gen)
for i in range(dist_gen):
m[i] = alpha*i
C0 = 10**np.random.uniform(4,6)
inits = np.zeros((N0+(G-1)*N1))
inits[0] = C0
pars=np.hstack((C0,l0,lambd,alpha))
# Run the model to compute the expected number of cells in each generation.
generations = [[] for _ in range(dist_gen)]
modelexp = exp_matrix(N0,N1,inits,t2,l,m)
s0 = sum(modelexp[0:N0])
generations[0].append(s0)
for i in range(1,dist_gen):
if i < 5:
s = sum(modelexp[N0+(i-1)*N1:N0+i*N1])
generations[i].append(s)
else:
s = sum(modelexp[N0+(i-1)*N1:N0+(G-1)*N1])
generations[i].append(s)
# Compute the distance between the model predictions and the experimental data.
generationsravel = np.ravel(generations)
dataravel = np.ravel(data)
std_ravel = np.ravel(std_dev)
distance = np.sqrt(np.sum(((generationsravel-dataravel)/std_ravel)**2))
        # If the distance is less than epsilon, store the parameter values and increase by one the counter for
# the sample size.
if distance < eps1:
number+=1
results = np.vstack((results, distance))
params = np.vstack((params, pars))
# Compute the weight for each accepted parameter set - at iteration 1, parameter sets have equal weight.
weights = np.empty((0,1))
for i in range(nn):
weights = np.vstack((weights,1/nn))
# Return the results: distance, accepted parameters, weights and total number of runs.
return [np.hstack((results,params,weights)), truns]
# Function for the other iterations of the ABC-SMC algorithm, where the parameter values are sampled
# from the posterior distributions of the previous iteration.
def other_iterations(nn,it):
# Compute uniform areas to sample within in order to perturb the parameters.
ranges = []
for i in range(n_pars):
r1 = np.max(np.log10(ABC_runs[it][:,i+1])) - np.min(np.log10(ABC_runs[it][:,i+1]))
ranges.append(r1)
ranges_arr = np.asarray(ranges)
sigma = 0.1*ranges_arr
# Define epsilon as median of the accepted distance values from previous iteration.
epsilon = np.median(ABC_runs[it][:,0])
# To use when sampling the new parameters.
p_list = [i for i in range(nn)]
# Define upper and lower bounds of the prior distributions for each parameter in the model.
lower_bounds = np.hstack((10**4,10**(-3),10**(-3),10**(-5)))
upper_bounds = np.hstack((10**6,10,10,10**(-1)))
# Empty array to store the distance.
results = np.empty((0))
# Empty array to store accepted parameters.
params = np.empty((0,n_pars))
# Empty array to store the prior samples.
priors_abc = np.empty((0,n_pars))
# Empty array to store the weights.
weights_arr = np.empty((0))
number = 0 # Counter for the sample size.
truns = 0 # Counter for the total number of runs.
while number < nn:
truns+=1
check = 0
# The following while loop is to sample the parameters from the posterior distributions of the previous
# iteration. Then the parameters are perturbed making use of a uniform perturbation kernel.
        # If the new parameters lie within the initial prior ranges, they are used to obtain model predictions,
# otherwise they are sampled again.
while check < 1:
# Randomly choose a parameter set from the posterior obtained from the previous iteration.
choice = np.random.choice(p_list,1,p=ABC_runs[it][:,n_pars+1])
prior_sample = ABC_runs[it][:,range(1,n_pars+1)][choice]
# Generate new parameters through perturbation.
parameters = []
for i in range(n_pars):
lower = np.log10(prior_sample[0,i])-sigma[i]
upper = np.log10(prior_sample[0,i])+sigma[i]
pars = np.random.uniform(lower,upper)
parameters.append(10**pars)
# Check that the new parameters lie within the initial prior ranges.
check_out = 0
for ik in range(n_pars):
if parameters[ik] < lower_bounds[ik] or parameters[ik] > upper_bounds[ik]:
check_out = 1
if check_out == 0:
check+=1
C0 = float(parameters[0])
l0, lambd = parameters[1:3]
l = np.array([lambd for _ in range(dist_gen)])
l[0] = l0
        m = np.zeros(dist_gen)
import numpy as np
from torch.utils.data.sampler import RandomSampler
class Customsampler(RandomSampler):
def __init__(self, data_source, replacement=False, num_samples=None, batch_size=None, generator=None):
super(Customsampler, self).__init__(data_source=data_source, replacement=replacement,
num_samples=num_samples, generator=generator)
self.l = data_source.num_classes
self.g = data_source.num_groups
self.nbatch_size = batch_size // (self.l*self.g)
self.num_data = data_source.num_data
pos = np.unravel_index(np.argmax(self.num_data), self.num_data.shape)
self.max_pos = pos[0] * self.g + pos[1]
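    # Illustrative only: with num_classes * num_groups = 4 cells and batch_size = 32,
    # nbatch_size above works out to 32 // 4 = 8 indices per (group, class) cell,
    # so each batch is drawn evenly across cells; a minimal check of that arithmetic.
    @staticmethod
    def _example_cell_batch_size(batch_size=32, num_classes=2, num_groups=2):
        return batch_size // (num_classes * num_groups)  # -> 8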
def __iter__(self):
final_list = []
index_list = []
total_num = 0
for i in range(self.l*self.g):
tmp = np.arange(self.num_data[i//self.l, i%self.l]) + total_num
np.random.shuffle(tmp)
index_list.append(list(tmp))
if i != self.max_pos:
while len(index_list[-1]) < np.max(self.num_data):
tmp = np.arange(self.num_data[i//self.l, i%self.l]) + total_num
                    np.random.shuffle(tmp)
#!/usr/bin/env python
from datetime import datetime
import copy
import traceback
import os, subprocess, time, signal
#from cv_bridge import CvBridge
import gym
import math
import random
# u
import numpy as np
import cv2 as cv
import rospy
# Brings in the SimpleActionClient
import actionlib
# Brings in the .action file and messages used by the move base action
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Image
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Point
from geometry_msgs.msg import Point32
from geometry_msgs.msg import TransformStamped
from rosgraph_msgs.msg import Clock
from costmap_converter.msg import ObstacleArrayMsg
from costmap_converter.msg import ObstacleMsg
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import Twist
from gazebo_msgs.srv import SetModelState
from gym.utils import seeding
import threading
import _thread
from squaternion import Quaternion
from simple_pid import PID
import pickle
import logging
logger = logging.getLogger(__name__)
# Environment Parameters
class EnvConfig:
# Boolean to make robots spawn at constant locations
USE_TESTING = False
# If False, Moves obstacles out of the way
USE_OBSTACLES = False
# Pattern to init obstacles
# 0: Places obstacles between robot and person
# 1: Places obstacles randomly within circle
OBSTACLE_MODE = 1
# Radius(meters) away from person robot for random placement(mode 1) of objects
OBSTACLE_RADIUS_AWAY = 3
# Obstacle size
OBSTACLE_SIZE = 0.5
# Allows/Denies Robot TEB Local Planner to avoid obstacles
SEND_TEB_OBSTACLES = True
# Gets person robot to use move base
PERSON_USE_MB = True
# Episode Length
EPISODE_LEN = 15
# Returns Human State only in get_observations if True
RETURN_HINN_STATE = False # was True
# Size to reduce laser scan to
SCAN_REDUCTION_SIZE = 20
# If True, calls init_simulator() on set_agent() call
INIT_SIM_ON_AGENT = False
# If True, moves jackal bot out of the way and puts obstacles around person
TRAIN_HINN = False
# For NON-HINN OUTPUT ONLY: Outputs laser scan if true
OUTPUT_OBSTACLES_IN_STATE = True
# Evaluation Mode, Removes stochasticity when initializing environment
EVALUATION_MODE = True
class History():
def __init__(self, window_size, update_rate, save_rate=10):
self.idx = 0
self.update_rate = update_rate
self.save_rate = save_rate
self.lock = threading.Lock()
self.memory_size = int(math.ceil(save_rate/update_rate*window_size)+1)
self.data = [None for x in range(self.memory_size)]
self.prev_add_time = rospy.Time.now().to_sec() - 1
self.window_size = window_size
self.avg_frame_rate = None
self.time_data_ = []
def add_element(self, element):
"""
element: the data that we put inside the history data array
"""
if abs(rospy.Time.now().to_sec() - self.prev_add_time) < 1./self.save_rate:
return
with self.lock:
self.idx = (self.idx + 1) % self.window_size
self.prev_add_time = rospy.Time.now().to_sec()
if self.data[self.idx] is None:
for idx in range(self.memory_size):
self.data[idx] = element
self.data[self.idx] = element
if not len(self.time_data_) > 50:
self.time_data_.append(self.prev_add_time)
if len(self.time_data_) > 3:
prev_t = self.time_data_[0]
time_intervals = []
for t in self.time_data_[1:]:
time_intervals.append(t - prev_t)
prev_t = t
self.avg_frame_rate = 1.0 / np.average(time_intervals)
def get_elemets(self):
return_data = []
while self.avg_frame_rate is None:
time.sleep(0.1)
skip_frames = -int(math.ceil(self.avg_frame_rate / self.update_rate))
with self.lock:
index = self.idx # (self.idx - 1)% self.window_size
if self.window_size * abs(skip_frames) >= self.memory_size:
rospy.logerr("error in get element memory not enough update rate{} avg_frame_rate{} mem_size {} skipf: {}".format(self.update_rate, self.avg_frame_rate, self.memory_size, skip_frames))
for i in range(self.window_size):
return_data.append(self.data[index])
index = (index + skip_frames) % self.window_size
return return_data
def get_latest(self):
with self.lock:
return self.data[self.idx]
class Robot():
def __init__(self, name, max_angular_speed=1, max_linear_speed=1, relative=None, agent_num=None, use_goal=False, use_movebase=False, use_jackal=False, window_size=10, is_testing=False):
self.name = name
self.use_jackal = use_jackal
self.init_node = False
self.alive = True
self.prev_call_gazeboros_ = None
if relative is None:
relative = self
self.relative = relative
self.is_testing = is_testing
if self.is_testing:
self.all_pose_ = []
self.last_time_added = rospy.Time.now().to_sec()
self.log_history = []
self.agent_num = agent_num
self.init_node = True
self.deleted = False
self.update_rate_states = 2.0
self.window_size_history = window_size
self.current_vel_ = Twist()
self.goal = {"pos": None, "orientation": None}
self.use_goal = use_goal
self.use_movebase = use_movebase
self.max_angular_vel = max_angular_speed
self.max_linear_vel = max_linear_speed
self.max_rel_pos_range = 5.0 # meter
self.width_laserelement_image = 100
self.height_laser_image = 50
self.state_ = {'position': (None, None),
'orientation': None}
if self.use_jackal:
self.cmd_vel_pub = rospy.Publisher(
'/{}/jackal_velocity_controller/cmd_vel'.format(name), Twist, queue_size=1)
else:
self.cmd_vel_pub = rospy.Publisher(
'/{}/cmd_vel'.format(name), Twist, queue_size=1)
if "tb3" in self.name and self.use_movebase:
# Create an action client called "move_base" with action definition file "MoveBaseAction"
self.action_client_ = actionlib.SimpleActionClient(
'/move_base_{}'.format(self.agent_num), MoveBaseAction)
# Waits until the action server has started up and started listening for goals.
self.action_client_.wait_for_server(rospy.rostime.Duration(0.4))
else:
self.action_client_ = None
if "person" == self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(
self.window_size_history, self.update_rate_states)
self.orientation_history = History(
self.window_size_history, self.update_rate_states)
self.velocity_history = History(
self.window_size_history, self.update_rate_states)
self.is_collided = False
self.is_pause = False
self.reset = False
self.scan_image = None
def calculate_ahead(self, distance):
x = self.state_['position'][0] + \
math.cos(self.state_["orientation"]) * distance
y = self.state_['position'][1] + \
math.sin(self.state_["orientation"]) * distance
return (x, y)
def movebase_cancel_goals(self):
self.action_client_.cancel_all_goals()
self.stop_robot()
def movebase_client_goal(self, goal_pos, goal_orientation):
# Creates a new goal with the MoveBaseGoal constructor
move_base_goal = MoveBaseGoal()
move_base_goal.target_pose.header.frame_id = "tb3_{}/odom".format(self.agent_num)
move_base_goal.target_pose.header.stamp = rospy.Time.now()
move_base_goal.target_pose.pose.position.x = goal_pos[0]
move_base_goal.target_pose.pose.position.y = goal_pos[1]
quaternion_rotation = Quaternion.from_euler(0, goal_orientation, 0)
move_base_goal.target_pose.pose.orientation.x = quaternion_rotation[3]
move_base_goal.target_pose.pose.orientation.y = quaternion_rotation[1]
move_base_goal.target_pose.pose.orientation.z = quaternion_rotation[2]
move_base_goal.target_pose.pose.orientation.w = quaternion_rotation[0]
# Sends the move_base_goal to the action server.
self.action_client_.send_goal(move_base_goal)
# Waits for the server to finish performing the action.
#wait = self.action_client_.wait_for_result(rospy.rostime.Duration(0.4))
# If the result doesn't arrive, assume the Server is not available
# if not wait:
# rospy.logerr("Action server not available!")
# else:
# # Result of executing the action
# return self.action_client_.get_result()
def get_pos(self):
counter_problem = 0
while self.state_['position'] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
return self.state_['position']
def get_orientation(self):
counter_problem = 0
while self.state_['orientation'] is None:
if self.reset:
return None
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
return self.state_['orientation']
def is_current_state_ready(self):
return (self.state_['position'][0] is not None)
def is_observation_ready(self):
return (self.pos_history.avg_frame_rate is not None and
self.orientation_history.avg_frame_rate is not None and
self.velocity_history.avg_frame_rate is not None)
def update(self, init_pose):
self.alive = True
self.goal = {"pos": None, "orientation": None}
if "person" == self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(self.window_size_history, self.update_rate_states)
self.orientation_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history.add_element((0,0))
self.pos_history.add_element((init_pose["pos"][0],init_pose["pos"][1]))
self.orientation_history.add_element(init_pose["orientation"])
self.log_history = []
if self.is_testing:
self.all_pose_ = []
#self.prev_call_gazeboros_ = None
#self.is_collided = False
self.is_pause = False
self.reset = False
def add_log(self, log):
self.log_history.append(log)
def remove(self):
self.reset = True
def set_state(self, state):
self.state_["position"] = state["position"]
self.state_["orientation"] = state["orientation"]
self.state_["velocity"] = state["velocity"]
self.orientation_history.add_element(state["orientation"])
self.pos_history.add_element(state["position"])
self.velocity_history.add_element(state["velocity"])
if self.is_testing and abs(rospy.Time.now().to_sec() - self.last_time_added) > 0.01:
self.all_pose_.append(self.state_.copy())
self.last_time_added = rospy.Time.now().to_sec()
def get_state(self):
return self.state_
def get_velocity(self):
return self.velocity_history.get_latest()
def pause(self):
self.is_pause = True
self.stop_robot()
def resume(self):
self.is_pause = False
def take_action(self, action, target_orientation=None):
if self.is_pause:
return
if self.use_goal:
if "person" in self.name:
pose = self.get_pos()
pos_global = [pose[0]+action[0], pose[1]+action[1]]
else:
pos = GazeborosEnv.denormalize(action[0:2], self.max_rel_pos_range)
pos_global = GazeborosEnv.get_global_position(pos, self.relative)
if target_orientation:
self.goal["orientation"] = target_orientation
else:
self.goal["orientation"] = self.get_orientation()
self.goal["pos"] = pos_global
if self.use_movebase:
self.movebase_client_goal(pos_global, self.goal["orientation"])
else:
linear_vel = max(min(action[0]*self.max_linear_vel, self.max_linear_vel), -self.max_linear_vel)
angular_vel = max(min(action[1]*self.max_angular_vel, self.max_angular_vel), -self.max_angular_vel)
cmd_vel = Twist()
cmd_vel.linear.x = linear_vel #float(self.current_vel_.linear.x -(self.current_vel_.linear.x - linear_vel)*0.9)
cmd_vel.angular.z = angular_vel #-float(self.current_vel_.angular.z - (self.current_vel_.angular.z - angular_vel)*0.9)
self.current_vel_ = cmd_vel
self.cmd_vel_pub.publish(cmd_vel)
def stop_robot(self):
self.cmd_vel_pub.publish(Twist())
def angle_distance_to_point(self, pos):
current_pos = self.get_pos()
if current_pos[0] is None:
return None, None
angle = math.atan2(pos[1] - current_pos[1], pos[0] - current_pos[0])
distance = math.hypot(pos[0] - current_pos[0], pos[1] - current_pos[1])
angle = (angle - self.state_["orientation"] + math.pi) % (math.pi * 2) - math.pi
return angle, distance
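    # Illustrative only: the modulo expression above maps any angle difference into
    # [-pi, pi); a tiny standalone sketch of that wrapping, not part of the original class.
    @staticmethod
    def _example_wrap_to_pi(angle):
        # e.g. _example_wrap_to_pi(3 * math.pi / 2) == -math.pi / 2
        return (angle + math.pi) % (2 * math.pi) - math.pi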
def publish_cmd_vel(self, linear, angular):
cmd_vel = Twist()
angular_vel = min(max(angular, -self.max_angular_vel), self.max_angular_vel)
linear_vel = min(max(linear, 0), self.max_linear_vel)
cmd_vel.linear.x = float(linear_vel)
cmd_vel.angular.z = float(angular_vel)
self.cmd_vel_pub.publish(cmd_vel)
def use_selected_person_mod(self, person_mode):
while person_mode <= 6:
if self.is_pause:
self.stop_robot()
return
if self.reset:
self.stop_robot()
return
angular_vel = 0
linear_vel = 0
if person_mode == 0:
linear_vel = self.max_linear_vel
if person_mode == 1:
#linear_vel = self.max_linear_vel * random.random()
linear_vel = self.max_linear_vel * 0.35
elif person_mode == 2:
linear_vel = self.max_linear_vel/2
angular_vel = self.max_angular_vel/6
elif person_mode == 3:
linear_vel = self.max_linear_vel/2
angular_vel = -self.max_angular_vel/6
elif person_mode == 4:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = -self.max_angular_vel/6
elif person_mode == 5:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = self.max_angular_vel/6
elif person_mode == 6:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = angular_vel - (angular_vel - (random.random()-0.5)*2)/2.
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.002)
def go_to_goal(self):
while True:
if self.reset:
return
while self.goal["pos"] is None:
time.sleep(0.1)
continue
diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
time_prev = rospy.Time.now().to_sec()
while not distance < 0.1 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
if self.is_pause:
self.stop_robot()
return
if self.reset:
self.stop_robot()
return
diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
if distance is None:
return
if self.reset:
return
angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel), self.max_angular_vel)
linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 1.5)
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.01)
self.stop_robot()
def go_to_pos(self, pos, stop_after_getting=False):
if self.is_pause:
self.stop_robot()
return
if self.reset:
return
diff_angle, distance = self.angle_distance_to_point(pos)
if distance is None:
print(self.get_pos())
return
time_prev = rospy.Time.now().to_sec()
while not distance < 0.2 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
if self.is_pause:
self.stop_robot()
return
if self.reset:
return
diff_angle, distance = self.angle_distance_to_point(pos)
if distance is None:
return
if self.reset:
return
angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel),self.max_angular_vel)
linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 2)
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.01)
if stop_after_getting:
self.stop_robot()
def get_goal(self):
counter_problem = 0
while self.goal["pos"] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logwarn("waiting for goal to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.01)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
# if not self.use_movebase:
# pos = GazeborosEnv.get_global_position(self.goal["pos"], self)
# goal = {"pos":pos, "orientation":None}
# else:
# goal = self.goal
return self.goal
def get_laser_image(self):
return np.expand_dims(self.scan_image, axis=2)
class GazeborosEnv(gym.Env):
def __init__(self, is_evaluation=False):
self.is_evaluation_ = is_evaluation
# self.bridge = CvBridge()
# self.image_pub = rospy.Publisher("image_observation", Image)
# self.image_pub_gt = rospy.Publisher("image_observation_gt", Image)
self.is_reseting = True
self.use_path = True
self.use_jackal = True
self.lock = _thread.allocate_lock()
self.path_follower_test_settings = {0:(0,0, "straight",False), 1:(2,0, "right", False), 2:(3,0, "left", False),\
3:(1,4, "straight_Behind", False), 4:(2,3, "right_behind", False), 5:(3,3, "left_behind", False), 6:(7,2, "traj_1", True, True),\
7:(7, 12, "traj_2", True, True), 8:(7, 43, "traj_3", True),\
9:(2,1, "right_left", False), 10:(2,2, "right_right", False),\
11:(3,1, "left_left", False), 12:(3,2, "left_right", False)\
}
#self.path_follower_test_settings = {0:(7, 43, "traj_3", True)#(7,2, "traj_1", True, True), 1:(7, 12, "traj_2", True, True)}
self.is_testing = EnvConfig.USE_TESTING
self.small_window_size = False
self.use_predifined_mode_person = True
self.use_goal = True
self.use_orientation_in_observation = True
self.collision_distance = 0.3
self.best_distance = 1.5
self.robot_mode = 0
self.window_size = 10
self.use_movebase = True
self.use_reachability = False
self.use_obstacles = EnvConfig.USE_OBSTACLES
self.obstacle_mode = EnvConfig.OBSTACLE_MODE
self.obstacle_names = []
self.person_scan = [1000.0 for i in range(EnvConfig.SCAN_REDUCTION_SIZE)]
self.person_use_move_base = EnvConfig.PERSON_USE_MB
self.person_mode = 0
self.position_thread = None
self.eval_x = -4
self.eval_y = -4
self.eval_orientation = 0
self.robot_eval_x = -1
self.robot_eval_y = -1
self.path_follower_current_setting_idx = 0
self.use_supervise_action = False
self.mode_person = 0
self.use_noise = True
self.is_use_test_setting = False
self.use_reverse = True
if self.small_window_size:
self.window_size = 5
if self.is_testing:
self.use_noise = False
self.use_reverse = False
self.is_use_test_setting = True
self.fallen = False
self.is_max_distance = False
self.use_random_around_person_ = False
self.max_mod_person_ = 7
self.wait_observation_ = 0
        # being used for observation visualization
self.center_pos_ = (0, 0)
self.colors_visualization = cv.cvtColor(cv.applyColorMap(np.arange(0, 255, dtype=np.uint8), cv.COLORMAP_WINTER), cv.COLOR_BGR2RGB).reshape(255, 3).tolist()
self.color_index = 0
self.first_call_observation = True
self.test_simulation_ = False
self.min_distance = 1
self.max_distance = 2.5
if self.test_simulation_ or self.is_evaluation_:
self.max_numb_steps = 80
elif self.is_use_test_setting:
self.max_numb_steps = 100
else:
self.max_numb_steps = 80
self.reward_range = [-1, 1]
self.reachabilit_value = None
if self.use_reachability:
with open('data/reachability.pkl', 'rb') as f:
self.reachabilit_value = pickle.load(f)
def get_person_pos(self):
theta = self.person.get_orientation()
xy = self.person.get_pos()
return [xy[0], xy[1], theta]
def get_system_velocities(self):
robot_state = self.robot.get_state()
person_state = self.person.get_state()
robot_lin_velocity = robot_state["velocity"][0]
robot_angular_velocity = robot_state["velocity"][1]
robot_orientation = robot_state["orientation"]
person_lin_velocity = person_state["velocity"][0]
person_angular_velocity = person_state["velocity"][1]
x_distance_between = person_state["position"][0] - robot_state["position"][0]
y_distance_between = person_state["position"][1] - robot_state["position"][1]
dx_dt = -person_lin_velocity + robot_lin_velocity * math.cos(robot_orientation) + person_angular_velocity * y_distance_between
dy_dt = robot_lin_velocity * math.sin(robot_orientation) - person_angular_velocity * x_distance_between
da_dt = robot_angular_velocity - person_angular_velocity
return (dx_dt, dy_dt, da_dt)
def get_test_path_number(self):
rospy.loginfo("current path idx: {}".format(self.path_follower_current_setting_idx))
return self.path_follower_test_settings[self.path_follower_current_setting_idx][2]
def use_test_setting(self):
self.is_use_test_setting = True
def set_person_mode(self, setting):
self.person_mode = setting
def set_use_obstacles(self, setting):
self.use_obstacles = setting
def set_agent(self, agent_num):
try:
self.node = rospy.init_node('gym_gazeboros_{}'.format(agent_num))
except Exception as e:
rospy.logerr("probably already init in another node {}".format(e))
rospy.wait_for_service('/gazebo/set_model_state')
self.set_model_state_sp = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
date_time = datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
self.agent_num = agent_num
self.obstacle_pub_ = rospy.Publisher('/move_base_node_tb3_{}/TebLocalPlannerROS/obstacles'.format(self.agent_num), ObstacleArrayMsg, queue_size=1)
self.person_obstacle_pub_ = rospy.Publisher('/move_base_node_person_{}/TebLocalPlannerROS/obstacles'.format(self.agent_num), ObstacleArrayMsg, queue_size=1)
self.create_robots()
self.path = {}
self.paths = []
self.log_file = None
try:
with open('data/person_trajectories_rl.pkl', 'rb') as f:
paths = pickle.load(f)
for path in paths:
angle_person = path['start_person']['orientation']
for angle in [x for x in range(0, 360, 10)]:
for angle_robot_person in [x for x in range(0, 360, 90)]:
path_angle = path.copy()
angle_from_person = np.deg2rad(angle) + angle_person
angle_person_robot = np.deg2rad(angle_robot_person) + angle_person
path_angle['start_robot']['pos'] = (path_angle['start_person']['pos'][0] + math.cos(angle_from_person)*2, path_angle['start_person']['pos'][1] + math.sin(angle_from_person)*2)
path_angle['start_robot']['orientation'] = angle_person_robot
path_angle['name'] = path['name'] + " " + str(angle) +" " + str(angle_robot_person)
self.paths.append(path_angle)
self.path_idx = -1
self.path = self.paths[self.path_idx]
except Exception as e:
print("error happend in writing {}".format(e))
self.agent_num = agent_num
self.state_cb_prev_time = None
self.model_states_sub = rospy.Subscriber("/gazebo/model_states", ModelStates, self.model_states_cb)
self.scan_sub = rospy.Subscriber("/person_{}/scan".format(self.agent_num), LaserScan, self.scan_cb)
if EnvConfig.INIT_SIM_ON_AGENT:
with self.lock:
self.init_simulator()
def scan_cb(self, msg):
reduced_size = EnvConfig.SCAN_REDUCTION_SIZE
large_n = 1000.0
div = int(len(msg.ranges)/reduced_size)
reduced_scan = []
count = 0
a_size = 0
avg = 0
# Reduce from 720 to reduced size
for r in msg.ranges:
if r > 0 and r < 20:
avg += r
a_size += 1
count += 1
if count == div:
if a_size != 0:
avg /= a_size
else:
avg = large_n
reduced_scan.append(avg)
count = 0
a_size = 0
avg = 0
self.person_scan = reduced_scan
pass
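    # Illustrative only: a vectorized sketch of the reduction done in scan_cb above,
    # assuming the scan length is a multiple of the reduced size; not part of the
    # original environment.
    @staticmethod
    def _example_reduce_scan(ranges, reduced_size=EnvConfig.SCAN_REDUCTION_SIZE, large_n=1000.0):
        scan = np.asarray(ranges, dtype=float)
        div = len(scan) // reduced_size
        chunks = scan[:div * reduced_size].reshape(reduced_size, div)
        valid = (chunks > 0) & (chunks < 20)
        counts = valid.sum(axis=1)
        sums = np.where(valid, chunks, 0.0).sum(axis=1)
        # Chunks with no valid return fall back to the sentinel value, as in scan_cb.
        return np.where(counts > 0, sums / np.maximum(counts, 1), large_n).tolist()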
def create_obstacle_msg(self, name, pose):
obstacle_msg = ObstacleMsg()
obstacle_msg.id = 1
point = Point32()
point.x = pose.position.x
point.y = pose.position.y
point.z = pose.position.z
obstacle_msg.polygon.points.append(point)
# TODO probably needs some tweaking but works for regular cyn/box
# - I think the robot could be ok to get closer to the obstacles?
# TODO polygon for box instead of using a circle
obstacle_msg.radius = EnvConfig.OBSTACLE_SIZE/2
obstacle_msg.orientation.x = pose.orientation.x
obstacle_msg.orientation.y = pose.orientation.y
obstacle_msg.orientation.z = pose.orientation.z
obstacle_msg.orientation.w = pose.orientation.w
obstacle_msg.velocities.twist.linear.x = 0
obstacle_msg.velocities.twist.angular.z = 0
return obstacle_msg
def model_states_cb(self, states_msg):
# Grab Obstacle Names for Agent
if not self.obstacle_names:
for name in states_msg.name:
if "obstacle" in name:
for char in name:
if char.isdigit():
if int(char) == self.agent_num:
self.obstacle_names.append(name)
obstacle_msg_array = ObstacleArrayMsg()
obstacle_msg_array.header.stamp = rospy.Time.now()
obstacle_msg_array.header.frame_id = "tb3_{}/odom".format(self.agent_num)
person_obs_msg_array = ObstacleArrayMsg()
person_obs_msg_array.header.stamp = rospy.Time.now()
person_obs_msg_array.header.frame_id = "person_{}/odom".format(self.agent_num)
for model_idx in range(len(states_msg.name)):
found = False
for robot in [self.robot, self.person]:
if states_msg.name[model_idx] == robot.name:
found = True
break
elif "obstacle" in states_msg.name[model_idx] and EnvConfig.SEND_TEB_OBSTACLES:
obstacle_msg_array.obstacles.append(
self.create_obstacle_msg(
states_msg.name[model_idx], states_msg.pose[model_idx]
)
)
person_obs_msg_array.obstacles.append(
self.create_obstacle_msg(
states_msg.name[model_idx], states_msg.pose[model_idx]
)
)
if not found:
continue
pos = states_msg.pose[model_idx]
euler = Quaternion(w=pos.orientation.w, x=pos.orientation.x, y=pos.orientation.y, z=pos.orientation.z).to_euler()
if EnvConfig.PERSON_USE_MB:
orientation = euler[2]
else:
# Preserve how Payam had it setup...
orientation = euler[0]
fall_angle = np.deg2rad(90)
if abs(abs(euler[1]) - fall_angle)< 0.1 or abs(abs(euler[2]) - fall_angle)<0.1:
self.fallen = True
# get velocity
twist = states_msg.twist[model_idx]
linear_vel = twist.linear.x
angular_vel = twist.angular.z
pos_x = pos.position.x
pos_y = pos.position.y
state = {}
state["velocity"] = (linear_vel, angular_vel)
state["position"] = (pos_x, pos_y)
state["orientation"] = orientation
robot.set_state(state)
if self.use_movebase:
obstacle_msg = ObstacleMsg()
obstacle_msg.id = 0
for x in range (5):
for y in range (5):
point = Point32()
point.x = pos.position.x + (x-2)*0.1
point.y = pos.position.y + (y-2)*0.1
point.z = pos.position.z
obstacle_msg.polygon.points.append(point)
obstacle_msg.orientation.x = pos.orientation.x
obstacle_msg.orientation.y = pos.orientation.y
obstacle_msg.orientation.z = pos.orientation.z
obstacle_msg.orientation.w = pos.orientation.w
obstacle_msg.velocities.twist.linear.x = twist.linear.x
                obstacle_msg.velocities.twist.angular.z = twist.angular.z
if robot.name == self.person.name:
obstacle_msg.header = obstacle_msg_array.header
obstacle_msg_array.obstacles.append(obstacle_msg)
else:
obstacle_msg.header = person_obs_msg_array.header
person_obs_msg_array.obstacles.append(obstacle_msg)
self.obstacle_pub_.publish(obstacle_msg_array)
self.person_obstacle_pub_.publish(person_obs_msg_array)
def create_robots(self):
self.person = Robot('person_{}'.format(self.agent_num),
max_angular_speed=1, max_linear_speed=.6, agent_num=self.agent_num, window_size=self.window_size, is_testing=self.is_testing, use_goal=self.use_goal, use_movebase=self.use_movebase)
relative = self.person
self.robot = Robot('tb3_{}'.format(self.agent_num),
max_angular_speed=1.8, max_linear_speed=0.8, relative=relative, agent_num=self.agent_num, use_goal=self.use_goal, use_movebase=self.use_movebase ,use_jackal=self.use_jackal, window_size=self.window_size, is_testing=self.is_testing)
def find_random_point_in_circle(self, radious, min_distance, around_point):
max_r = 2
r = (radious - min_distance) * math.sqrt(random.random()) + min_distance
theta = random.random() * 2 * math.pi
x = around_point[0] + r * math.cos(theta)
y = around_point[1] + r * math.sin(theta)
return (x, y)
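    # Illustrative only: the sqrt in find_random_point_in_circle is what makes the
    # samples (for min_distance == 0) uniform over the disc area instead of
    # clustering near the centre; a minimal standalone sketch, not original code.
    @staticmethod
    def _example_uniform_point_in_disc(radius, around_point=(0, 0)):
        r = radius * math.sqrt(random.random())
        theta = random.random() * 2 * math.pi
        return (around_point[0] + r * math.cos(theta), around_point[1] + r * math.sin(theta))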
def set_mode_person_based_on_episode_number(self, episode_number):
if episode_number < 500:
self.mode_person = 0
elif episode_number < 510:
self.mode_person = 1
elif episode_number < 700:
self.mode_person = 3
elif episode_number < 900:
self.mode_person = 5
elif episode_number < 1000:
self.mode_person = 6
else:
#self.mode_person = 7
if random.random() > 0.5:
self.mode_person = 7
else:
self.mode_person = random.randint(0, 6)
def get_init_pos_robot_person(self):
if self.is_evaluation_:
idx_start = 0
elif self.is_use_test_setting:
idx_start = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
else:
idx_start = random.randint(0, len(self.path["points"]) - 20)
self.current_path_idx = idx_start
if not self.is_use_test_setting and self.use_reverse and random.random() > 0.5:
self.path["points"].reverse()
if self.person_use_move_base:
if EnvConfig.EVALUATION_MODE:
if self.eval_x > 4:
self.eval_x = -4
self.eval_y = -4
self.eval_orientation = 0
self.robot_eval_x = -1
self.robot_eval_y = -1
if self.robot_eval_x > 1:
self.robot_eval_x = -1
self.robot_eval_y = 1
init_pos_person = {"pos": (self.eval_x, self.eval_y), "orientation":self.eval_orientation}
init_pos_robot = {"pos": (self.robot_eval_x, self.robot_eval_y), "orientation":self.eval_orientation}
self.eval_x += 1
self.eval_y += 1
self.eval_orientation += math.pi/4
self.robot_eval_x += 2
self.robot_eval_y += 2
return init_pos_robot, init_pos_person
else:
x = random.uniform(-3,3)
y = random.uniform(-3,3)
init_pos_person = {"pos": (x, y), "orientation":random.uniform(0, math.pi)}
random_pos_robot = self.find_random_point_in_circle(1.5, 2.5, init_pos_person["pos"])
init_pos_robot = {"pos": random_pos_robot, "orientation":random.uniform(0, math.pi)}
return init_pos_robot, init_pos_person
if self.is_evaluation_:
init_pos_person = self.path["start_person"]
init_pos_robot = self.path["start_robot"]
elif self.is_use_test_setting and not self.path_follower_test_settings[self.path_follower_current_setting_idx][3]:
init_pos_person = {"pos": (0, 0), "orientation": 0}
mode = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
if mode == 0:
orinetation_person_rob = 0
elif mode == 1:
orinetation_person_rob = -math.pi / 4.
elif mode == 2:
orinetation_person_rob = math.pi / 4.
elif mode == 3:
orinetation_person_rob = -math.pi
else:
orinetation_person_rob = math.pi/8*7
pos_robot = (1.5*math.cos(orinetation_person_rob), 1.5*math.sin(orinetation_person_rob))
init_pos_robot = {"pos": pos_robot, "orientation": 0}
elif not self.use_path:
init_pos_person = {"pos": (0, 0), "orientation": random.random()*2*math.pi - math.pi}
ahead_person = (init_pos_person['pos'][0] + math.cos(init_pos_person["orientation"]) * 2, init_pos_person['pos'][1] + math.sin(init_pos_person["orientation"]) * 2)
random_pos_robot = self.find_random_point_in_circle(1.5, 2.5, init_pos_person["pos"])
init_pos_robot = {"pos": random_pos_robot,\
"orientation": init_pos_person["orientation"]}#random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
elif self.use_random_around_person_:
init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
init_pos_robot = {"pos": self.find_random_point_in_circle(1.5, 1, self.path["points"][idx_start]),\
"orientation": random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
else:
init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
if self.is_use_test_setting and len(self.path_follower_test_settings[self.path_follower_current_setting_idx])>4 and self.path_follower_test_settings[self.path_follower_current_setting_idx][4] :
orinetation_person_rob = math.pi/2.2
pos_robot = (self.path["points"][idx_start][0] + 2*math.cos(orinetation_person_rob+init_pos_person["orientation"]), self.path["points"][idx_start][1] + 2*math.sin(orinetation_person_rob+init_pos_person["orientation"]))
init_pos_robot = {"pos": pos_robot, "orientation":self.calculate_angle_using_path(idx_start+5)}
else:
idx_robot = idx_start + 1
while (math.hypot(self.path["points"][idx_robot][1] - self.path["points"][idx_start][1],
self.path["points"][idx_robot][0] - self.path["points"][idx_start][0]) < 1.6):
idx_robot += 1
init_pos_robot = {"pos": self.path["points"][idx_robot],\
"orientation": self.calculate_angle_using_path(idx_robot)}
if not self.is_testing:
init_pos_robot["pos"] = (init_pos_robot["pos"][0]+ random.random()-0.5, \
init_pos_robot["pos"][1]+ random.random()-0.5)
init_pos_robot["orientation"] = GazeborosEnv.wrap_pi_to_pi(init_pos_robot["orientation"] + random.random()-0.5)
return init_pos_robot, init_pos_person
def set_marker_pose(self, xy):
pose = {"pos": (xy[0], xy[1]), "orientation": 0}
self.set_pos("marker", pose)
def set_pos(self, name, pose):
set_model_msg = ModelState()
set_model_msg.model_name = name
self.prev_action = (0, 0)
quaternion_rotation = Quaternion.from_euler(0, pose["orientation"], 0)
set_model_msg.pose.orientation.x = quaternion_rotation[3]
set_model_msg.pose.orientation.y = quaternion_rotation[1]
set_model_msg.pose.orientation.z = quaternion_rotation[2]
set_model_msg.pose.orientation.w = quaternion_rotation[0]
if self.use_jackal and "tb3" in name:
set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.1635
elif "marker" in name:
set_model_msg.pose.position.z = 1.6
else:
set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.099
set_model_msg.pose.position.x = pose["pos"][0]
set_model_msg.pose.position.y = pose["pos"][1]
rospy.wait_for_service('/gazebo/set_model_state')
self.set_model_state_sp(set_model_msg)
def get_obstacle_init_pos(self, init_pos_robot, init_pos_person):
num_obstacles = len(self.obstacle_names)
out_of_the_way_pose = {"pos": (15,15), "orientation":0}
if not self.use_obstacles:
return [out_of_the_way_pose for i in range(num_obstacles)]
elif self.obstacle_mode == 0:
# Place obstacles between the robot and the person
# Calculate the distance between them and subtract some buffer room
x_range = abs(init_pos_robot["pos"][0] - init_pos_person["pos"][0])
y_range = abs(init_pos_robot["pos"][1] - init_pos_person["pos"][1])
if x_range != 0:
x_range -= EnvConfig.OBSTACLE_SIZE
if y_range != 0:
y_range -= EnvConfig.OBSTACLE_SIZE
# Check if we have enough space for obstacles between robots
x_buffer_space = y_buffer_space = -1
num_obs_to_place = num_obstacles + 1
while x_buffer_space < 0 and y_buffer_space < 0:
num_obs_to_place -= 1
x_buffer_space = x_range - (EnvConfig.OBSTACLE_SIZE * num_obs_to_place)
y_buffer_space = y_range - ((EnvConfig.OBSTACLE_SIZE * num_obs_to_place))
if num_obs_to_place == 0:
# No space for obstacles so put them away
rospy.logwarn("Not enough space for obstacles between robots.")
return [out_of_the_way_pose for i in range(num_obstacles)]
x_spacing = x_range / num_obs_to_place
y_spacing = y_range / num_obs_to_place
if init_pos_robot["pos"][0] < init_pos_person["pos"][0]:
base_x = init_pos_robot["pos"][0]
else:
base_x = init_pos_person["pos"][0]
if init_pos_robot["pos"][1] < init_pos_person["pos"][1]:
base_y = init_pos_robot["pos"][1]
else:
base_y = init_pos_person["pos"][1]
# Place obstacles on line between robot and person
obstacle_positions = []
for i in range(num_obs_to_place):
base_x += x_spacing
base_y += y_spacing
obstacle_positions.append({"pos": (base_x, base_y), "orientation":0})
obstacle_positions.extend([out_of_the_way_pose for i in range(num_obstacles - num_obs_to_place)])
return obstacle_positions
elif self.obstacle_mode == 1:
# Put obstacles randomly within area
obstacle_radius = EnvConfig.OBSTACLE_RADIUS_AWAY
min_distance_away_from_robot = EnvConfig.OBSTACLE_SIZE * 1.25
obstacle_positions = []
if EnvConfig.EVALUATION_MODE:
x_diff = -1
y_diff = -1
count = 0
for obs_idx in range(num_obstacles):
p_xy = init_pos_robot["pos"]
point = (p_xy[0] + x_diff*1.25, p_xy[1] + y_diff*1.25)
point = self.prevent_overlap(init_pos_person["pos"], point, min_distance_away_from_robot)
point = self.prevent_overlap(init_pos_robot["pos"], point, min_distance_away_from_robot)
obstacle_positions.append({"pos": point, "orientation":0})
if count % 2 == 0:
x_diff += 1
else:
y_diff += 1
x_diff -= 0.5
count += 1
else:
for obs_idx in range(num_obstacles):
random_point = self.find_random_point_in_circle(obstacle_radius, min_distance_away_from_robot, init_pos_robot["pos"])
random_point = self.prevent_overlap(init_pos_person["pos"], random_point, min_distance_away_from_robot)
obstacle_positions.append({"pos": random_point, "orientation":0})
return obstacle_positions
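# Example (editor note, not from the original code): in obstacle_mode 0 with the robot
# at (0, 0) and the person at (4, 0), x_range becomes 4 - EnvConfig.OBSTACLE_SIZE and
# y_range stays 0; the obstacles that fit are then spaced evenly along the x axis,
# starting one x_spacing away from the robot side.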
# Prevent point b from overlapping point a
def prevent_overlap(self, point_a, point_b, min_distance):
x = point_b[0]
y = point_b[1]
if abs(point_b[0] - point_a[0]) < min_distance:
x += min_distance
if abs(point_b[1] - point_a[1]) < min_distance:
y += min_distance
return (x, y)
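# Example (editor note): prevent_overlap only separates along axes that are closer than
# min_distance, e.g. prevent_overlap((0.0, 0.0), (0.2, 3.0), 0.5) returns (0.7, 3.0);
# the Euclidean distance itself is not checked.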
def set_obstacle_pos(self, init_pos_robot, init_pos_person):
obs_positions = self.get_obstacle_init_pos(init_pos_robot, init_pos_person)
for obs_idx in range(len(self.obstacle_names)):
self.set_pos(self.obstacle_names[obs_idx], obs_positions[obs_idx])
def init_simulator(self):
self.number_of_steps = 0
rospy.loginfo("init simulation called")
self.is_pause = True
init_pos_robot, init_pos_person = self.get_init_pos_robot_person()
self.center_pos_ = init_pos_person["pos"]
self.color_index = 0
self.fallen = False
self.is_max_distance = False
self.first_call_observation = True
rospy.loginfo("Waiting for path follower to die")
if self.position_thread:
self.position_thread.join()
rospy.loginfo("Done waiting")
self.current_obsevation_image_.fill(255)
if self.use_movebase:
self.robot.movebase_cancel_goals()
if self.person_use_move_base:
self.person.movebase_cancel_goals()
rospy.sleep(0.5)
self.person.stop_robot()
self.robot.stop_robot()
# if self.use_movebase:
# self.prev_action = (0,0, 0)
# else:
self.prev_action = (0, 0)
if EnvConfig.TRAIN_HINN:
init_pos_robot = {"pos": (30,30), "orientation": 0}
# Set positions of robots and obstacles
self.set_pos(self.robot.name, init_pos_robot)
self.set_pos(self.person.name, init_pos_person)
if EnvConfig.TRAIN_HINN:
self.set_obstacle_pos(init_pos_person, init_pos_robot)
else:
self.set_obstacle_pos(init_pos_robot, init_pos_person)
self.robot.update(init_pos_robot)
self.person.update(init_pos_person)
self.path_finished = False
self.position_thread = threading.Thread(target=self.path_follower, args=(self.current_path_idx, self.robot, init_pos_person,))
self.position_thread.daemon = True
self.is_reseting = False
self.position_thread.start()
self.wait_observation_ = 0
self.is_reseting = False
self.robot.reset = False
self.person.reset = False
# self.resume_simulator()
rospy.loginfo("init simulation finished")
self.is_pause = False
def pause(self):
self.is_pause = True
self.person.pause()
self.robot.pause()
def resume_simulator(self):
rospy.loginfo("resume simulator")
self.is_pause = False
self.person.resume()
self.robot.resume()
rospy.loginfo("resumed simulator")
def calculate_angle_using_path(self, idx):
return math.atan2(self.path["points"][idx+1][1] - self.path["points"][idx][1], self.path["points"][idx+1][0] - self.path["points"][idx][0])
@staticmethod
def denormalize(value, max_val):
if type(value) == tuple or type(value) == list:
norm_val = [float(x) * max_val for x in value]
else:
norm_val = value * float(max_val)
return norm_val
@staticmethod
def normalize(value, max_val, zero_to_one=None):
if type(value) == tuple or type(value) == list:
norm_val = [x/float(max_val) for x in value]
else:
norm_val = value/float(max_val)
if zero_to_one is not None:
if type(value) == tuple or type(value) == list:
norm_val = [(x + 1)/2 for x in norm_val]
else:
norm_val = (norm_val + 1)/2.
return norm_val
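# Example (editor note): normalize divides by max_val (optionally remapping [-1, 1] to
# [0, 1]) and denormalize is its inverse for the plain case, e.g.
#   GazeborosEnv.normalize(2.5, 5)                    -> 0.5
#   GazeborosEnv.normalize(2.5, 5, zero_to_one=True)  -> 0.75
#   GazeborosEnv.denormalize(0.5, 5)                  -> 2.5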
@staticmethod
def get_global_position(pos_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
# transform the relative position into the center's coordinate frame
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
return global_pos
@staticmethod
def get_global_position_orientation(pos_goal, orientation_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
relative_pos2 = np.asarray((relative_pos[0] + math.cos(orientation_goal), relative_pos[1] + math.sin(orientation_goal)))
# transform the relative position into the center's coordinate frame
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
global_pos2 = np.asarray(relative_pos2 + center_pos)
new_orientation = np.arctan2(global_pos2[1]-global_pos[1], global_pos2[0]-global_pos[0])
return global_pos, new_orientation
@staticmethod
def wrap_pi_to_pi(angle):
while angle > math.pi:
angle -= 2*math.pi
while angle < - math.pi:
angle += 2*math.pi
return angle
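# Example (editor note): wrap_pi_to_pi folds any angle into [-pi, pi], e.g.
#   GazeborosEnv.wrap_pi_to_pi(3*math.pi/2)  -> -math.pi/2
#   GazeborosEnv.wrap_pi_to_pi(-3*math.pi/2) ->  math.pi/2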
@staticmethod
def get_relative_heading_position(relative, center):
while not relative.is_current_state_ready() or not center.is_current_state_ready():
if relative.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.1)
rospy.loginfo("waiting for observation to be ready heading pos")
relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
# transform the relative position into the center's coordinate frame
relative_pos = np.asarray(relative.state_['position'] - center_pos)
relative_pos2 = np.asarray((relative_pos[0] + math.cos(relative_orientation) , relative_pos[1] + math.sin(relative_orientation)))
rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
angle_relative =
|
np.arctan2(relative_pos2[1]-relative_pos[1], relative_pos2[0]-relative_pos[0])
|
numpy.arctan2
|
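The relative-frame math above rotates a world-frame offset by the negative of the observer's heading before returning it. Below is a minimal standalone sketch of that transform (an editor addition, not part of the environment; the function name is illustrative):

import math
import numpy as np

def to_local_frame(offset_world, center_orientation):
    # Rotate a world-frame offset into the frame of an agent heading along
    # center_orientation, using the same row-vector convention p @ R as above.
    c, s = np.cos(-center_orientation), np.sin(-center_orientation)
    rotation_matrix = np.asarray([[c, s], [-s, c]])
    return np.matmul(np.asarray(offset_world), rotation_matrix)

# An offset of (1, 0) seen by an agent facing +y (heading pi/2) lands at roughly (0, -1)
# in the agent's local frame.
print(to_local_frame((1.0, 0.0), math.pi / 2))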
import datetime
import glob
import os
import random
import shutil
import time
from hashlib import md5
import cv2
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import OneHotEncoder
from matplotlib import pyplot
from tensorflow.python.ops.confusion_matrix import confusion_matrix
class GestureDetector:
def __init__(self, prediction_model_filename=None):
self.capture_frequency_sec = 0.2
self.last_capture_timestamp = 0
self.min_validation_images = 50
self.min_test_images = 10
self.gestures = [
'animalhead',
'fingerspread',
'fist',
'indexdown',
'indexleft',
'indexright',
'indexup',
'l',
'spock',
'stop',
'surfer',
'thumbsdown',
'thumbsleft',
'thumbsright',
'thumbsup',
'two',
'unrecognized',
'zero',
'twoleft',
'tworight',
'twodown',
]
self.gesture_image_files = None
physical_devices = tf.config.experimental.list_physical_devices('GPU')
print("GPUs Available: ", len(physical_devices))
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
try:
self.prediction_model = keras.models.load_model(prediction_model_filename)
self.prediction_decoder = np.load(f'{prediction_model_filename}.npy')
except Exception as e:
self.prediction_model = None
print('Could not load prediction model from file {} due to {}'.format(prediction_model_filename, e))
def predict_gesture_from_frame(self,
frame,
people_hand_rectangles,
capture_low_confidence_training_img=False,
capture_high_confidence_training_img=False):
cropped_hand_img = self.extract_right_hand_from_images(frame, people_hand_rectangles)
if cropped_hand_img is None:
return None, 0.0
img_array = keras.preprocessing.image.img_to_array(cropped_hand_img).astype('float32') / 255
img_array = keras.preprocessing.image.smart_resize(img_array, (40, 40), interpolation='bilinear')
img_array = np.expand_dims(img_array, axis=0) # Make this single image a rank 4 tensor
predictions = self.prediction_model.predict(img_array)[0]
pred_i = np.argmax(predictions)
if predictions[pred_i] > 0.80:
print('high confidence gesture: {}, confidence: {}'.format(self.prediction_decoder[pred_i],
predictions[pred_i]))
if capture_high_confidence_training_img:
self.capture_training_image(frame,
people_hand_rectangles,
*self.prediction_decoder[pred_i].split('_'),
subdir='autocap_high_confidence')
else:
print('low confidence gesture: {}, confidence: {}'.format(self.prediction_decoder[pred_i], predictions[pred_i]))
if capture_low_confidence_training_img:
self.capture_training_image(frame,
people_hand_rectangles,
*self.prediction_decoder[pred_i].split('_'),
subdir='autocap_low_confidence')
return self.prediction_decoder[pred_i], predictions[pred_i]
def extract_right_hand_from_images(self, frame, people_hand_rectangles):
for person_hands in people_hand_rectangles:
if len(person_hands) != 2:
return None
left_hand_rect = person_hands[0]
right_hand_rect = person_hands[1]
if right_hand_rect.x < 0.:
right_hand_rect.x = 0.
if int(right_hand_rect.x) < 0 or int(right_hand_rect.x + right_hand_rect.width) > frame.shape[1] or \
int(right_hand_rect.y) < 0 or int(right_hand_rect.y + right_hand_rect.height) > frame.shape[0]:
return None
cropped_img = frame[int(right_hand_rect.y):int(right_hand_rect.y+right_hand_rect.height),
int(right_hand_rect.x):int(right_hand_rect.x+right_hand_rect.width)]
return cropped_img
def capture_training_image(self, frame, people_hand_rectangles, hand, gesture, subdir='autocaptured'):
cur_time = time.time()
if cur_time < self.last_capture_timestamp + self.capture_frequency_sec:
return
cropped_img = self.extract_right_hand_from_images(frame, people_hand_rectangles)
if cropped_img is not None:
cv2.imshow('Cropped Training Image', cropped_img)
image_name = 'images/{}/{}_{}_{}.jpg'.format(
subdir,
hand,
gesture,
md5(np.ascontiguousarray(cropped_img)).hexdigest()[:6])
cv2.imwrite(image_name, cropped_img)
print('Captured training image {}'.format(image_name))
self.last_capture_timestamp = cur_time
def scan_image_files(self):
gesture_image_files = {}
for hand in ['rh']:
gesture_image_files[hand] = {}
for pose in self.gestures:
gesture_image_files[hand][pose] = {
'train_filenames': glob.glob('images/{}_{}_*.jpg'.format(hand, pose), recursive=False),
'val_filenames': glob.glob('images/validation/{}_{}_*.jpg'.format(hand, pose), recursive=False),
'test_filenames': glob.glob('images/test/{}_{}_*.jpg'.format(hand, pose), recursive=False),
'autocaptured_filenames': glob.glob('images/autocaptured/{}_{}_*.jpg'.format(hand, pose), recursive=False),
}
# This takes any files matching the pattern, hashes each one, and renames it using the naming convention.
bad_filenames = glob.glob('images/{}_{} (*).jpg'.format(hand, pose), recursive=False)
for file in bad_filenames:
img = keras.preprocessing.image.load_img(file, color_mode='rgb')
try:
os.rename(file, 'images/{}_{}_{}.jpg'.format(hand, pose, md5(np.ascontiguousarray(img)).hexdigest()[:6]))
except FileExistsError:
print('Deleting {} since it is a duplicate'.format(file))
os.remove(file)
gesture_image_files[hand][pose]['all_filenames'] = gesture_image_files[hand][pose]['train_filenames'] + \
gesture_image_files[hand][pose]['val_filenames'] + \
gesture_image_files[hand][pose]['test_filenames'] + \
gesture_image_files[hand][pose]['autocaptured_filenames']
print('Found {} images ({} train, {} validation, {} test) for pose {}_{}'.format(
len(gesture_image_files[hand][pose]['all_filenames']),
len(gesture_image_files[hand][pose]['train_filenames']),
len(gesture_image_files[hand][pose]['val_filenames']),
len(gesture_image_files[hand][pose]['test_filenames']),
hand,
pose
))
return gesture_image_files
def rebalance_test_train_files(self):
self.gesture_image_files = self.scan_image_files()
rescan = False
for hand, poses in self.gesture_image_files.items():
for pose, groups in poses.items():
if len(groups['train_filenames']) <= self.min_validation_images:
print('Insufficient training files to create validation images for pose {}-{}'.format(hand, pose))
continue
# Check for sufficient validation images
if len(groups['val_filenames']) < self.min_validation_images:
new_validation_images = random.sample(groups['train_filenames'],
self.min_validation_images - len(groups['val_filenames']))
for file in new_validation_images:
shutil.move(file, 'images/validation/{}'.format(os.path.basename(file)))
rescan = True
if rescan:
self.gesture_image_files = self.scan_image_files()
rescan = False
for hand, poses in self.gesture_image_files.items():
for pose, groups in poses.items():
if len(groups['train_filenames']) <= self.min_validation_images + self.min_test_images:
print('Insufficient training files to create test images for pose {}-{}'.format(hand, pose))
continue
# Check for sufficient test images
if len(groups['test_filenames']) < self.min_test_images:
new_test_images = random.sample(groups['train_filenames'],
self.min_test_images - len(groups['test_filenames']))
for file in new_test_images:
shutil.move(file, 'images/test/{}'.format(os.path.basename(file)))
rescan = True
if rescan:
self.gesture_image_files = self.scan_image_files()
def train(self, hands, poses, show_training_images=False):
x = {
'train': [],
'val': [],
'test': []
}
y = {
'train': [],
'val': [],
'test': []
}
enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
if self.gesture_image_files is None:
self.gesture_image_files = self.scan_image_files()
for hand in hands:
for pose in poses:
for t in ['train', 'val', 'test']:
for file in self.gesture_image_files[hand][pose][f'{t}_filenames']:
img = keras.preprocessing.image.load_img(file, color_mode='rgb')
img_array = keras.preprocessing.image.img_to_array(img)
img_array = keras.preprocessing.image.smart_resize(img_array, (40, 40), interpolation='bilinear')
x[t].append(img_array)
y[t].append('{}_{}'.format(hand, pose))
train_datagen = ImageDataGenerator(
#preprocessing_function=keras.applications.vgg16.preprocess_input,
width_shift_range=0.1,
height_shift_range=0.1,
fill_mode='nearest',
rescale=1./255
)
val_datagen = ImageDataGenerator(
#preprocessing_function=keras.applications.vgg16.preprocess_input,
width_shift_range=0.1,
height_shift_range=0.1,
fill_mode='nearest',
rescale=1./255
)
test_datagen = ImageDataGenerator(
#preprocessing_function=keras.applications.vgg16.preprocess_input,
rescale=1./255
)
for t in ['train', 'val', 'test']:
x[t] = np.array(x[t])
y[t] = np.array(y[t]).reshape(-1, 1)
y[t] = enc.fit_transform(y[t]) # Encode Y using OneHot
train_datagen.fit(x['train'], augment=True)
val_datagen.fit(x['val'], augment=True)
test_datagen.fit(x['test'])
if show_training_images:
for x_batch, y_batch in train_datagen.flow(x['train'], y['train'], batch_size=100):
for i in range(0, len(x_batch)):
subplot = pyplot.subplot(10, 10, i + 1)
subplot.set_title(enc.inverse_transform(y_batch[i].reshape(1, -1))[0][0])
pyplot.imshow(x_batch[i])
pyplot.subplots_adjust(left=0, right=1.0, bottom=0.025, top=0.975, wspace=0.155, hspace=0.470)
pyplot.get_current_fig_manager().window.showMaximized()
pyplot.show()
break
print('About to train with {} training images'.format(len(x['train'])))
learning_rate = 0.001
epochs = 90
batch_size = 14
model_name = 'hand-pose-right-{}-{}.h5'.format(datetime.date.today(), epochs)
train_gen = train_datagen.flow(x['train'], y['train'], batch_size=batch_size, shuffle=True)
val_gen = val_datagen.flow(x['val'], y['val'], batch_size=batch_size, shuffle=True)
test_gen = test_datagen.flow(x['test'], y['test'], batch_size=batch_size)
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=x['train'][0].shape))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(units=len(enc.categories_[0]), activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
metrics=['accuracy'])
print("Training on {} training images, {} validation images model {}".format(
train_gen.n,
val_gen.n,
model_name))
model.fit(train_gen,
epochs=epochs,
steps_per_epoch=train_gen.n // train_gen.batch_size,
validation_data=val_gen,
verbose=2)
# Save the model and the one-hot encoding so we can decode later
model.save(model_name)
np.save(f'{model_name}', enc.categories_[0])
predictions = model.predict(x=test_gen.x / 255.0, verbose=2)  # apply the same 1./255 rescale the generators use
y_pred = np.argmax(predictions, axis=1)
cm_labels =
|
np.argmax(test_gen.y, axis=1)
|
numpy.argmax
|
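The training code above ends by collapsing the one-hot test labels and predictions to class indices; a confusion matrix is the usual next step. Below is a minimal NumPy-only sketch of that computation (an editor addition; it does not use the TensorFlow helper imported in the file, and all names are illustrative):

import numpy as np

def confusion_counts(y_true_idx, y_pred_idx, num_classes):
    # counts[i, j] = number of samples with true class i predicted as class j
    counts = np.zeros((num_classes, num_classes), dtype=np.int64)
    for t, p in zip(y_true_idx, y_pred_idx):
        counts[t, p] += 1
    return counts

# Three classes, four samples; one sample of class 2 is mistaken for class 0.
print(confusion_counts([0, 1, 2, 2], [0, 1, 2, 0], num_classes=3))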
import numpy as np
def potential(x,y, mu):
return -np.exp(-mu*np.sqrt(np.sum(np.square(y - x), axis = -1)))
def forces(x,y, mu):
return -mu*(y - x)/(np.finfo(float).eps+np.sqrt(np.sum(np.square(y - x), \
axis = -1, keepdims = True)))*np.exp(-mu*np.sqrt(np.sum(np.square(y - x), axis = -1, keepdims = True)))
# compute exponential potential
def potential_per(x,y, mu, L):
shift_x = np.reshape(np.array([L, 0.]), (1,1,2))
shift_y = np.reshape(np.array([0., L]), (1,1,2))
return potential(x,y, mu) + potential(x+shift_x,y, mu) + potential(x-shift_x,y, mu)\
+potential(x+shift_y,y, mu) + potential(x+shift_x+shift_y,y, mu) + potential(x-shift_x+shift_y,y, mu) \
+potential(x-shift_y,y, mu) + potential(x+shift_x-shift_y,y, mu) + potential(x-shift_x-shift_y,y, mu)
# compute exponential force
def forces_per(x, y, mu, L):
shift_x = np.reshape(np.array([L, 0.]), (1,1,2))
shift_y = np.reshape(np.array([0., L]), (1,1,2))
return forces(x,y, mu) + forces(x+shift_x,y, mu) + forces(x-shift_x,y, mu)\
+ forces(x+shift_y,y, mu) + forces(x+shift_x+shift_y,y, mu) + forces(x-shift_x+shift_y,y, mu)\
+ forces(x-shift_y,y, mu) + forces(x+shift_x-shift_y,y, mu) + forces(x-shift_x-shift_y,y, mu)
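# Example (editor sketch): with x of shape (1, M, 2) and y of shape (N, 1, 2), the nine
# periodic image terms broadcast to pairwise results, e.g.
#   x = np.random.rand(1, 4, 2) * 5.0
#   y = np.random.rand(3, 1, 2) * 5.0
#   potential_per(x, y, mu=0.5, L=5.0).shape  -> (3, 4)
#   forces_per(x, y, mu=0.5, L=5.0).shape     -> (3, 4, 2)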
# 2D gaussian
def gaussian_2d(x, y, center, tau):
return (1/(2*np.pi*(tau**2)))*\
np.exp( -0.5*( np.square(x - center[0])
+ np.square(y - center[1]))/tau**2)
def computeDerPot2DPer(Nx, mu, Ls, x_center = [0.0, 0.0], nPointSmear = 5):
xGrid = np.linspace(0, Ls, Nx+1)[:-1]
kGrid = 2*np.pi*np.linspace(-(Nx//2), Nx//2, Nx)/Ls
# creating the 2D space and frequency grids
y_grid, x_grid = np.meshgrid(xGrid, xGrid)
ky_grid, kx_grid = np.meshgrid(kGrid, kGrid)
mult = 4*np.pi/( np.square(kx_grid)
+ np.square(ky_grid)
+ np.square(mu))
# here we smear the dirac delta
tau = nPointSmear*Ls/Nx
# periodic distance
x_diff = x_grid - x_center[0]
x_diff_per = x_diff - Ls*np.round(x_diff/Ls)
y_diff = y_grid - x_center[1]
y_diff_per = y_diff - Ls*np.round(y_diff/Ls)
# define the periodic gaussian
tau_gauss = gaussian_2d(x_diff_per,y_diff_per, [0.0, 0.0], tau)
# compute the fourier transform of the gaussian
xFFT = np.fft.fftshift(np.fft.fft2(tau_gauss))
fFFT = xFFT*mult
f = np.real(np.fft.ifft2(np.fft.ifftshift(fFFT)))
# compute force
dfdxFFT = 1.j*kx_grid*fFFT
dfdyFFT = 1.j*ky_grid*fFFT
dfdx = np.fft.ifft2(np.fft.ifftshift(dfdxFFT))
dfdy = np.fft.ifft2(np.fft.ifftshift(dfdyFFT))
return x_grid, y_grid, f, np.real(dfdx), np.real(dfdy)
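# Example (editor note): the returned grids, smeared potential and its derivatives all
# share the (Nx, Nx) shape, e.g.
#   xg, yg, f, dfdx, dfdy = computeDerPot2DPer(Nx=64, mu=1.0, Ls=10.0)
#   assert f.shape == dfdx.shape == dfdy.shape == (64, 64)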
# compute Yukawa data
def gen_data_yukawa_2d_mixed(n_cells, Np, mu1, mu2,
n_samples, min_delta, L_cell,
weight1, weight2):
points_array = np.zeros((n_samples, Np*n_cells**2, 2))
potential_array =
|
np.zeros((n_samples,1))
|
numpy.zeros
|
from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings
)
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
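# Example (editor note): request 64-byte alignment and check it through the array
# interface; the helper also guarantees the buffer is *not* 128-byte aligned.
#   a = _aligned_zeros((16,), dtype=np.float64, align=64)
#   assert a.__array_interface__['data'][0] % 64 == 0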
class TestFlags(object):
def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags.updateifcopy, False)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags['U'], False)
assert_equal(self.a.flags['UPDATEIFCOPY'], False)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core._multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank(object):
def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
def test_real_imag(self):
# contiguity checks are for gh-11245
x = np.array(1j)
xr = x.real
xi = x.imag
assert_equal(xr, np.array(0))
assert_(type(xr) is np.ndarray)
assert_equal(xr.flags.contiguous, True)
assert_equal(xr.flags.f_contiguous, True)
assert_equal(xi, np.array(1))
assert_(type(xi) is np.ndarray)
assert_equal(xi.flags.contiguous, True)
assert_equal(xi.flags.f_contiguous, True)
class TestScalarIndexing(object):
def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(object):
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
# 45341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
def test_jagged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = np.array([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_jagged_shape_object(self):
# The jagged dimension of a list is turned into an object array
a = np.array([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2], [3, 3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
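        # A zero-width 'S0' field stores no bytes, so every string assignment
        # below is silently dropped and the field always reads back as b''.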
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
def test_assignment(self):
def testassign(arr, v):
c = arr.copy()
c[0] = v # assign using setitem
c[1:] = v # assign using "dtype_transfer" code paths
return c
dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
arr = np.ones(2, dt)
v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
v4 = np.array([(2,)], dtype=[('bar', 'i8')])
v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
ans = np.array([(2,3),(2,3)], dtype=dt)
assert_equal(testassign(arr, v1), ans)
assert_equal(testassign(arr, v2), ans)
assert_equal(testassign(arr, v3), ans)
assert_raises(ValueError, lambda: testassign(arr, v4))
assert_equal(testassign(arr, v5), ans)
w[:] = 4
assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
# test field-reordering, assignment by position, and self-assignment
a = np.array([(1,2,3)],
dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
a[['foo', 'bar']] = a[['bar', 'foo']]
assert_equal(a[0].item(), (2,1,3))
# test that this works even for 'simple_unaligned' structs
# (ie, that PyArray_EquivTypes cares about field order too)
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
def test_structuredscalar_indexing(self):
# test gh-7262
x = np.empty(shape=1, dtype="(2)3S,(2)3U")
assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
assert_equal(x[0], x[0][()])
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
assert_raises(KeyError, lambda : a[['a','a']])
assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
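        # Enumerate all 2**power bit patterns over `length` slots so that
        # count_nonzero is exercised on many different True/False layouts.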
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b'', (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b'a', b'0', b' ']:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.unicode_)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
class TestZeroSizeFlexible(object):
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
if dtype == np.void:
return np.zeros(shape, dtype=(dtype, 0))
# not constructable directly
dtype = np.dtype([('x', dtype, 0)])
return np.zeros(shape, dtype=dtype)['x']
def test_create(self):
zs = self._zeros(10, bytes)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, np.void)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, unicode)
assert_equal(zs.itemsize, 0)
def _test_sort_partition(self, name, kinds, **kwargs):
# Previously, these would all hang
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
sort_method = getattr(zs, name)
sort_func = getattr(np, name)
for kind in kinds:
sort_method(kind=kind, **kwargs)
sort_func(zs, kind=kind, **kwargs)
def test_sort(self):
self._test_sort_partition('sort', kinds='qhm')
def test_argsort(self):
self._test_sort_partition('argsort', kinds='qhm')
def test_partition(self):
self._test_sort_partition('partition', kinds=['introselect'], kth=2)
def test_argpartition(self):
self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
def test_resize(self):
# previously an error
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
zs.resize(25)
zs.resize((10, 10))
def test_view(self):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
# viewing as itself should be allowed
assert_equal(zs.view(dt).dtype, np.dtype(dt))
# viewing as any non-empty type gives an empty result
assert_equal(zs.view((dt, 1)).shape, (0,))
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
p = pickle.dumps(zs, protocol=proto)
zs2 = pickle.loads(p)
assert_equal(zs.dtype, zs2.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_pickle_with_buffercallback(self):
array = np.arange(10)
buffers = []
bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
protocol=5)
array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
# when using pickle protocol 5 with buffer callbacks,
# array_from_buffer is reconstructed from a buffer holding a view
# to the initial array's data, so modifying an element in array
# should modify it in array_from_buffer too.
array[0] = -1
assert array_from_buffer[0] == -1, array_from_buffer[0]
class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
oned = np.ones(1)
# gh-12031, caused SEGFAULT
        assert_raises(TypeError, oned.choose, np.void(0), [oned])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
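        # For sorting purposes nan compares greater than any number, e.g.
        # np.sort([np.nan, 1, 0]) gives [0., 1., nan].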
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
#gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
class Raiser(object):
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
        # a degraded dataset that would take minutes to run with a plain quicksort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
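        # This is the classic "median-of-3 killer" adversary: it forces
        # worst-case O(n^2) partitioning unless the sort detects the
        # degradation and falls back to a different algorithm.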
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
assert_raises_regex(ValueError, 'duplicate',
lambda: r.sort(order=['id', 'id']))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
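        # a stable sort of all-equal keys must return the identity permutation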
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
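        # Consistency here means nan is treated as the largest value, matching
        # the sort order checked in test_sort above.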
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
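        # the keys are given in decreasing order, so a cached lower bound from
        # the previous key must be reset rather than reused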
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l')
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l')
assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
        s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l', s[:0])
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l', s)
assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
        # Test non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
        # Test non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
        msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
assert_equal(np.partition(d, 5, kind=k)[5], 5)
assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
assert_equal(np.partition(d, 6, kind=k)[6], 6)
assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
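            # x is a permutation of arange, so after partitioning the value at
            # index `mid` must simply be `mid`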
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # map() is lazy under Python 3, so shuffle each row explicitly
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
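        # Check the partition invariant for every requested kth index: the
        # elements in front of d[k] (back to the previous kth) are strictly
        # smaller, and everything from d[k] onwards is greater or equal.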
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # map() is lazy under Python 3, so shuffle each row explicitly
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
@pytest.mark.parametrize('func', (np.dot, np.matmul))
def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
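        # BLAS may route products of a matrix with its own transpose through
        # syrk instead of gemm; both code paths must produce the same result.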
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(func(eaf, eaf), eaf)
assert_equal(func(eaf.T, eaf), eaf)
assert_equal(func(eaf, eaf.T), eaf)
assert_equal(func(eaf.T, eaf.T), eaf)
assert_equal(func(eaf.T.copy(), eaf), eaf)
assert_equal(func(eaf, eaf.T.copy()), eaf)
assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(func(ebf, ebf), eaf)
assert_equal(func(ebf.T, ebf), eaf)
assert_equal(func(ebf, ebf.T), eaf)
assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
func(edf[::-1, :], edf.T),
func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
func(edf[:, ::-1], edf.T),
func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
func(edf, edf[::-1, :].T),
func(edf, edf[::-1, :].T.copy())
)
assert_equal(
func(edf, edf[:, ::-1].T),
func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(func(edf, edf.T), eddtf)
assert_equal(func(edf.T, edf), edtdf)
@pytest.mark.parametrize('func', (np.dot, np.matmul))
@pytest.mark.parametrize('dtype', 'ifdFD')
def test_no_dgemv(self, func, dtype):
        # check that the vector argument is made contiguous before gemv is used
# gh-12156
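        # b is a broadcast (zero-stride) vector, so it must be buffered into a
        # contiguous array before any BLAS gemv call; compare against an
        # explicit copy.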
a = np.arange(8.0, dtype=dtype).reshape(2, 4)
b = np.broadcast_to(1., (4, 1))
ret1 = func(a, b)
ret2 = func(a, b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T)
assert_equal(ret1, ret2)
# check for unaligned data
dt = np.dtype(dtype)
a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
a = a.reshape(2, 4)
b = a[0]
# make sure it is not aligned
assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
ret1 = func(a, b)
ret2 = func(a.copy(), b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T.copy())
assert_equal(ret1, ret2)
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_dot_matmul_out(self):
# gh-9641
class Sub(np.ndarray):
pass
a = np.ones((2, 2)).view(Sub)
b = np.ones((2, 2)).view(Sub)
out = np.ones((2, 2))
# make sure out can be any ndarray (not only subclass of inputs)
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
def test_dot_matmul_inner_array_casting_fails(self):
class A(object):
def __array__(self, *args, **kwargs):
raise NotImplementedError
# Don't override the error from calling __array__()
assert_raises(NotImplementedError, np.dot, A(), A())
assert_raises(NotImplementedError, np.matmul, A(), A())
assert_raises(NotImplementedError, np.inner, A(), A())
def test_matmul_out(self):
# overlapping memory
a = np.arange(18).reshape(2, 3, 3)
b = np.matmul(a, a)
c = np.matmul(a, a, out=a)
assert_(c is a)
assert_equal(c, b)
a = np.arange(18).reshape(2, 3, 3)
c = np.matmul(a, a, out=a[::-1, ...])
assert_(c.base is a.base)
assert_equal(c, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_size_zero_memleak(self):
# Regression test for issue 9615
# Exercises a special-case code path for dot products of length
        # zero in cblasfuncs (making it specific to floating-point dtypes).
a = np.array([], dtype=np.float64)
x = np.array(2.0)
for _ in range(100):
np.dot(a, a, out=x)
if HAS_REFCOUNT:
assert_(sys.getrefcount(x) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
        # Non-contiguous, with a 1-sized axis whose stride does not match:
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
        # Contiguous, with a 1-sized axis whose stride does not match, also works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
        # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is deliberately
        # rewriting the strides of 1-sized axes, so skip the check:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(np.AxisError, a.swapaxes, -5, 0)
assert_raises(np.AxisError, a.swapaxes, 4, 0)
assert_raises(np.AxisError, a.swapaxes, 0, -5)
assert_raises(np.AxisError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestSequenceMethods(object):
def test_array_contains(self):
assert_(4.0 in np.arange(16.).reshape(4,4))
assert_(20.0 not in np.arange(16.).reshape(4,4))
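        # `in` on an ndarray is roughly equivalent to `(arr == value).any()`,
        # so containment here is elementwise equality, not identity.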
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
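    # Illustrative sketch (not executed here) of the deferral rule above:
    #
    #     class Deferring(object):
    #         __array_ufunc__ = None
    #         def __radd__(self, other):
    #             return "deferred"
    #
    #     np.arange(3) + Deferring()  # ndarray.__add__ returns NotImplemented,
    #                                 # so Python falls back to __radd__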
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
# 'lt': (np.greater, False),
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
if sys.version_info >= (3, 5):
ops['matmul'] = (np.matmul, False, float)
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace["__{0}__".format(op)] = op_impl
class_namespace["__r{0}__".format(op)] = rop_impl
class_namespace["__i{0}__".format(op)] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, "__{0}__".format(op))
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_method, obj, err_msg=err_msg)
# obj __op__ arr
arr_rmethod = getattr(arr, "__r{0}__".format(op))
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
assert_raises((TypeError, Coerced),
arr_rmethod, obj, err_msg=err_msg)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, "__i{0}__".format(op))
if inplace_override_expected:
assert_equal(arr_method(obj), NotImplemented,
err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_imethod, obj,
err_msg=err_msg)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_array_ufunc_index(self):
# Check that index is set appropriately, also if only an output
        # is passed on (the latter is another regression test for github bug 4753).
# This also checks implicitly that 'out' is always a tuple.
class CheckIndex(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
                # not found among the inputs, so `self` must be one of the outputs.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(np.modf(dummy, out=a), (0,))
assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
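        # Note: OutClass.__array_ufunc__ writes into kw['out'][0] and returns
        # None, so the calls above return None; the assertions only check
        # that the provided output array A was filled in place.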
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
class SomeClass(object):
def __init__(self, num=None):
self.num = num
# want to ensure a fast pow path is not taken
def __mul__(self, other):
raise AssertionError('__mul__ should not be called')
def __div__(self, other):
raise AssertionError('__div__ should not be called')
def __pow__(self, exp):
return SomeClass(num=self.num ** exp)
def __eq__(self, other):
if isinstance(other, SomeClass):
return self.num == other.num
__rpow__ = __pow__
def pow_for(exp, arr):
return np.array([x ** exp for x in arr])
obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
def test_pos_array_ufunc_override(self):
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*[i.view(np.ndarray) for
i in inputs], **kwargs)
tst = np.array('foo').view(A)
with assert_raises(TypeError):
+tst
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
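    # "Elision" means reusing the buffer of a temporary whose refcount is 1
    # for the next operation instead of allocating a new result, e.g. in
    # ``a + b + c`` the intermediate ``a + b`` can be added to ``c`` in place.
    # It is only attempted for large arrays and only when called directly
    # from Python, which is why the tests below use ~100000-element arrays.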
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
d + d
# the return original should not be changed to an inplace operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # Scanning whether the refcount == 1 object is on the Python stack (to
        # check that we are called directly from Python) is flawed, as the
        # object may still be above the stack pointer with no way to inspect it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
# the return original should not be changed to an inplace operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
d = np.ones(200000, dtype=np.int64)
assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test no elision on broadcast to higher dimension
        # this only triggers the elision code path in debug mode; triggering it
        # in normal mode would need a 256kb matching dimension, i.e. a lot of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
def test_elide_scalar_readonly(self):
# The imaginary part of a real array is readonly. This needs to go
# through fast_scalar_power which is only called for powers of
# +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
# elision which can be gotten for the imaginary part of a real
# array. Should not error.
a = np.empty(100000, dtype=np.float64)
a.imag ** 2
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
class TestCAPI(object):
def test_IsPythonScalar(self):
from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(object):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
assert_(isinstance(x[0], int))
assert_(type(x[0, ...]) is np.ndarray)
class TestPickling(object):
def test_highest_available_pickle_protocol(self):
try:
import pickle5
except ImportError:
pickle5 = None
if sys.version_info[:2] >= (3, 8) or pickle5 is not None:
assert pickle.HIGHEST_PROTOCOL >= 5
else:
assert pickle.HIGHEST_PROTOCOL < 5
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
                        reason=('this tests the error messages when trying to '
                                'use protocol 5 although it is not available'))
def test_correct_protocol5_error_message(self):
array = np.arange(10)
if sys.version_info[:2] in ((3, 6), (3, 7)):
# For the specific case of python3.6 and 3.7, raise a clear import
# error about the pickle5 backport when trying to use protocol=5
# without the pickle5 package
with pytest.raises(ImportError):
array.__reduce_ex__(5)
elif sys.version_info[:2] < (3, 6):
            # when calling __reduce_ex__ explicitly with protocol=5 on a python
            # version older than 3.6, raise a ValueError saying that protocol 5
            # is not available for this python version
with pytest.raises(ValueError):
array.__reduce_ex__(5)
def test_record_array_with_object_dtype(self):
my_object = object()
arr_with_object = np.array(
[(my_object, 1, 2.0)],
dtype=[('a', object), ('b', int), ('c', float)])
arr_without_object = np.array(
[('xxx', 1, 2.0)],
dtype=[('a', str), ('b', int), ('c', float)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_arr_with_object = pickle.loads(
pickle.dumps(arr_with_object, protocol=proto))
depickled_arr_without_object = pickle.loads(
pickle.dumps(arr_without_object, protocol=proto))
assert_equal(arr_with_object.dtype,
depickled_arr_with_object.dtype)
assert_equal(arr_without_object.dtype,
depickled_arr_without_object.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_f_contiguous_array(self):
f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
buffers = []
# When using pickle protocol 5, Fortran-contiguous arrays can be
# serialized using out-of-band buffers
bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
buffer_callback=buffers.append)
assert len(buffers) > 0
depickled_f_contiguous_array = pickle.loads(bytes_string,
buffers=buffers)
assert_equal(f_contiguous_array, depickled_f_contiguous_array)
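    # The round-trip above relies on PEP 574 (pickle protocol 5): the array
    # data travels out-of-band via the buffer_callback instead of being
    # copied into the pickle byte stream, and is handed back via `buffers=`.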
def test_non_contiguous_array(self):
non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
assert not non_contiguous_array.flags.c_contiguous
assert not non_contiguous_array.flags.f_contiguous
# make sure non-contiguous arrays can be pickled-depickled
# using any protocol
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_non_contiguous_array = pickle.loads(
pickle.dumps(non_contiguous_array, protocol=proto))
assert_equal(non_contiguous_array, depickled_non_contiguous_array)
def test_roundtrip(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
refs = [weakref.ref(a) for a in DATA]
for a in DATA:
assert_equal(
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
del a, DATA, carray
gc.collect()
# check for reference leaks (gh-12793)
for ref in refs:
assert ref() is None
def _loads(self, obj):
if sys.version_info[0] >= 3:
return pickle.loads(obj, encoding='latin1')
else:
return pickle.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(s)
assert_equal(a, p)
class TestFancyIndexing(object):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
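        # A boolean mask with fewer dimensions than the array indexes the
        # leading axes: the 1-d mask m selects whole rows, while the 2-d
        # masks m2/m3 select individual elements and return a 1-d result.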
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(object):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([u"This", u"is", u"example"])
g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
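    # Reading the expectations above: NaN entries dominate, so argmax returns
    # the position of the first NaN; complex values compare lexicographically
    # on (real, imag); the datetime/timedelta rows encode the NaT handling of
    # the NumPy version these tests target.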
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amax == aargmax.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
max_val = np.max(arr)
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amin == aargmin.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
min_val = np.min(arr)
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(object):
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(object):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(object):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress(object):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T), T, mask, val)
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T))
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
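    # In the two tests above: mode='clip' clamps out-of-range indices to the
    # valid range, while mode='wrap' reduces them modulo the axis length, so
    # with axis 0 of length 2 index -1 clips to 0 but wraps to 1, and 3 wraps
    # to 3 % 2 == 1.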
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(object):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
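        # The last key passed to lexsort is the primary sort key; here the
        # entries of `a` equal to 1 (positions 0, 2, 4) are ordered by their
        # `b` values 0, 5, 2, giving 0, 4, 2 at the front of expected_idx.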
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setup(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def teardown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() carries less precision than repr(), hence the loose comparison
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
def test_io_open_unbuffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
# check append mode (gh-8329)
open(self.filename, "w").close() # delete file contents
with open(self.filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(self.filename))
with open(self.filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
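        # pos == 10 because exactly 2 bytes were consumed by read(2) plus 8
        # bytes for one float64; gh-4118 was about buffered reads leaving the
        # file offset beyond the data actually consumed.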
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_load_object_array_fromfile(self):
# gh-12300
with open(self.filename, 'w') as f:
# Ensure we have a file with consistent contents
pass
with open(self.filename, 'rb') as f:
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, f, dtype=object)
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, self.filename, dtype=object)
def _check_from(self, s, value, **kw):
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
else:
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
with CommaDecimalPointLocale():
self.test_numbers()
self.test_nan()
self.test_inf()
self.test_counted_string()
self.test_ascii()
self.test_malformed()
self.test_tofile_sep()
self.test_tofile_format()
class TestFromBuffer(object):
@pytest.mark.parametrize('byteorder', ['<', '>'])
@pytest.mark.parametrize('dtype', [float, int, complex])
def test_basic(self, byteorder, dtype):
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7)) * 5).astype(dt)
buf = x.tobytes()
assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
def test_empty(self):
assert_array_equal(np.frombuffer(b''), np.array([]))
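    # frombuffer returns a view sharing memory with the buffer (read-only for
    # bytes input) rather than a copy, which is why the tobytes() round trip
    # above is exact and cheap.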
class TestFlat(object):
def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
# for 1.14 all are set to non-writeable on the way to replacing the
# UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is False)
with assert_warns(DeprecationWarning):
assert_(c.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(d.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(e.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
# UPDATEIFCOPY is removed.
assert_(f.flags.updateifcopy is False)
assert_(c.flags.writebackifcopy is False)
assert_(d.flags.writebackifcopy is False)
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
class TestResize(object):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_0d_shape(self):
        # do it multiple times to test it does not break the alloc cache, gh-9216
for i in range(10):
x = np.empty((1,))
x.resize(())
assert_equal(x.shape, ())
assert_equal(x.size, 1)
x = np.empty(())
x.resize((1,))
assert_equal(x.shape, (1,))
assert_equal(x.size, 1)
def test_invalid_arguments(self):
assert_raises(TypeError, np.eye(3).resize, 'hi')
assert_raises(ValueError, np.eye(3).resize, -1)
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
|
assert_array_equal(a['k'][-5:], 0)
|
numpy.testing.assert_array_equal
|
"""Unit tests for the one.alf.io module"""
import logging
import unittest
import tempfile
from pathlib import Path
import shutil
import json
import numpy as np
import pandas as pd
from iblutil.io import jsonable
import one.alf.io as alfio
from one.alf.exceptions import ALFObjectNotFound
from one.alf.spec import FILE_SPEC, regex
class TestAlfBunch(unittest.TestCase):
def test_to_dataframe_scalars(self):
simple = alfio.AlfBunch({'titi': np.random.rand(500), 'toto': np.random.rand(500)})
df = simple.to_df()
self.assertTrue(np.all(df['titi'].values == simple.titi))
self.assertTrue(np.all(df['toto'].values == simple.toto))
self.assertTrue(len(df.columns) == 2)
simple['titi'] = np.random.rand(50)
with self.assertRaises(ValueError):
simple.to_df()
simple['toto'] = np.random.rand(50, 10, 5)
with self.assertLogs(logging.getLogger('one.alf.io'), logging.WARNING):
self.assertTrue('toto' not in simple.to_df().columns)
def test_to_dataframe_vectors(self):
vectors = alfio.AlfBunch({'titi': np.random.rand(500, 1),
'toto': np.random.rand(500),
'tata': np.random.rand(500, 12)})
df = vectors.to_df()
self.assertTrue(np.all(df['titi'].values == vectors.titi[:, 0]))
self.assertTrue(np.all(df['toto'].values == vectors.toto))
self.assertTrue(
|
np.all(df['tata_0'].values == vectors.tata[:, 0])
|
numpy.all
|
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
try:
from vafnet import VAFnet, Visualizer
except ImportError:
print("vafnet is not installed yet! Trying to call directly from source...")
from sys import path
path.append("../../")
from vafnet import VAFnet, Visualizer
print("done.")
############################ Data Setting ############################
DATA_PATH = 'parkinsons_updrs.data'
BEST_MODEL_PATH = 'best_model.pkl'
train_prop, nfolds = 0.9, 5
def load_data(train_prop=train_prop, nfolds=nfolds):
import pandas as pd
    data = pd.read_csv(DATA_PATH, header=0, index_col=0)
    data = data.to_numpy().astype(np.float32)
X, y = data[:, :-1], data[:, -1]
y = y[:, None]
ndata = y.shape[0]
npartition = ndata//nfolds
ntrain = int(train_prop*npartition)
train_test_set = []
for fold in range(nfolds):
train_inds = npr.choice(range(npartition), ntrain, replace=False)
test_inds = np.setdiff1d(range(npartition), train_inds)
train_inds += fold*npartition
test_inds += fold*npartition
X_train, y_train = X[train_inds].copy(), y[train_inds].copy()
X_test, y_test = X[test_inds].copy(), y[test_inds].copy()
train_test_set.append([X_train.T, y_train.T, X_test.T, y_test.T])
return train_test_set
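# Editorial note (not part of the original script): sklearn's KFold is
# imported above but not used in this snippet; load_data() instead slices the
# data into `nfolds` consecutive partitions and, within each partition,
# randomly holds out a train_prop fraction for training and the remainder
# for testing.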
############################ Model Setting ############################
archits = [[60, 40, 20]]
plot_metric = 'rmse'
select_params_metric = 'obj'
select_model_metric = 'rmse'
visualizer = None
# fig = plt.figure(figsize=(8, 6), facecolor='white')
# visualizer = Visualizer(fig, plot_metric)
algo = {
'algo': 'adadelta',
'algo_params': {
'learning_rate':1e-3,
'rho':0.75,
}
}
opt_params = {
'obj': select_params_metric,
'algo': algo,
'cv_nfolds': 4,
'cvrg_tol': 1e-3,
'max_cvrg': 50,
'max_iter': 1000
}
evals = {
'score': [
'Model Selection Score',
{"("+",".join([str(D) for D in archit])+")":
[np.inf, np.inf] for archit in archits}
],
'obj': [
'Params Optimization Objective',
{"("+",".join([str(D) for D in archit])+")":
[np.inf, np.inf] for archit in archits}
],
'mse': [
'Mean Square Error',
{"("+",".join([str(D) for D in archit])+")":
[np.inf, np.inf] for archit in archits}
],
'rmse': [
'Root Mean Square Error',
{"("+",".join([str(D) for D in archit])+")":
[np.inf, np.inf] for archit in archits}
],
'nmse': [
'Normalized Mean Square Error',
{"("+",".join([str(D) for D in archit])+")":
[np.inf, np.inf] for archit in archits}
],
'mnlp': [
'Mean Negative Log Probability',
{"("+",".join([str(D) for D in archit])+")":
[np.inf, np.inf] for archit in archits}
],
'time': [
'Training Time(s)',
{"("+",".join([str(D) for D in archit])+")":
[np.inf, np.inf] for archit in archits}
],
}
############################ General Methods ############################
def debug(local):
locals().update(local)
print('Debug Commands:')
while True:
cmd = input('>>> ')
if(cmd == ''):
break
try:
exec(cmd)
except Exception as e:
import traceback
traceback.print_tb(e.__traceback__)
def plot_dist(*args):
import seaborn as sns
for x in args:
plt.figure()
sns.distplot(x)
plt.show()
############################ Training Phase ############################
train_test_set = load_data()
for archit in archits:
model = VAFnet(archit)
model.optimize(*train_test_set[-1][:2], None, visualizer, **opt_params)
funcs = model.get_compiled_funcs()
results = {en:[] for en in evals.keys()}
best_results = {en:[] for en in evals.keys()}
for X_train, y_train, X_test, y_test in train_test_set:
model = VAFnet(archit)
model.optimize(X_train, y_train, funcs, visualizer, **opt_params)
model.score(X_test, y_test)
model.echo('-'*30, 'EVALUATION RESULT', '-'*31)
model._print_current_evals()
if(funcs is None):
funcs = model.get_compiled_funcs()
if(not os.path.exists(BEST_MODEL_PATH)):
model.save(BEST_MODEL_PATH)
best_model = VAFnet(archit).load(BEST_MODEL_PATH)
best_model.fit(X_train, y_train)
best_model.score(X_test, y_test)
best_model._print_evals_comparison(model.evals)
if(model.evals[select_model_metric][1][-1] <
best_model.evals[select_model_metric][1][-1]):
model.save(BEST_MODEL_PATH)
print("!"*80)
print("!"*30, "NEW BEST PREDICTOR", "!"*30)
print("!"*80)
for en in evals.keys():
results[en].append(model.evals[en][1][-1])
best_results[en].append(best_model.evals[en][1][-1])
for en in evals.keys():
eval = (
|
np.mean(results[en])
|
numpy.mean
|
from __future__ import print_function
import os, sys, gc
#from time import time
#from copy import deepcopy
import numpy as np
#import numpy.linalg as la
from warnings import warn
from Kuru import FunctionSpace, QuadratureRule
#from Florence.Base import JacobianError, IllConditionedError
#from Florence.Utils import PWD, RSWD
#from Florence.FunctionSpace import Tri
#from Florence.FunctionSpace import Tet
#from Florence.FunctionSpace import Quad, QuadES
#from Florence.FunctionSpace import Hex, HexES
from Kuru.FiniteElements.LocalAssembly.KinematicMeasures import *
from Kuru.FiniteElements.LocalAssembly._KinematicMeasures_ import _KinematicMeasures_
from Kuru import Mesh
#from Kuru import FEMSolver
#from Florence.MeshGeneration import vtk_writer
#from Florence.Utils import constant_camera_view
class PostProcess(object):
"""Post-process class for finite element solvers"""
def __init__(self,ndim,nvar):
self.domain_bases = None
self.postdomain_bases = None
self.boundary_bases = None
self.ndim = ndim
self.nvar = nvar
self.analysis_type = None
self.analysis_nature = None
self.material_type = None
self.is_scaledjacobian_computed = False
self.is_material_anisotropic = False
self.directions = None
self.mesh = None
self.sol = None
self.recovered_fields = None
self.formulation = None
self.material = None
self.fem_solver = None
self.parallel_model = None
self.ncpu = None
def SetBases(self,domain=None,postdomain=None,boundary=None):
"""Sets bases for all integration points for 'domain', 'postdomain' or 'boundary'
"""
if domain is None and postdomain is None and boundary is None:
warn("Nothing to be set")
self.domain_bases = domain
self.postdomain_bases = postdomain
self.boundary_bases = boundary
def SetAnalysis(self,analysis_type=None, analysis_nature=None):
self.analysis_type = analysis_type
self.analysis_nature = analysis_nature
def SetMesh(self,mesh):
"""Set initial (undeformed) mesh"""
self.mesh = mesh
def SetSolution(self,sol):
self.sol = sol
def SetFormulation(self,formulation):
self.formulation = formulation
def SetMaterial(self,materials):
self.materials = materials
def SetFEMSolver(self,fem_solver):
self.fem_solver = fem_solver
def SetGrowthRemodeling(self,gr_variables):
self.gr_variables = gr_variables
def NodeStressRecovery(self, mynode=0, imat=0, steps=None):
"""
steps: [list,np.1darray] for which time steps/increments the data should
be recovered
"""
if self.mesh is None:
raise ValueError("Mesh not set for post-processing")
if self.sol is None:
raise ValueError("Solution not set for post-processing")
if self.formulation is None:
raise ValueError("formulation not set for post-processing")
if self.materials is None:
raise ValueError("materials not set for post-processing")
if self.fem_solver is None:
raise ValueError("FEM solver not set for post-processing")
if self.sol.shape[1] > self.nvar:
return
det = np.linalg.det
inv = np.linalg.inv
mesh = self.mesh
fem_solver = self.fem_solver
formulation = self.formulation
material = self.materials[imat]
if not mynode in material.node_set:
raise ValueError("Node {} is not in material {}".format(mynode,imat))
# GET THE UNDERLYING LINEAR MESH
# lmesh = mesh.GetLinearMesh()
C = mesh.InferPolynomialDegree() - 1
ndim = mesh.InferSpatialDimension()
elements = mesh.elements
points = mesh.points
nelem = elements.shape[0]; npoint = points.shape[0]
nodeperelem = elements.shape[1]
# GET QUADRATURE
norder = 2*C
if norder == 0:
norder=1
# quadrature = QuadratureRule(qtype="gauss", norder=norder, mesh_type=mesh.element_type, optimal=3)
# Domain = FunctionSpace(mesh, quadrature, p=C+1)
Domain = FunctionSpace(mesh, p=C+1, evaluate_at_nodes=True)
Jm = Domain.Jm
AllGauss = Domain.AllGauss
Bases = Domain.Bases
# requires_geometry_update = fem_solver.requires_geometry_update
requires_geometry_update = True # ALWAYS TRUE FOR THIS ROUTINE
TotalDisp = self.sol[:,:]
if hasattr(self,"number_of_time_increments"):
TimeIncrement = self.number_of_time_increments
else:
TimeIncrement = fem_solver.number_of_time_increments
increments = range(TimeIncrement)
if steps is not None:
TimeIncrement = len(steps)
increments = steps
material_node = np.where(material.node_set==mynode)[0][0]
# COMPUTE THE COMMON/NEIGHBOUR NODES ONCE
Elss, Poss = mesh.GetNodeCommonality()[:2]
Pos = Poss[mynode]
MainDict = {}
MainDict['F'] = np.zeros((TimeIncrement,ndim,ndim))
MainDict['CauchyStress'] = np.zeros((TimeIncrement,ndim,ndim))
if self.gr_variables is not None:
MainDict['FibreStress'] = np.zeros((TimeIncrement,5))
material.ConnectivityOfMaterial(mesh)
Elsm, Posm = material.GetNodeCommonality()[:2]
Elm = Elsm[material_node]
ncommon_nodes_m = Elm.shape[0]
for incr, Increment in enumerate(increments):
if TotalDisp.ndim == 3:
Eulerx = points + TotalDisp[:,:ndim,Increment]
else:
Eulerx = points + TotalDisp[:,:ndim]
F = np.zeros((ncommon_nodes_m,nodeperelem,ndim,ndim))
CauchyStress = np.zeros((ncommon_nodes_m,ndim,ndim))
if self.gr_variables is not None:
FibreStress = np.zeros((ncommon_nodes_m,5))
# LOOP OVER ELEMENTS
for i in range(ncommon_nodes_m):
ielem = Elm[i]
elem = material.element_set[ielem]
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = points[elements[elem,:],:]
EulerELemCoords = Eulerx[elements[elem,:],:]
# GROWTH-REMODELING VALUES FOR THIS ELEMENT
if material.has_state_variables:
material.state_variables[:,9:21] = self.gr_variables[imat][:,:,Increment]
material.MappingStateVariables(mesh,Domain,elem)
if material.has_low_level_dispatcher:
CauchyStressAux = np.zeros((nodeperelem,ndim,ndim))
FibreStressAux = np.zeros((nodeperelem,5))
# GET LOCAL KINEMATICS
SpatialGradient, F[i,:,:,:], detJ, dV = _KinematicMeasures_(Jm, AllGauss[:,0],
LagrangeElemCoords, EulerELemCoords, requires_geometry_update)
# PARAMETERS FOR INCOMPRESSIBILITY (MEAN DILATATION METHOD HU-WASHIZU)
if material.is_incompressible:
MaterialVolume = np.sum(dV)
if material.has_growth_remodeling:
dve = np.true_divide(detJ,material.StateVariables[:,20])
CurrentVolume = np.sum(dve)
else:
CurrentVolume = np.sum(detJ)
material.pressure = material.kappa*(CurrentVolume-MaterialVolume)/MaterialVolume
# COMPUTE WORK-CONJUGATES AND HESSIAN AT THIS GAUSS POINT
counter = Pos[i]
CauchyStressAux[:,:], _ = material.KineticMeasures(F[i,:,:,:],elem=elem)
CauchyStress[i,:] = CauchyStressAux[counter,:]
if self.gr_variables is not None:
FibreStressAux[:,:], _ = material.LLConstituentStress(F[i,:,:,:],elem=elem)
FibreStress[i,:] = FibreStressAux[counter,:]
else:
# GAUSS LOOP IN VECTORISED FORM
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F[i,:,:,:] = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F[i,:,:,:], fem_solver.analysis_nature)
# GEOMETRY UPDATE IS A MUST
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientx = np.einsum('ijk,jl->kil',Jm,EulerELemCoords)
# SPATIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla (N)]
SpatialGradient = np.einsum('ijk,kli->ilj',inv(ParentGradientx),Jm)
# COMPUTE ONCE detJ (GOOD SPEEDUP COMPARED TO COMPUTING TWICE)
detJ = np.einsum('i,i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)),
np.abs(StrainTensors['J']))
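                    # Editorial comment (not in the original Kuru source): the
                    # 'i,i,i->i' signature above is an elementwise product of
                    # three per-Gauss-point scalars -- the quadrature weight
                    # AllGauss[:,0], the reference-map Jacobian
                    # |det(ParentGradientX)| and the deformation Jacobian |J| --
                    # yielding the deformed integration weight at each point.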
# COMPUTE PARAMETERS FOR MEAN DILATATION METHOD, IT NEEDS TO BE BEFORE COMPUTE HESSIAN AND STRESS
if material.is_incompressible:
dV = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
MaterialVolume = np.sum(dV)
if material.has_growth_remodeling:
dve = np.true_divide(detJ,material.StateVariables[:,20])
CurrentVolume = np.sum(dve)
else:
CurrentVolume = np.sum(detJ)
material.pressure = material.kappa*(CurrentVolume-MaterialVolume)/MaterialVolume
counter = Pos[i]
CauchyStress[i,:] = material.CauchyStress(StrainTensors,elem,counter)
if self.gr_variables is not None:
FibreStress[i,:],_ = material.ConstituentStress(StrainTensors,elem,counter)
for i in range(ncommon_nodes_m):
MainDict['F'][incr,:,:] += F[i,Pos[i],:,:]
MainDict['CauchyStress'][incr,:,:] += CauchyStress[i,:,:]
if self.gr_variables is not None:
MainDict['FibreStress'][incr,:] += FibreStress[i,:]
# AVERAGE OUT
MainDict['F'][incr,:,:] /= ncommon_nodes_m
MainDict['CauchyStress'][incr,:,:] /= ncommon_nodes_m
if self.gr_variables is not None:
MainDict['FibreStress'][incr,:] /= ncommon_nodes_m
self.node_recovered_fields = MainDict
return
def StressRecovery(self, steps=None, average_derived_quantities=True, time_problem=True):
"""
steps: [list,np.1darray] for which time steps/increments the data should
be recovered
"""
if self.mesh is None:
raise ValueError("Mesh not set for post-processing")
if self.sol is None:
raise ValueError("Solution not set for post-processing")
if self.formulation is None:
raise ValueError("formulation not set for post-processing")
if self.materials is None:
raise ValueError("materials not set for post-processing")
if self.fem_solver is None:
raise ValueError("FEM solver not set for post-processing")
if self.sol.shape[1] > self.nvar:
return
det = np.linalg.det
inv = np.linalg.inv
mesh = self.mesh
fem_solver = self.fem_solver
formulation = self.formulation
materials = self.materials
# GET THE UNDERLYING LINEAR MESH
# lmesh = mesh.GetLinearMesh()
C = mesh.InferPolynomialDegree() - 1
ndim = mesh.InferSpatialDimension()
elements = mesh.elements
points = mesh.points
nelem = elements.shape[0]; npoint = points.shape[0]
nodeperelem = elements.shape[1]
# GET QUADRATURE
norder = 2*C
if norder == 0:
norder=1
# quadrature = QuadratureRule(qtype="gauss", norder=norder, mesh_type=mesh.element_type, optimal=3)
# Domain = FunctionSpace(mesh, quadrature, p=C+1)
Domain = FunctionSpace(mesh, p=C+1, evaluate_at_nodes=True)
Jm = Domain.Jm
AllGauss = Domain.AllGauss
Bases = Domain.Bases
# requires_geometry_update = fem_solver.requires_geometry_update
requires_geometry_update = True # ALWAYS TRUE FOR THIS ROUTINE
TotalDisp = self.sol[:,:]
if time_problem is True:
if hasattr(self,"number_of_time_increments"):
TimeIncrement = self.number_of_time_increments
else:
TimeIncrement = fem_solver.number_of_time_increments
increments = range(TimeIncrement)
if steps is not None:
TimeIncrement = len(steps)
increments = steps
MainDict = {}
MainDict['F'] = np.zeros((TimeIncrement,npoint,ndim,ndim))
MainDict['CauchyStress'] = [[] for i in range(len(materials))]
MainDict['FibreStress'] = [[] for i in range(len(materials))]
for imat in range(len(materials)):
materials[imat].ConnectivityOfMaterial(mesh)
MainDict['CauchyStress'][imat] = np.zeros((TimeIncrement,materials[imat].node_set.shape[0],ndim,ndim))
if self.gr_variables is not None:
MainDict['FibreStress'][imat] = np.zeros((TimeIncrement,materials[imat].node_set.shape[0],5))
else:
if hasattr(self,"number_of_load_increments"):
                LoadIncrement = self.number_of_load_increments
else:
LoadIncrement = fem_solver.number_of_load_increments
increments = range(LoadIncrement)
if steps is not None:
LoadIncrement = len(steps)
increments = steps
MainDict = {}
MainDict['F'] = np.zeros((LoadIncrement,npoint,ndim,ndim))
MainDict['CauchyStress'] = [[] for i in range(len(materials))]
MainDict['FibreStress'] = [[] for i in range(len(materials))]
for imat in range(len(materials)):
materials[imat].ConnectivityOfMaterial(mesh)
MainDict['CauchyStress'][imat] = np.zeros((LoadIncrement,materials[imat].node_set.shape[0],ndim,ndim))
if self.gr_variables is not None:
MainDict['FibreStress'][imat] = np.zeros((LoadIncrement,materials[imat].node_set.shape[0],5))
# COMPUTE THE COMMON/NEIGHBOUR NODES ONCE
all_nodes = np.unique(elements)
Elss, Poss = mesh.GetNodeCommonality()[:2]
for incr, Increment in enumerate(increments):
if TotalDisp.ndim == 3:
Eulerx = points + TotalDisp[:,:ndim,Increment]
else:
Eulerx = points + TotalDisp[:,:ndim]
F = np.zeros((nelem,nodeperelem,ndim,ndim))
for imat in range(len(materials)):
material = materials[imat]
Elsm, Posm = material.GetNodeCommonality()[:2]
CauchyStress = np.zeros((material.element_set.shape[0],nodeperelem,ndim,ndim))
if self.gr_variables is not None:
FibreStress = np.zeros((material.element_set.shape[0],nodeperelem,5))
# LOOP OVER ELEMENTS
for ielem in range(material.element_set.shape[0]):
elem = material.element_set[ielem]
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = points[elements[elem,:],:]
EulerELemCoords = Eulerx[elements[elem,:],:]
# GROWTH-REMODELING VALUES FOR THIS ELEMENT
if material.has_state_variables:
if self.gr_variables is None:
material.MappingStateVariables(mesh,Domain,elem)
elif self.gr_variables is not None:
material.state_variables[:,9:21] = self.gr_variables[imat][:,:,Increment]
material.MappingStateVariables(mesh,Domain,elem)
if material.has_low_level_dispatcher:
# GET LOCAL KINEMATICS
SpatialGradient, F[elem,:,:,:], detJ, dV = _KinematicMeasures_(Jm, AllGauss[:,0],
LagrangeElemCoords, EulerELemCoords, requires_geometry_update)
# PARAMETERS FOR INCOMPRESSIBILITY (MEAN DILATATION METHOD HU-WASHIZU)
if material.is_incompressible:
MaterialVolume = np.sum(dV)
if material.has_growth_remodeling:
dve = np.true_divide(detJ,material.StateVariables[:,20])
CurrentVolume = np.sum(dve)
else:
CurrentVolume = np.sum(detJ)
material.pressure = material.kappa*(CurrentVolume-MaterialVolume)/MaterialVolume
# COMPUTE WORK-CONJUGATES AND HESSIAN AT THIS GAUSS POINT
CauchyStress[ielem,:,:], _ = material.KineticMeasures(F[elem,:,:,:],elem=elem)
if self.gr_variables is not None and material.has_state_variables:
FibreStress[ielem,:,:], _ = material._ConstituentMeasures_(F[elem,:,:,:],elem=elem)
else:
# GAUSS LOOP IN VECTORISED FORM
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F[elem,:,:,:] = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F[elem,:,:,:], fem_solver.analysis_nature)
# GEOMETRY UPDATE IS A MUST
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientx = np.einsum('ijk,jl->kil',Jm,EulerELemCoords)
# SPATIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla (N)]
SpatialGradient = np.einsum('ijk,kli->ilj',inv(ParentGradientx),Jm)
# COMPUTE ONCE detJ (GOOD SPEEDUP COMPARED TO COMPUTING TWICE)
detJ = np.einsum('i,i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)),
np.abs(StrainTensors['J']))
# COMPUTE PARAMETERS FOR MEAN DILATATION METHOD, IT NEEDS TO BE BEFORE COMPUTE HESSIAN AND STRESS
if material.is_incompressible:
dV = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
MaterialVolume = np.sum(dV)
if material.has_growth_remodeling:
dve = np.true_divide(detJ,material.StateVariables[:,20])
CurrentVolume = np.sum(dve)
else:
CurrentVolume = np.sum(detJ)
material.pressure = material.kappa*(CurrentVolume-MaterialVolume)/MaterialVolume
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
CauchyStress[ielem,counter,:] = material.CauchyStress(StrainTensors,elem,counter)
if self.gr_variables is not None and material.has_state_variables:
FibreStress[ielem,counter,:],_ = material.ConstituentMeasures(StrainTensors,elem,counter)
if average_derived_quantities:
for inode in range(material.node_set.shape[0]):
Els, Pos = Elsm[inode], Posm[inode]
ncommon_nodes = Els.shape[0]
for uelem in range(ncommon_nodes):
MainDict['CauchyStress'][imat][incr,inode,:,:] += CauchyStress[Els[uelem],Pos[uelem],:,:]
if self.gr_variables is not None:
MainDict['FibreStress'][imat][incr,inode,:] += FibreStress[Els[uelem],Pos[uelem],:]
# AVERAGE OUT
MainDict['CauchyStress'][imat][incr,inode,:,:] /= ncommon_nodes
if self.gr_variables is not None:
MainDict['FibreStress'][imat][incr,inode,:] /= ncommon_nodes
else:
for inode in range(material.node_set.shape[0]):
Els, Pos = Elsm[inode], Posm[inode]
ncommon_nodes = Els.shape[0]
uelem = 0
MainDict['CauchyStress'][imat][incr,inode,:,:] = CauchyStress[Els[uelem],Pos[uelem],:,:]
if self.gr_variables is not None:
MainDict['FibreStress'][imat][incr,inode,:] = FibreStress[Els[uelem],Pos[uelem],:]
if average_derived_quantities:
for inode in all_nodes:
Els, Pos = Elss[inode], Poss[inode]
ncommon_nodes = Els.shape[0]
for uelem in range(ncommon_nodes):
MainDict['F'][incr,inode,:,:] += F[Els[uelem],Pos[uelem],:,:]
# AVERAGE OUT
MainDict['F'][incr,inode,:,:] /= ncommon_nodes
else:
for inode in all_nodes:
Els, Pos = Elss[inode], Poss[inode]
ncommon_nodes = Els.shape[0]
uelem = 0
MainDict['F'][incr,inode,:,:] = F[Els[uelem],Pos[uelem],:,:]
self.recovered_fields = MainDict
return
def AverageDeformationGradient(self, element_sets, fibre_direction):
"""
steps: [list,np.1darray] for which time steps/increments the data should
be recovered
"""
if self.mesh is None:
raise ValueError("Mesh not set for post-processing")
if self.sol is None:
raise ValueError("Solution not set for post-processing")
if self.formulation is None:
raise ValueError("formulation not set for post-processing")
if self.materials is None:
raise ValueError("materials not set for post-processing")
if self.fem_solver is None:
raise ValueError("FEM solver not set for post-processing")
if self.sol.shape[1] > self.nvar:
return
det = np.linalg.det
inv = np.linalg.inv
mesh = self.mesh
fem_solver = self.fem_solver
formulation = self.formulation
materials = self.materials
# GET THE UNDERLYING LINEAR MESH
# lmesh = mesh.GetLinearMesh()
C = mesh.InferPolynomialDegree() - 1
ndim = mesh.InferSpatialDimension()
elements = mesh.elements
points = mesh.points
nelem = elements.shape[0]; npoint = points.shape[0]
nodeperelem = elements.shape[1]
# GET QUADRATURE
norder = 2*C
if norder == 0:
norder=1
# quadrature = QuadratureRule(qtype="gauss", norder=norder, mesh_type=mesh.element_type, optimal=3)
# Domain = FunctionSpace(mesh, quadrature, p=C+1)
Domain = FunctionSpace(mesh, p=C+1, evaluate_at_nodes=True)
Jm = Domain.Jm
AllGauss = Domain.AllGauss
Bases = Domain.Bases
# requires_geometry_update = fem_solver.requires_geometry_update
requires_geometry_update = True # ALWAYS TRUE FOR THIS ROUTINE
TotalDisp = self.sol[:,:]
# COMPUTE THE COMMON/NEIGHBOUR NODES ONCE
all_nodes = np.unique(elements)
Elss, Poss = mesh.GetNodeCommonality()[:2]
I =
|
np.eye(3,3,dtype=np.float64)
|
numpy.eye
|
import numpy as np
from matplotlib import pyplot as plt
import keyboard
size = 15 # random map generator
mapa = [[list(np.random.uniform(0, 1, 3))] * size for i in range(size)]
mapah = [[1] * size for i in range(size)]
for i in range(size-2):
for j in range(size-2):
mapah[i+1][j+1] = np.random.choice([0.3, 0.4, 0.7, 1])
if np.random.uniform() > 0.33:
mapa[i+1][j+1] = 0
posx, posy, posz = (1, np.random.randint(1, size -1), 0.5)
rot, rot_v = (np.pi/4, 0)
x, y, z = (posx, posy, posz)
mapa[x][y] = 0
count = 0
while True:
testx, testy = (x, y)
if np.random.uniform() > 0.5:
testx = testx + np.random.choice([-1, 1])
else:
testy = testy + np.random.choice([-1, 1])
if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
if mapa[testx][testy] == 0 or count > 5:
count = 0
x, y = (testx, testy)
mapa[x][y] = 0
if x == size-2:
exitx, exity = (x, y)
break
else:
count = count+1
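# Editorial note (not part of the original script): the loop above carves a
# corridor with a random walk from the start cell, stepping onto already-empty
# cells (or forcing a step after more than five rejected attempts), and stops
# once the walk reaches x == size-2, recording that cell as the exit.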
mod = 1 # resolution modifier
inc = 0.05/mod # ray increment
height = int(48*mod)
width = int(60*mod)
while True: #main game loop
pixels = []
for j in range(height): #vertical loop
pixels.append([])
rot_j = np.deg2rad(24 + rot_v - j/mod)
for i in range(width): #horizontal vision loop
rot_i = rot + np.deg2rad(i/mod - 30)
x, y, z = (posx, posy, posz)
sin, cos, = (inc*np.sin(rot_i), inc*np.cos(rot_i))
sinz = inc*np.sin(rot_j)
n = 0
while True: # ray loop
x, y, z = (x + cos, y + sin, z + sinz)
n = n+1
if mapa[int(x)][int(y)] != 0 and z <= mapah[int(x)][int(y)]:
h = np.clip(1/(inc * n), 0, 1)
c = np.asarray(mapa[int(x)][int(y)])*(0.3 + 0.7 * h**2)
pixels[j].append(c)
break
elif z > 1: # ceiling
h = 0.3 + 0.7*np.clip(1/(inc * n), 0, 1)**2
if int(x*5)%2 ==1:
pixels[j].append(np.asarray([.8,1,.9])*h)
else:
pixels[j].append(
|
np.asarray([0.5,0.5,1])
|
numpy.asarray
|
import numpy as np
from deicode._optspace import optspace
from .base import _BaseImpute
from scipy.spatial import distance
import warnings
class OptSpace(_BaseImpute):
def __init__(self, rank=2, iteration=5, tol=1e-5):
"""
OptSpace is a matrix completion algorithm based on a singular value
decomposition (SVD) optimized on a local manifold. It has been shown to
be quite robust to noise in low rank datasets (1).
The objective function that it is trying to optimize
over is given by:
            min_{U,S,V} |P(Y - U*S*V^{T})|_{2}^{2}
        where U and V are the matrices to be estimated and S is a
        diagonal matrix of singular values. Y holds the observed
        values and P is a masking function such that the errors
        between Y and USV are only computed on the nonzero
        (observed) entries; a brief sketch of this masked objective
        is given in a comment just after this docstring.
Parameters
----------
X: numpy.ndarray - a rclr preprocessed matrix of shape (M,N)
N = Features (i.e. OTUs, metabolites)
M = Samples
        rank: int, optional : Default is 2
            The underlying rank of the dataset; set to 2 by default
            to help prevent overfitting.
        iteration: float, optional : Default is 5
            The number of convex iterations used to optimize the solution.
            If iteration is not specified, the default of 5 is used,
            which generally reduces the error to a satisfactory threshold.
tol: float, optional : Default is 1e-5
Error reduction break, if the error reduced is
less than this value it will return the solution
Returns
-------
U: numpy.ndarray - "Sample Loadings" or the unitary matrix
having left singular vectors as columns. Of shape (M,rank)
s: numpy.ndarray - The singular values,
sorted in non-increasing order. Of shape (rank,rank).
V: numpy.ndarray - "Feature Loadings" or Unitary matrix
having right singular vectors as rows. Of shape (N,rank)
solution: numpy.ndarray - (U*S*V.transpose()) of shape (M,N)
distance: numpy.ndarray - Distance between each
pair of the two collections of inputs. Of shape (M,M)
Raises
------
ValueError
            Raises an error if input is not either a dataframe or np.ndarray
            `ValueError: Input data should be type numpy.ndarray`.
Raises an error if input data does not contain any nans or zeros
`ValueError: Data-table contains no missing
data in the format np.nan or 0`.
Raises an error if input data contains infs
`ValueError: Data-table contains either np.inf or -np.inf`.
Raises an error if input data and rank violates min(M,N)<rank
`ValueError: The rank must be significantly less than the
minimum shape of the input table`.
Raises an error if rank*10> M(Samples)
`ValueError: There are not sufficient samples to run
must have rank*10 samples in the table`.
References
----------
        .. [1] <NAME>, <NAME>, <NAME>. 2009. Matrix completion
            from a few entries. IEEE International
            Symposium on Information Theory.
Examples
--------
>>> from deicode.optspace import OptSpace
>>> from deicode.preprocessing import rclr
>>> import numpy as np
rclr preprocessing
data is numpy.ndarray
        - an array of counts (samples, features)
(with shape (M,N) where N>M)
>>> data=np.array([[3, 3, 0], [0, 4, 2], [3, 0, 1]])
>>> table_rclr=rclr().fit_transform(data)
OptSpace (RPCA)
>>> opt=OptSpace().fit(table_rclr)
numpy.ndarray - "Sample Loadings"
>>> U=opt.sample_weights
numpy.ndarray - "Feature Loadings"
>>> V=opt.feature_weights
numpy.ndarray - The singular values
>>> s=opt.s
numpy.ndarray - (U*S*V.transpose()) of shape (M,N)
>>> result=opt.solution
or
>>> U,s,V=OptSpace().fit_transform(table_rclr)
numpy.ndarray - fully dense (no zeros) of shape (M,N)
>>> result=np.dot(np.dot(U,s),V.T)
"""
self.rank = rank
self.iteration = iteration
self.tol = tol
return
def fit(self, X):
"""
Fit the model to X_sparse
"""
X_sparse = X.copy().astype(np.float64)
self.X_sparse = X_sparse
self._fit()
return self
def _fit(self):
# make copy for imputation, check type
X_sparse = self.X_sparse
if not isinstance(X_sparse, np.ndarray):
X_sparse = np.array(X_sparse)
if not isinstance(X_sparse, np.ndarray):
            raise ValueError('Input data should be type numpy.ndarray')
if (np.count_nonzero(X_sparse) == 0 and
np.count_nonzero(~np.isnan(X_sparse)) == 0):
raise ValueError('No missing data in the format np.nan or 0')
if np.count_nonzero(np.isinf(X_sparse)) != 0:
raise ValueError('Contains either np.inf or -np.inf')
if self.rank >
|
np.min(X_sparse.shape)
|
numpy.min
|
import unittest
import numpy as np
from nptest import nptest
class MathematicalFunctionsTests(unittest.TestCase):
#region Trigonometric Functions
def test_sin_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.sin(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b = np.sin(a)
print(b)
a = np.arange(0, 10, dtype = np.int16)
a = a[::2]
b = np.sin(a)
print(b)
print("********")
a = np.arange(0, 10, dtype = np.float64).reshape((1,2,5))
a = a[::2]
b = np.sin(a)
print(b)
print("********")
a = np.array([[0,1,2,3,4],[5,6,7,8,9]])
a = a[::2]
x = a>2
out = np.zeros_like(a, dtype=np.float64)
b = np.sin(a, where= x, out = out )
print(b)
def test_sin_3(self):
a = np.arange(0, 5, dtype = np.float64)
b = np.sin(a)
c = np.sin(a[::-1])
print(b)
print(c)
def test_cos_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.cos(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b = np.cos(a)
print(b)
a = np.arange(0, 10, dtype = np.int16)
a = a[::2]
b = np.cos(a)
print(b)
print("********")
a = np.arange(0, 10, dtype = np.float64).reshape((1,2,5))
a = a[::2]
b = np.cos(a)
print(b)
print("********")
a = np.array([[0,1,2,3,4],[5,6,7,8,9]])
a = a[::2]
x = a>2
out = np.zeros_like(a, dtype=np.float64)
b = np.cos(a, where= x, out = out )
print(b)
def test_tan_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.tan(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b = np.tan(a)
print(b)
a = np.arange(0, 10, dtype = np.int16)
a = a[::2]
b = np.tan(a)
print(b)
print("********")
a = np.arange(0, 10, dtype = np.float64).reshape((1,2,5))
a = a[::2]
b = np.tan(a)
print(b)
print("********")
a = np.array([[0,1,2,3,4],[5,6,7,8,9]])
a = a[::2]
x = a>2
out = np.zeros_like(a, dtype=np.float64)
b = np.tan(a, where= x, out = out )
print(b)
def test_arcsin_1(self):
a = np.linspace(-1.0, 1.0, 12)
print(a)
b = np.arcsin(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12).reshape((2,2,3))
a = a[::2]
b = np.arcsin(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12)
a = a[::2]
x = a > -0.5
print(x)
out = np.zeros_like(a, dtype=np.float64)
b = np.arcsin(a, where= x, out = out )
print(b)
def test_arccos_1(self):
a = np.linspace(-1.0, 1.0, 12)
print(a)
b = np.arccos(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12).reshape((2,2,3))
a = a[::2]
b = np.arccos(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12)
a = a[::2]
x = a > -0.5
print(x)
out = np.zeros_like(a, dtype=np.float64)
b = np.arccos(a, where= x, out = out )
print(b)
def test_arctan_1(self):
a = np.linspace(-1.0, 1.0, 12)
print(a)
b = np.arctan(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12).reshape((2,2,3))
a = a[::2]
b = np.arctan(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12)
a = a[::2]
x = a > -0.5
print(x)
out = np.zeros_like(a, dtype=np.float64)
b = np.arctan(a, where= x, out = out )
print(b)
def test_hypot_1(self):
a = np.hypot(np.ones((3, 3)) * 3, np.ones((3, 3)) * 4)
print(a)
b = np.hypot(np.ones((3, 3)) * 3, [4])
print(b)
def test_arctan2_1(self):
x = np.array([-1, +1, +1, -1])
y = np.array([-1, -1, +1, +1])
z = np.arctan2(y, x) * 180 / np.pi
print(z)
a = np.arctan2([1., -1.], [0., 0.])
print(a)
b = np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
print(b)
def test_degrees_1(self):
rad = np.arange(12.)*np.pi/6
a = np.degrees(rad)
print(a)
out = np.zeros((rad.shape))
r = np.degrees(rad, out)
print(np.all(r == out))
def test_radians_1(self):
deg = np.arange(12.0, dtype=np.float64) * 30.0;
a = np.radians(deg)
print(a)
out = np.zeros((deg.shape))
r = np.radians(deg, out)
print(np.all(r == out))
def test_rad2deg_1(self):
rad = np.arange(12.)*np.pi/6
a = np.rad2deg(rad)
print(a)
out = np.zeros((rad.shape))
r = np.rad2deg(rad, out)
print(np.all(r == out))
def test_deg2rad_1(self):
deg = np.arange(12.0, dtype=np.float64) * 30.0;
a = np.deg2rad(deg)
print(a)
out = np.zeros((deg.shape))
r = np.deg2rad(deg, out)
print(np.all(r == out))
#endregion
#region Hyperbolic functions
def test_sinh_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.sinh(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b = np.sinh(a)
print(b)
a = np.arange(0, 10, dtype = np.int16)
a = a[::2]
b = np.sinh(a)
print(b)
print("********")
a = np.arange(0, 10, dtype = np.float64).reshape((1,2,5))
a = a[::2]
b = np.sinh(a)
print(b)
print("********")
a = np.array([[0,1,2,3,4],[5,6,7,8,9]])
a = a[::2]
x = a>2
out = np.zeros_like(a, dtype=np.float64)
b = np.sinh(a, where= x, out = out )
print(b)
def test_cosh_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.cosh(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b = np.cosh(a)
print(b)
a = np.arange(0, 10, dtype = np.int16)
a = a[::2]
b = np.cosh(a)
print(b)
print("********")
a = np.arange(0, 10, dtype = np.float64).reshape((1,2,5))
a = a[::2]
b = np.cosh(a)
print(b)
print("********")
a = np.array([[0,1,2,3,4],[5,6,7,8,9]])
a = a[::2]
x = a>2
out = np.zeros_like(a, dtype=np.float64)
b = np.cosh(a, where= x, out = out )
print(b)
def test_tanh_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.tanh(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b = np.tanh(a)
print(b)
a = np.arange(0, 10, dtype = np.int16)
a = a[::2]
b = np.tanh(a)
print(b)
print("********")
a = np.arange(0, 10, dtype = np.float64).reshape((1,2,5))
a = a[::2]
b = np.tanh(a)
print(b)
print("********")
a = np.array([[0,1,2,3,4],[5,6,7,8,9]])
a = a[::2]
x = a>2
out = np.zeros_like(a, dtype=np.float64)
b = np.tanh(a, where= x, out = out )
print(b)
def test_arcsinh_1(self):
a = np.linspace(-1.0, 1.0, 12)
b = np.arcsinh(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12).reshape((2,2,3))
a = a[::2]
b = np.arcsinh(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12)
a = a[::2]
x = a > -0.5
print(x)
out = np.zeros_like(a, dtype=np.float64)
b = np.arcsinh(a, where= x, out = out )
print(b)
def test_arccosh_1(self):
a = np.linspace(1.0, 2.0, 12)
b = np.arccosh(a)
print(b)
print("********")
a = np.linspace(1.0, 2.0, 12).reshape((2,2,3))
a = a[::2]
b = np.arccosh(a)
print(b)
print("********")
a = np.linspace(1.0, 2.0, 12)
a = a[::2]
x = a > 1.5
print(x)
out = np.zeros_like(a, dtype=np.float64)
b = np.arccosh(a, where= x, out = out )
print(b)
def test_arctanh_1(self):
a = np.linspace(-1.0, 1.0, 12)
b = np.arctanh(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12).reshape((2,2,3))
a = a[::2]
b = np.arctanh(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12)
a = a[::2]
x = a > -0.5
print(x)
out = np.zeros_like(a, dtype=np.float64)
b = np.arctanh(a, where= x, out = out )
print(b)
#endregion
#region Rounding Functions
def test_around_1(self):
a = np.around([0.37, 1.64])
print(a)
b = np.around([0.37, 1.64], decimals=1)
print(b)
c = np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
print(c)
d = np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
print(d)
e = np.around([1,2,3,11], decimals=-1)
print(e)
def test_round_1(self):
a = np.linspace(-1.0, 1.0, 12).reshape((2,2,3))
print(a)
print("********")
b = np.round_(a, 2)
print(b)
print("********")
c = np.round(a,2)
print(c)
print("********")
b = np.round_(a, 4)
print(b)
print("********")
c = np.round(a,4)
print(c)
def test_rint_1(self):
a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0, -4.2])
b = np.rint(a)
print(b)
b = np.rint(a.reshape(2,4))
print(b)
x = a > 0.0
print(x)
b = np.rint(a, where = x)
print(b)
def test_fix_1(self):
a = np.fix(3.14)
print(a)
b = np.fix(3)
print(b)
c = np.fix([2.1, 2.9, -2.1, -2.9])
print(c)
d = np.fix([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
print(d)
def test_floor_1(self):
x = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
y = np.floor(x);
print(x)
print(y)
return
def test_ceil_1(self):
a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
b = np.ceil(a)
print(b)
def test_trunc_1(self):
a = np.trunc(3.14)
print(a)
b = np.trunc(3)
print(b)
c = np.trunc([2.1, 2.9, -2.1, -2.9])
print(c)
d = np.trunc([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
print(d)
#endregion
#region Sums, products, differences
def test_prod_1(self):
#x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.uint32).reshape(3, 2, -1)
x = np.array([10,15,25,45,78,90, 10, 15, 25, 45, 78, 90 ], dtype= np.uint64)
y = np.prod(x);
print(x)
print(y)
return
def test_prod_2(self):
a = np.prod([1.,2.])
print(a)
print("*****")
b = np.prod([[1.,2.],[3.,4.]])
print(b)
print("*****")
c = np.prod([[1.,2.],[3.,4.]], axis=1)
print(c)
print("*****")
d = np.array([1, 2, 3], dtype=np.uint8)
e = np.prod(d).dtype == np.uint
print(e)
print("*****")
f = np.array([1, 2, 3], dtype=np.int8)
g = np.prod(f).dtype == int
print(g)
print("*****")
def test_prod_3(self):
a = np.array([1,2,3])
        b = np.prod(a)  # total product 1*2*3 = 6
print(b)
print("*****")
a = np.array([[1, 2, 3], [4, 5, 6]])
c = np.prod(a, dtype=float) # specify type of output
print(c)
print("*****")
d = np.prod(a, axis=0)
print(d)
print("*****")
e = np.prod(a,axis=1)
print(e)
print("*****")
def test_sum_1(self):
x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.uint32).reshape(3, 2, -1)
x = x * 3
y = np.sum(x);
print(x)
print(y)
return
def test_sum_2(self):
x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.uint32).reshape(3, 2, -1)
x = x * 3
y = np.sum(x, axis=0);
print(y)
print("*****")
y = np.sum(x, axis=1);
print(y)
print("*****")
y = np.sum(x, axis=2);
print(y)
print("*****")
return
def test_sum_3(self):
x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.float64).reshape(3, 2, -1)
x = x * 3.456
y = np.sum(x, axis=0);
print(y)
print("*****")
y = np.sum(x, axis=1);
print(y)
print("*****")
y = np.sum(x, axis=2);
print(y)
print("*****")
return
def test_sum_keepdims(self):
x = np.array([10,15,25,45,78,90], dtype= np.float64)
y = np.sum(x);
print(y)
print(y.shape)
print("*****")
print("keepdims")
y = np.sum(x, keepdims = True);
print(y)
print(y.shape)
print("*****")
x = np.array([10,15,25,45,78,90], dtype= np.float64).reshape(3, 2, -1)
y = np.sum(x, axis=1);
print(y)
print(y.shape)
print("*****")
print("keepdims")
y = np.sum(x, axis=1, keepdims = True);
print(y)
print(y.shape)
print("*****")
x = np.array([10,15,25,45,78,90], dtype= np.float64).reshape(-1, 3, 2)
y = np.sum(x, axis=2);
print(y)
print(y.shape)
print("*****")
print("keepdims")
y = np.sum(x, axis=2, keepdims = True);
print(y)
print(y.shape)
print("*****")
return
def test_cumprod_1(self):
x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.uint32).reshape(3, 2, -1)
x = x * 3
y = np.cumprod(x);
print(y)
x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.int32).reshape(3, 2, -1)
x = x * 3
y = np.cumprod(x);
print(y)
return
def test_cumprod_1a(self):
x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.uint64).reshape(3, 2, -1)
x = x * 1
y = np.cumprod(x);
print(y)
x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.int64).reshape(3, 2, -1)
x = x * 1
y = np.cumprod(x);
print(y)
return
def test_cumprod_2(self):
a = np.array([1,2,3])
b = np.cumprod(a) # intermediate results 1, 1*2
# total product 1*2*3 = 6
print(b)
print("*****")
a = np.array([[1, 2, 3], [4, 5, 6]])
c = np.cumprod(a, dtype=float) # specify type of output
print(c)
print("*****")
d = np.cumprod(a, axis=0)
print(d)
print("*****")
e = np.cumprod(a,axis=1)
print(e)
print("*****")
def test_cumsum_1(self):
x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.uint32).reshape(3, 2, -1)
x = x * 3
y = np.cumsum(x);
print(x)
print(y)
return
def test_cumsum_2(self):
a = np.array([[1,2,3], [4,5,6]])
print(a)
print("*****")
b = np.cumsum(a)
print(b)
print("*****")
c = np.cumsum(a, dtype=float) # specifies type of output value(s)
print(c)
print("*****")
d = np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
print(d)
print("*****")
e = np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
print(e)
return
def test_cumsum_3(self):
a = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]).reshape(2,3,-1)
print(a)
print("*****")
b = np.cumsum(a)
print(b)
print("*****")
c = np.cumsum(a, dtype=float) # specifies type of output value(s)
print(c)
print("*****")
d = np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
print(d)
print("*****")
e = np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
print(e)
print("*****")
f = np.cumsum(a,axis=2) # sum over columns for each of the 2 rows
print(f)
print("*****")
#g = np.cumsum(a,axis=3) # sum over columns for each of the 2 rows
#print(g)
def test_cumsum_4(self):
a = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]).reshape(3,2,-1)
print(a)
print("*****")
b = np.cumsum(a)
print(b)
print("*****")
c = np.cumsum(a, dtype=float) # specifies type of output value(s)
print(c)
print("*****")
d = np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
print(d)
print("*****")
e = np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
print(e)
f = np.cumsum(a,axis=2) # sum over columns for each of the 2 rows
print(f)
#g = np.cumsum(a,axis=3) # sum over columns for each of the 2 rows
#print(g)
def test_diff_1(self):
x = np.array([10,15,25,45,78,90], dtype= np.uint32)
x = x * 3
y = np.diff(x[1:]);
print(x)
print(y)
return
def test_diff_2(self):
x = np.array([10,15,25,45,78,90], dtype= np.uint32).reshape(2, -1)
x = x * 3
y = np.diff(x, axis=0);
print(x)
print(y)
return
def test_diff_3(self):
x = np.array([10,15,25,45,78,90,10,15,25,45,78,90], dtype= np.uint32).reshape(3, 2, -1)
x = x * 3
y = np.diff(x,axis=2);
print(x)
print(y)
return
def test_ediff1d_1(self):
x = np.array([1, 2, 4, 7, 0])
y = np.ediff1d(x)
print(y)
print(np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])))
y = [[1, 2, 4], [1, 6, 24]]
print(np.ediff1d(y))
def test_gradient_1(self):
f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
a = nptest.gradient(f)
print(a)
print("***********")
b = nptest.gradient(f, 2)
print(b)
print("***********")
#Spacing can be also specified with an array that represents the coordinates
#of the values F along the dimensions.
#For instance a uniform spacing:
x = np.arange(f.size)
c = nptest.gradient(f, x)
print(c)
print("***********")
#Or a non uniform one:
x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float)
d = nptest.gradient(f, x)
print(d)
def test_gradient_2(self):
#For two dimensional arrays, the return will be two arrays ordered by
#axis. In this example the first array stands for the gradient in
#rows and the second one in columns direction:
a = nptest.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
print(a)
print("***********")
#In this example the spacing is also specified:
#uniform for axis=0 and non uniform for axis=1
dx = 2.
y = [1., 1.5, 3.5]
b = nptest.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y)
print(b)
print("***********")
#It is possible to specify how boundaries are treated using `edge_order`
x = np.array([0, 1, 2, 3, 4])
f = x**2
c = nptest.gradient(f, edge_order=1)
print(c)
print("***********")
d = nptest.gradient(f, edge_order=2)
print(d)
print("***********")
#The `axis` keyword can be used to specify a subset of axes of which the
#gradient is calculated
e = nptest.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0)
print(e)
def test_trapz_1(self):
a = np.trapz([1,2,3])
print(a)
b = np.trapz([1,2,3], x=[4,6,8])
print(b)
c = np.trapz([1,2,3], dx=2)
print(c)
a = np.arange(6).reshape(2, 3)
b = np.trapz(a, axis=0)
print(b)
c = np.trapz(a, axis=1)
print(c)
#endregion
#region Exponents and logarithms
def test_exp_1(self):
x = np.array([1e-10, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0, -4.2])
a = np.exp(x)
print(a)
a = np.exp(x.reshape(2,-1))
print(a)
b = x > 0
a = np.exp(x, where= b)
print(a)
return
def test_expm1_1(self):
x = np.array([1e-10, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0, -4.2])
a = np.expm1(x)
print(a)
a = np.expm1(x.reshape(2,-1))
print(a)
b = x > 0
a = np.expm1(x, where= b)
print(a)
return
def test_exp2_1(self):
x = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0, -4.2])
a = np.exp2(x)
print(a)
a = np.exp2(x.reshape(2,-1))
print(a)
b = x > 0
a = np.exp2(x, where= b)
print(a)
return
def test_log_1(self):
x = np.array([1, np.e, np.e**2, 0])
a = np.log(x)
print(a)
a = np.log(x.reshape(2,-1))
print(a)
b = x > 0
a = np.log(x, where= b)
print(a)
return
def test_log10_1(self):
x = np.array([1, np.e, np.e**2, 0])
a = np.log10(x)
print(a)
a = np.log10(x.reshape(2,-1))
print(a)
b = x > 0
a = np.log10(x, where= b)
print(a)
return
def test_log2_1(self):
x = np.array([1, np.e, np.e**2, 0])
a = np.log2(x)
print(a)
a = np.log2(x.reshape(2,-1))
print(a)
b = x > 0
a = np.log2(x, where= b)
print(a)
return
def test_log1p_1(self):
x = np.array([1, np.e, np.e**2, 0])
a = np.log1p(x)
print(a)
a = np.log1p(x.reshape(2,-1))
print(a)
b = x > 0
a = np.log1p(x, where= b)
print(a)
return
def test_logaddexp_1(self):
prob1 = np.log(1e-50)
prob2 = np.log(2.5e-50)
a = np.logaddexp(prob1, prob2)
print(a)
b = np.exp(a)
print(b)
def test_logaddexp2_1(self):
prob1 = np.log2(1e-50)
prob2 = np.log2(2.5e-50)
a = np.logaddexp2(prob1, prob2)
print(a)
b = 2 ** a
print(b)
#endregion
#region Other special Functions
def test_i0_1(self):
a = np.i0(5)
print(a)
a = np.i0(5.0)
print(a)
a = np.i0([5.0, 6.0])
print(a)
a = np.i0([[5.0, 6.0],[7.9, 8.0]])
print(a)
return;
def test_sinc_1(self):
x = np.linspace(-4, 4, 10)
a = np.sinc(x)
print(a)
print("********")
xx = np.outer(x, x)
b = np.sinc(xx)
print(b)
#endregion
#region Floating point routines
def test_signbit_1(self):
a =np.signbit(-1.2)
print(a)
b = np.signbit(np.array([1, -2.3, 2.1]))
print(b)
c = np.signbit(np.array([+0.0, -0.0]))
print(c)
d = np.signbit(np.array([-np.inf, np.inf]))
print(d)
e = np.signbit(np.array([-np.nan, np.nan]))
print(e)
f = np.signbit(np.array([-1, 0, 1]))
print(f)
def test_copysign_1(self):
a = np.copysign(1.3, -1)
print(a)
b = 1/np.copysign(0, 1)
print(b)
c = 1/np.copysign(0, -1)
print(c)
d = np.copysign([-1, 0, 1], -1.1)
print(d)
e = np.copysign([-1, 0, 1], np.arange(3)-1)
print(e)
def test_frexp_1(self):
x = np.arange(9)
y1, y2 = np.frexp(x)
print(y1)
print(y2)
print("***************")
x = np.arange(9, dtype = np.float32).reshape(3,3)
y1, y2 = np.frexp(x)
print(y1)
print(y2)
print("***************")
x = np.arange(9, dtype = np.float64).reshape(3,3)
y1, y2 = np.frexp(x, where = x < 5)
print(y1)
print(y2)
def test_ldexp_1(self):
a = np.ldexp(5, np.arange(4))
print(a)
b = np.ldexp(np.arange(4), 5);
print(b)
def test_nextafter_1(self):
a = np.nextafter(1, 2)
print(a)
b = np.nextafter([1, 2], [2, 1])
d1 = b[0]
d2 = b[1]
print(d1)
print(d2)
c1 = np.array([1, 2], dtype=np.float32)
c2 = np.array([2, 1], dtype=np.float32)
c = np.nextafter(c1,c2)
f1 = c[0]
f2 = c[1]
print(f1)
print(f2)
#endregion
#region Rational routines
def test_lcm_1(self):
a = np.lcm(12, 20)
print(a)
b = np.lcm.reduce([3, 12, 20])
print(b)
c = np.lcm.reduce([40, 12, 20])
print(c)
d = np.lcm(np.arange(6), [20])
print(d)
e = np.lcm([20, 21], np.arange(6).reshape(3, 2))
print(e)
#f = np.lcm(np.arange(8).reshape(2,4), np.arange(16).reshape(4, 4))
#print(f)
def test_gcd_1(self):
a = np.gcd(12, 20)
print(a)
b = np.gcd.reduce([3, 12, 20])
print(b)
c = np.gcd.reduce([40, 12, 20])
print(c)
d = np.gcd(np.arange(6), [20])
print(d)
e = np.gcd([20, 20], np.arange(6).reshape(3, 2))
print(e)
#f = np.lcm(np.arange(8).reshape(2,4), np.arange(16).reshape(4, 4))
#print(f)
#endregion
#region Arithmetic operations
def test_add_1(self):
a = np.add(1.0, 4.0)
print(a)
b = np.arange(9.0).reshape((3, 3))
c = np.arange(3.0)
d = np.add(b, c)
print(d)
def test_reciprocal_operations(self):
a = np.arange(1, 32, 1, dtype = np.float32)
print(a)
b = np.reciprocal(a)
print(b)
a = np.arange(2048, 2048+32, 1, dtype = np.float64)
print(a)
b = np.reciprocal(a)
print(b)
def test_positive_1(self):
d = np.positive([-1, -0, 1])
print(d)
e = np.positive([[1, 0, -1], [-2, 3, -4]])
print(e)
def test_negative_1(self):
d = np.negative([-1, -0, 1])
print(d)
e = np.negative([[1, 0, -1], [-2, 3, -4]])
print(e)
def test_multiply_1(self):
a = np.multiply(1.0, 4.0)
print(a)
b = np.arange(9.0).reshape((3, 3))
c = np.arange(3.0)
d = np.multiply(b, c)
print(d)
def test_divide(self):
a = np.divide(7,3)
print(a)
b = np.divide([1., 2., 3., 4.], 2.5)
print(b)
c = np.divide([1., 2., 3., 4.], [0.5, 2.5, 2.5, 3.5 ])
print(c)
return
def test_power_operations(self):
a = np.arange(0, 32, 1, dtype = np.int16)
print(a)
b = np.power(a, 3.23)
print(b)
a = np.arange(2048, 2048+32, 1, dtype = np.int64)
print(a)
b = np.power(a, 4)
print(b)
b = np.power(a, 0)
print(b)
b = np.power(a, 0.5)
print(b)
def test_subtract_1(self):
a = np.subtract(2.0, 4.0)
print(a)
b = np.arange(9.0).reshape((3, 3))
c = np.arange(3.0)
d = np.subtract(b, c)
print(d)
def test_true_divide(self):
a = np.true_divide(7,3)
print(a)
b = np.true_divide([1., 2., 3., 4.], 2.5)
print(b)
c = np.true_divide([1., 2., 3., 4.], [0.5, 2.5, 2.5, 3.5 ])
print(c)
return
def test_floor_divide(self):
a = np.floor_divide(7,3)
print(a)
b = np.floor_divide([1., 2., 3., 4.], 2.5)
print(b)
c = np.floor_divide([1., 2., 3., 4.], [0.5, 2.5, 2.5, 3.5 ])
print(c)
return
def test_float_power(self):
x1 = range(6)
a = np.float_power(x1, 3)
print(a)
x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
b = np.float_power(x1, x2)
print(b)
x3 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
c = np.float_power(x1, x3)
print(c)
return
def test_fmod_1(self):
x = np.fmod([4, 7], [2, 3])
print(x)
y = np.fmod(np.arange(7), 5)
print(y)
return
def test_fmod_2(self):
x = np.fmod([-4, -7], [2, 3])
print(x)
y = np.fmod(np.arange(7), -5)
print(y)
return
def test_mod_1(self):
x = np.mod([4, 7], [2, 3])
print(x)
y = np.mod(np.arange(7), 5)
print(y)
return
def test_modf_1(self):
x = np.modf([0, 3.5])
print(x)
y = np.modf(np.arange(7))
print(y)
return
def test_remainder_1(self):
x = np.remainder([4, 7], [2, 3])
print(x)
y = np.remainder(np.arange(7), 5)
print(y)
return
def test_remainder_2(self):
x =
|
np.remainder([-4, -7], [2, 3])
|
numpy.remainder
|
#!/usr/bin/env python3
import numpy
import re
import itertools
import json
from collections import OrderedDict
from os import listdir, getcwd, stat, path
from os.path import isfile, join
import copy
import warnings
from urllib.parse import urlparse
import gzip
import pickle
from functools import reduce # forward compatibility for Python 3
import operator
from argparse import ArgumentParser
from ast import literal_eval
from datetime import datetime
class MyEncoder(json.JSONEncoder):
"""Convert numpy arrays to list for JSON serializing."""
def default(self, obj):
"""Modify 'default' method from JSONEncoder."""
# Case where object to be serialized is numpy array
if isinstance(obj, numpy.ndarray):
return obj.tolist()
# All other cases
else:
return super(MyEncoder, self).default(obj)
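# Usage sketch (editorial, not part of the original script): passing the
# encoder through json.dumps' `cls` argument lets numpy arrays serialize as
# plain lists, e.g.
#     json.dumps({"values": numpy.arange(3)}, cls=MyEncoder)
# returns '{"values": [0, 1, 2]}'.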
class UsefulInputFiles(object):
"""Class of input file paths to be used by this routine.
Attributes:
msegs_in (tuple): Database of baseline microsegment stock/energy.
msegs_cpl_in (tuple): Database of baseline technology characteristics.
iecc_reg_map (tuple): Maps IECC climates to AIA or EMM regions/states.
ash_emm_map (tuple): Maps ASHRAE climates to EMM regions.
aia_altreg_map (tuple): Maps AIA climates to EMM regions or states.
        metadata (str): Baseline metadata inc. min/max for year range.
        glob_vars (str): Global settings from ecm_prep to use later in run.
cost_convert_in (tuple): Database of measure cost unit conversions.
cbecs_sf_byvint (tuple): Commercial sq.ft. by vintage data.
indiv_ecms (tuple): Individual ECM JSON definitions folder.
ecm_packages (tuple): Measure package data.
ecm_prep (tuple): Prepared measure attributes data for use in the
analysis engine.
ecm_compete_data (tuple): Folder with contributing microsegment data
needed to run measure competition in the analysis engine.
ecm_eff_fs_splt_data (tuple): Folder with data needed to determine the
fuel splits of efficient case results for fuel switching measures.
run_setup (str): Names of active measures that should be run in
the analysis engine.
cpi_data (tuple): Historical Consumer Price Index data.
ss_data (tuple): Site-source, emissions, and price data, national.
ss_data_nonfs (tuple): Site-source, emissions, and price data,
national, to assign in certain cases to non-fuel switching
microsegments under high grid decarb case.
ss_data_altreg (tuple): Emissions/price data, EMM- or state-resolved.
        ss_data_altreg_nonfs (tuple): Base-case emissions/price data, EMM- or
state-resolved, to assign in certain cases to non-fuel switching
microsegments under high grid decarb case.
tsv_load_data (tuple): Time sensitive energy demand data.
tsv_cost_data (tuple): Time sensitive electricity price data.
tsv_carbon_data (tuple): Time sensitive average CO2 emissions data.
tsv_cost_data_nonfs (tuple): Time sensitive electricity price data to
assign in certain cases to non-fuel switching microsegments under
high grid decarb case.
tsv_carbon_data_nonfs (tuple): Time sensitive average CO2 emissions
data to assign in certain cases to non-fuel switching microsegments
under high grid decarb case.
tsv_shape_data (tuple): Custom hourly savings shape data.
        tsv_metrics_data_tot_ref (tuple): Total system load data by EMM
            region, AEO reference case.
        tsv_metrics_data_net_ref (tuple): Net system load shape data by EMM
            region, AEO reference case.
        tsv_metrics_data_tot_hr (tuple): Total system load data by EMM
            region, AEO "Low Renewable Cost" case.
        tsv_metrics_data_net_hr (tuple): Net system load shape data by EMM
            region, AEO "Low Renewable Cost" case.
health_data (tuple): EPA public health benefits data by EMM region.
hp_convert_rates (tuple): Fuel switching conversion rates.
"""
def __init__(self, capt_energy, regions, site_energy, grid_decarb):
if regions == 'AIA':
# UNCOMMENT WITH ISSUE 188
# self.msegs_in = ("supporting_data", "stock_energy_tech_data",
# "mseg_res_com_cz_2017.json")
self.msegs_in = ("supporting_data", "stock_energy_tech_data",
"mseg_res_com_cz.json")
# UNCOMMENT WITH ISSUE 188
# self.msegs_cpl_in = ("supporting_data", "stock_energy_tech_data",
# "cpl_res_com_cz_2017.json")
self.msegs_cpl_in = ("supporting_data", "stock_energy_tech_data",
"cpl_res_com_cz.json")
self.iecc_reg_map = ("supporting_data", "convert_data", "geo_map",
"IECC_AIA_ColSums.txt")
elif regions == 'EMM':
self.msegs_in = ("supporting_data", "stock_energy_tech_data",
"mseg_res_com_emm.json")
self.msegs_cpl_in = ("supporting_data", "stock_energy_tech_data",
"cpl_res_com_emm.gz")
self.ash_emm_map = ("supporting_data", "convert_data", "geo_map",
"ASH_EMM_ColSums.txt")
self.aia_altreg_map = ("supporting_data", "convert_data",
"geo_map", "AIA_EMM_ColSums.txt")
self.iecc_reg_map = ("supporting_data", "convert_data", "geo_map",
"IECC_EMM_ColSums.txt")
# Toggle EMM emissions and price data based on whether or not
# a high grid decarbonization scenario is used
if grid_decarb is not False:
self.ss_data_altreg = (
"supporting_data", "convert_data",
"emm_region_emissions_prices-decarb.json")
# Case where the user assesses emissions/cost reductions for
# non-fuel switching measures before grid decarbonization
if grid_decarb == "1":
self.ss_data_altreg_nonfs = (
"supporting_data", "convert_data",
"emm_region_emissions_prices-updated.json")
# Case where the user assesses emissions/cost reductions for
# non-fuel switching measures after grid decarbonization
else:
self.ss_data_altreg_nonfs = None
else:
self.ss_data_altreg = (
"supporting_data", "convert_data",
"emm_region_emissions_prices-updated.json")
self.ss_data_altreg_nonfs = None
elif regions == 'State':
self.msegs_in = ("supporting_data", "stock_energy_tech_data",
"mseg_res_com_state.gz")
self.msegs_cpl_in = ("supporting_data", "stock_energy_tech_data",
"cpl_res_com_cdiv.gz")
self.aia_altreg_map = ("supporting_data", "convert_data",
"geo_map", "AIA_State_ColSums.txt")
self.iecc_reg_map = ("supporting_data", "convert_data", "geo_map",
"IECC_State_ColSums.txt")
# Ensure that state-level regions are not being used alongside
# a high grid decarbonization scenario (incompatible currently)
if grid_decarb is not False:
raise ValueError("Unsupported regional breakout for "
"use with alternate grid decarbonization "
"scenario (" + regions + ")")
else:
self.ss_data_altreg = ("supporting_data", "convert_data",
"state_emissions_prices-updated.json")
self.ss_data_altreg_nonfs = None
else:
raise ValueError("Unsupported regional breakout (" + regions + ")")
self.metadata = "metadata.json"
self.glob_vars = "glob_run_vars.json"
# UNCOMMENT WITH ISSUE 188
# self.metadata = "metadata_2017.json"
self.cost_convert_in = ("supporting_data", "convert_data",
"ecm_cost_convert.json")
self.cbecs_sf_byvint = \
("supporting_data", "convert_data", "cbecs_sf_byvintage.json")
self.indiv_ecms = "ecm_definitions"
self.ecm_packages = ("ecm_definitions", "package_ecms.json")
self.ecm_prep = ("supporting_data", "ecm_prep.json")
self.ecm_compete_data = ("supporting_data", "ecm_competition_data")
self.ecm_eff_fs_splt_data = ("supporting_data", "eff_fs_splt_data")
self.run_setup = "run_setup.json"
self.cpi_data = ("supporting_data", "convert_data", "cpi.csv")
# Use the user-specified grid decarb flag to determine
# which site-source conversions file to select
if grid_decarb is not False:
self.ss_data = ("supporting_data", "convert_data",
"site_source_co2_conversions-decarb.json")
self.tsv_cost_data = ("supporting_data", "tsv_data",
"tsv_cost-decarb.json")
self.tsv_carbon_data = ("supporting_data", "tsv_data",
"tsv_carbon-decarb.json")
# Case where the user assesses emissions/cost reductions for
# non-fuel switching measures before grid decarbonization
if grid_decarb == "1":
self.ss_data_nonfs = ("supporting_data", "convert_data",
"site_source_co2_conversions.json")
self.tsv_cost_data_nonfs = (
"supporting_data", "tsv_data", "tsv_cost.json")
self.tsv_carbon_data_nonfs = (
"supporting_data", "tsv_data", "tsv_carbon.json")
# Case where the user assesses emissions/cost reductions for
# non-fuel switching measures after grid decarbonization
else:
self.ss_data_nonfs, self.tsv_cost_data_nonfs, \
self.tsv_carbon_data_nonfs = (None for n in range(3))
else:
# Use the user-specified captured energy method flag to determine
# which site-source conversions file to select
if capt_energy is True:
self.ss_data = ("supporting_data", "convert_data",
"site_source_co2_conversions-ce.json")
else:
self.ss_data = ("supporting_data", "convert_data",
"site_source_co2_conversions.json")
self.tsv_cost_data = (
"supporting_data", "tsv_data", "tsv_cost.json")
self.tsv_carbon_data = (
"supporting_data", "tsv_data", "tsv_carbon.json")
self.ss_data_nonfs, self.tsv_cost_data_nonfs, \
self.tsv_carbon_data_nonfs = (None for n in range(3))
self.tsv_load_data = (
"supporting_data", "tsv_data", "tsv_load.json")
self.tsv_shape_data = (
"ecm_definitions", "energyplus_data", "savings_shapes")
self.tsv_metrics_data_tot_ref = (
"supporting_data", "tsv_data", "tsv_hrs_tot_ref.csv")
self.tsv_metrics_data_net_ref = (
"supporting_data", "tsv_data", "tsv_hrs_net_ref.csv")
self.tsv_metrics_data_tot_hr = (
"supporting_data", "tsv_data", "tsv_hrs_tot_hr.csv")
self.tsv_metrics_data_net_hr = (
"supporting_data", "tsv_data", "tsv_hrs_net_hr.csv")
self.health_data = (
"supporting_data", "convert_data", "epa_costs.csv")
self.hp_convert_rates = ("supporting_data", "convert_data",
"hp_convert_rates.json")
class UsefulVars(object):
"""Class of variables that are used globally across functions.
Attributes:
adopt_schemes (list): Possible consumer adoption scenarios.
discount_rate (float): Rate to use in discounting costs/savings.
nsamples (int): Number of samples to draw from probability distribution
on measure inputs.
regions (string): User region settings.
aeo_years (list): Modeling time horizon.
aeo_years_summary (list): Reduced set of snapshot years in the horizon.
retro_rate (dict): Annual rate of deep retrofitting existing stock.
demand_tech (list): All demand-side heating/cooling technologies.
zero_cost_tech (list): All baseline technologies with cost of zero.
        inverted_relperf_list (list): Performance units that require
            an inverted relative performance calculation (e.g., an air change
            rate where lower numbers indicate higher performance).
        valid_submkt_urls (list): Valid URLs for sub-market scaling fractions.
        consumer_price_ind (numpy.ndarray): Historical Consumer Price Index.
ss_conv (dict): Site-source conversion factors by fuel type.
fuel_switch_conv (dict): Performance unit conversions for expected
fuel switching cases.
carb_int (dict): Carbon intensities by fuel type (MMTon/quad).
ecosts (dict): Energy costs by building and fuel type ($/MMBtu).
ccosts (dict): Carbon costs ($/MTon).
com_timeprefs (dict): Commercial adoption time preference premiums.
hp_rates (dict): Exogenous rates of conversions from baseline
equipment to heat pumps, if applicable.
in_all_map (dict): Maps any user-defined measure inputs marked 'all' to
list of climates, buildings, fuels, end uses, or technologies.
valid_mktnames (list): List of all valid applicable baseline market
input names for a measure.
out_break_czones (OrderedDict): Maps measure climate zone names to
the climate zone categories used in summarizing measure outputs.
out_break_bldgtypes (OrderedDict): Maps measure building type names to
the building sector categories used in summarizing measure outputs.
out_break_enduses (OrderedDict): Maps measure end use names to
the end use categories used in summarizing measure outputs.
out_break_fuels (OrderedDict): Maps measure fuel types to electric vs.
non-electric fuels (for heating, cooling, WH, and cooking).
out_break_in (OrderedDict): Breaks out key measure results by
climate zone, building sector, and end use.
cconv_topkeys_map (dict): Maps measure cost units to top-level keys in
an input cost conversion data dict.
cconv_whlbldgkeys_map (dict): Maps measure cost units to whole
building-level cost conversion dict keys.
tech_units_rmv (list): Flags baseline performance units that cannot
currently be handled, thus the associated segment must be removed.
tech_units_map (dict): Maps baseline performance units to measure units
in cases where the conversion is expected (e.g., EER to COP).
sf_to_house (dict): Stores information for mapping stock units in
sf to number of households, as applicable.
cconv_htclkeys_map (dict): Maps measure cost units to cost conversion
dict keys for the heating and cooling end uses.
cconv_tech_htclsupply_map (dict): Maps measure cost units to cost
conversion dict keys for supply-side heating/cooling technologies.
cconv_tech_mltstage_map (dict): Maps measure cost units to cost
conversion dict keys for demand-side heating/cooling
technologies and controls technologies requiring multiple
conversion steps (e.g., $/ft^2 glazing -> $/ft^2 wall ->
$/ft^2 floor; $/node -> $/ft^2 floor -> $/unit).
cconv_bybldg_units (list): Flags cost unit conversions that must
be re-initiated for each new microsegment building type.
cconv_bytech_units_res (list): Flags cost unit conversions that must
be re-initiated for each new microsegment technology type (
applies only to the residential sector, where conversions from
$/ft^2 floor to $/unit depend on number of units per household,
which varies according to technology type).
deflt_choice (list): Residential technology choice capital/operating
cost parameters to use when choice data are missing.
regions (str): Regions to use in geographically breaking out the data.
region_cpl_mapping (str or dict): Maps states to census divisions for
the case where states are used; otherwise empty string.
alt_perfcost_brk_map (dict): Mapping factors used to handle alternate
regional breakouts in measure performance or cost units.
        months (list): Month sequence for accessing time-sensitive data.
tsv_feature_types (list): Possible types of TSV features.
tsv_climate_regions (list): Possible ASHRAE/IECC climate regions for
time-sensitive analysis and metrics.
tsv_nerc_regions (list): Possible NERC regions for time-sensitive data.
        tsv_metrics_data (dict): Includes information on max/min net system load
hours, peak/take net system load windows, and peak days by EMM
region/season, as well as days of year to attribute to each season.
tsv_hourly_price (dict): Dict for storing hourly price factors.
tsv_hourly_emissions (dict): Dict for storing hourly emissions factors.
tsv_hourly_lafs (dict): Dict for storing annual energy, cost, and
carbon adjustment factors by region, building type, and end use.
emm_name_num_map (dict): Maps EMM region names to EIA region numbers.
cz_emm_map (dict): Maps climate zones to EMM region net system load
shape data.
health_scn_names (list): List of public health data scenario names.
health_scn_data (numpy.ndarray): Public health cost data.
heat_ls_tech_scrn (tuple): Heat gains to screen out of time-
sensitive valuation for heating (no load shapes for these gains).
"""
def __init__(self, base_dir, handyfiles, regions, tsv_metrics,
health_costs, split_fuel, floor_start, exog_hp_rates,
adopt_scn_usr):
# Choose default adoption scenarios if user doesn't specify otherwise
if adopt_scn_usr is False:
self.adopt_schemes = [
'Technical potential', 'Max adoption potential']
# Otherwise set adoption scenario to user-specified choice
else:
self.adopt_schemes = adopt_scn_usr
self.discount_rate = 0.07
self.nsamples = 100
self.regions = regions
# Load metadata including AEO year range
with open(path.join(base_dir, handyfiles.metadata), 'r') as aeo_yrs:
try:
aeo_yrs = json.load(aeo_yrs)
except ValueError as e:
raise ValueError(
"Error reading in '" +
handyfiles.metadata + "': " + str(e)) from None
# # Set minimum AEO modeling year
# aeo_min = aeo_yrs["min year"]
# Set minimum year to current year
aeo_min = datetime.today().year
# Set maximum AEO modeling year
aeo_max = aeo_yrs["max year"]
# Derive time horizon from min/max years
self.aeo_years = [
str(i) for i in range(aeo_min, aeo_max + 1)]
self.aeo_years_summary = ["2030", "2050"]
self.retro_rate = {
yr: 0.001 + (0.009/4) * (int(yr) - 2021) if int(yr) < 2025 else (
0.01 + (0.01/9) * (int(yr) - 2025) if int(yr) < 2035 else
0.02) for yr in self.aeo_years}
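        # Worked example of the ramp above (illustrative, for years within
        # the AEO horizon): 2021 -> 0.001; 2024 -> 0.001 + (0.009/4)*3 =
        # 0.00775; 2025 -> 0.01; 2034 -> 0.02; 2035 and beyond -> 0.02, i.e.,
        # the annual retrofit rate ramps from 0.1% to 2% of existing stock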
self.demand_tech = [
'roof', 'ground', 'lighting gain', 'windows conduction',
'equipment gain', 'floor', 'infiltration', 'people gain',
'windows solar', 'ventilation', 'other heat gain', 'wall']
self.zero_cost_tech = ['infiltration']
self.inverted_relperf_list = ["ACH", "CFM/ft^2 @ 0.3 in. w.c.",
"kWh/yr", "kWh/day", "SHGC", "HP/CFM"]
self.valid_submkt_urls = [
'.eia.gov', '.doe.gov', '.energy.gov', '.data.gov',
'.energystar.gov', '.epa.gov', '.census.gov', '.pnnl.gov',
'.lbl.gov', '.nrel.gov', 'www.sciencedirect.com', 'www.costar.com',
'www.navigantresearch.com']
try:
self.consumer_price_ind = numpy.genfromtxt(
path.join(base_dir, *handyfiles.cpi_data),
names=True, delimiter=',',
dtype=[('DATE', 'U10'), ('VALUE', '<f8')])
except ValueError as e:
raise ValueError(
"Error reading in '" +
handyfiles.cpi_data + "': " + str(e)) from None
# Read in national-level site-source, emissions, and costs data
with open(path.join(base_dir, *handyfiles.ss_data), 'r') as ss:
try:
cost_ss_carb = json.load(ss)
except ValueError as e:
raise ValueError(
"Error reading in '" +
handyfiles.ss_data + "': " + str(e)) from None
# Set base-case emissions/cost data to use in assessing reductions for
# non-fuel switching microsegments under a high grid decarbonization
# case, if desired by the user
if handyfiles.ss_data_nonfs is not None:
# Read in national-level site-source, emissions, and costs data
with open(path.join(
base_dir, *handyfiles.ss_data_nonfs), 'r') as ss:
try:
cost_ss_carb_nonfs = json.load(ss)
except ValueError as e:
raise ValueError(
"Error reading in '" +
                        handyfiles.ss_data_nonfs + "': " + str(e)) from None
else:
cost_ss_carb_nonfs = None
# Set national site to source conversion factors
self.ss_conv = {
"electricity": cost_ss_carb[
"electricity"]["site to source conversion"]["data"],
"natural gas": {yr: 1 for yr in self.aeo_years},
"distillate": {yr: 1 for yr in self.aeo_years},
"other fuel": {yr: 1 for yr in self.aeo_years}}
# Set electric emissions intensities and prices differently
# depending on whether EMM regions are specified (use EMM-specific
# data) or not (use national data)
if self.regions in ["EMM", "State"]:
# Read in EMM- or state-specific emissions factors and price data
with open(path.join(base_dir,
*handyfiles.ss_data_altreg), 'r') as ss:
try:
cost_ss_carb_altreg = json.load(ss)
except ValueError as e:
raise ValueError(
"Error reading in '" +
handyfiles.ss_data_altreg + "': " + str(e)) from None
# Set base-case emissions/cost data to use in assessing reductions
# for non-fuel switching microsegments under a high grid
# decarbonization case, if desired by the user
if handyfiles.ss_data_altreg_nonfs is not None:
# Read in EMM- or state-specific emissions factors and price
# data
with open(path.join(
base_dir,
*handyfiles.ss_data_altreg_nonfs), 'r') as ss:
try:
cost_ss_carb_altreg_nonfs = json.load(ss)
except ValueError:
raise ValueError(
"Error reading in '" +
path.join(base_dir,
*handyfiles.ss_data_altreg_nonfs) + "'")
else:
cost_ss_carb_altreg_nonfs = None
# Initialize CO2 intensities based on electricity intensities by
# EMM region or state; convert CO2 intensities from Mt/TWh site to
# MMTon/MMBTu site to match expected multiplication by site energy
self.carb_int = {bldg: {"electricity": {reg: {
yr: round((cost_ss_carb_altreg["CO2 intensity of electricity"][
"data"][reg][yr] / 3412141.6331), 10) for
yr in self.aeo_years} for reg in cost_ss_carb_altreg[
"CO2 intensity of electricity"]["data"].keys()}} for
bldg in ["residential", "commercial"]}
# Initialize energy costs based on electricity prices by EMM region
# or state; convert prices from $/kWh site to $/MMBTu site to match
# expected multiplication by site energy units
self.ecosts = {bldg: {"electricity": {reg: {
yr: round((cost_ss_carb_altreg["End-use electricity price"][
"data"][bldg][reg][yr] / 0.003412), 6) for
yr in self.aeo_years} for reg in cost_ss_carb_altreg[
"End-use electricity price"]["data"][bldg].keys()}} for
bldg in ["residential", "commercial"]}
# Finalize base-case emissions/cost data to use in assessing
# reductions for non-fuel switching microsegments under a high grid
# decarbonization case, if desired by the user
if cost_ss_carb_altreg_nonfs is not None:
self.carb_int_nonfs = {bldg: {"electricity": {reg: {
yr: round((cost_ss_carb_altreg_nonfs[
"CO2 intensity of electricity"][
"data"][reg][yr] / 3412141.6331), 10) for
yr in self.aeo_years} for reg in cost_ss_carb_altreg_nonfs[
"CO2 intensity of electricity"]["data"].keys()}} for
bldg in ["residential", "commercial"]}
self.ecosts_nonfs = {bldg: {"electricity": {reg: {
yr: round((cost_ss_carb_altreg_nonfs[
"End-use electricity price"][
"data"][bldg][reg][yr] / 0.003412), 6) for
yr in self.aeo_years} for reg in cost_ss_carb_altreg_nonfs[
"End-use electricity price"]["data"][bldg].keys()}} for
bldg in ["residential", "commercial"]}
else:
self.carb_int_nonfs, self.ecosts_nonfs = (
None for n in range(2))
else:
# Initialize CO2 intensities based on national CO2 intensities
# for electricity; convert CO2 intensities from Mt/quad source to
# Mt/MMBTu source to match expected multiplication by source energy
self.carb_int = {bldg: {"electricity": {yr: cost_ss_carb[
"electricity"]["CO2 intensity"]["data"][bldg][yr] /
1000000000 for yr in self.aeo_years}} for bldg in [
"residential", "commercial"]}
# Initialize energy costs based on national electricity prices; no
# conversion needed as the prices will be multiplied by MMBtu
# source energy units and are already in units of $/MMBtu source
self.ecosts = {bldg: {"electricity": {yr: cost_ss_carb[
"electricity"]["price"]["data"][bldg][yr] for
yr in self.aeo_years}} for bldg in [
"residential", "commercial"]}
# Finalize base-case emissions/cost data to use in assessing
# reductions for non-fuel switching microsegments under a high grid
# decarbonization case, if desired by the user
if cost_ss_carb_nonfs is not None:
self.carb_int_nonfs = {
bldg: {"electricity": {yr: cost_ss_carb_nonfs[
"electricity"]["CO2 intensity"]["data"][bldg][yr] /
1000000000 for yr in self.aeo_years}} for bldg in [
"residential", "commercial"]}
self.ecosts_nonfs = {
bldg: {"electricity": {yr: cost_ss_carb_nonfs[
"electricity"]["price"]["data"][bldg][yr] for
yr in self.aeo_years}} for bldg in [
"residential", "commercial"]}
else:
self.carb_int_nonfs, self.ecosts_nonfs = (
None for n in range(2))
# Pull non-electric CO2 intensities and energy prices and update
# the CO2 intensity and energy cost dicts initialized above
# accordingly; convert CO2 intensities from Mt/quad source to
# Mt/MMBTu source to match expected multiplication by source energy;
# price data are already in units of $/MMBtu source and do not require
# further conversion
carb_int_nonelec = {bldg: {fuel: {yr: (
cost_ss_carb[fuel_map]["CO2 intensity"]["data"][
bldg][yr] / 1000000000) for yr in self.aeo_years}
for fuel, fuel_map in zip(
["natural gas", "distillate", "other fuel"],
["natural gas", "distillate", "propane"])
} for bldg in ["residential", "commercial"]}
ecosts_nonelec = {bldg: {fuel: {yr: cost_ss_carb[
fuel_map]["price"]["data"][bldg][yr] for yr in
self.aeo_years} for fuel, fuel_map in zip([
"natural gas", "distillate", "other fuel"], [
"natural gas", "distillate", "propane"])} for bldg in [
"residential", "commercial"]}
for bldg in ["residential", "commercial"]:
self.carb_int[bldg].update(carb_int_nonelec[bldg])
self.ecosts[bldg].update(ecosts_nonelec[bldg])
# Update base-case emissions/cost data to use in
# assessing reductions for non-fuel switching microsegments
# under a high grid decarbonization case to reflect non-electric
# emissions intensities/energy costs
if self.carb_int_nonfs is not None:
self.carb_int_nonfs[bldg].update(carb_int_nonelec[bldg])
if self.ecosts_nonfs is not None:
self.ecosts_nonfs[bldg].update(ecosts_nonelec[bldg])
# Set carbon costs
ccosts_init = cost_ss_carb["CO2 price"]["data"]
# Multiply carbon costs by 1000000 to reflect
# conversion from import units of $/MTon to $/MMTon
self.ccosts = {
yr_key: (ccosts_init[yr_key] * 1000000) for
yr_key in self.aeo_years}
self.com_timeprefs = {
"rates": [10.0, 1.0, 0.45, 0.25, 0.15, 0.065, 0.0],
"distributions": {
"heating": {
key: [0.265, 0.226, 0.196, 0.192, 0.105, 0.013, 0.003]
for key in self.aeo_years},
"cooling": {
key: [0.264, 0.225, 0.193, 0.192, 0.106, 0.016, 0.004]
for key in self.aeo_years},
"water heating": {
key: [0.263, 0.249, 0.212, 0.169, 0.097, 0.006, 0.004]
for key in self.aeo_years},
"ventilation": {
key: [0.265, 0.226, 0.196, 0.192, 0.105, 0.013, 0.003]
for key in self.aeo_years},
"cooking": {
key: [0.261, 0.248, 0.214, 0.171, 0.097, 0.005, 0.004]
for key in self.aeo_years},
"lighting": {
key: [0.264, 0.225, 0.193, 0.193, 0.085, 0.013, 0.027]
for key in self.aeo_years},
"refrigeration": {
key: [0.262, 0.248, 0.213, 0.170, 0.097, 0.006, 0.004]
for key in self.aeo_years}}}
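        # Illustrative reading of the structure above (an interpretation, not
        # stated elsewhere in this file): each end use's distribution pairs
        # positionally with the "rates" list, so for heating a 0.265 share of
        # commercial consumers is associated with the 10.0 rate, 0.226 with
        # 1.0, and so on, with the shares summing to roughly 1 in each year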
# Load external data on conversion rates for HP measures
if exog_hp_rates in ['1', '2']:
with open(path.join(
base_dir, *handyfiles.hp_convert_rates), 'r') as fs_r:
try:
self.hp_rates = json.load(fs_r)
except ValueError:
print("Error reading in '" +
handyfiles.hp_convert_rates + "'")
else:
self.hp_rates = None
# Set valid region names and regional output categories
if regions == "AIA":
valid_regions = [
"AIA_CZ1", "AIA_CZ2", "AIA_CZ3", "AIA_CZ4", "AIA_CZ5"]
regions_out = [
('AIA CZ1', 'AIA_CZ1'), ('AIA CZ2', 'AIA_CZ2'),
('AIA CZ3', 'AIA_CZ3'), ('AIA CZ4', 'AIA_CZ4'),
('AIA CZ5', 'AIA_CZ5')]
self.region_cpl_mapping = ''
# Read in mapping for alternate performance/cost unit breakouts
# IECC -> AIA mapping
try:
iecc_reg_map = numpy.genfromtxt(
path.join(base_dir, *handyfiles.iecc_reg_map),
names=True, delimiter='\t', dtype=(
['<U25'] * 1 + ['<f8'] * len(valid_regions)))
except ValueError as e:
raise ValueError(
"Error reading in '" +
handyfiles.iecc_reg_map + "': " + str(e)) from None
# Store alternate breakout mapping in dict for later use
self.alt_perfcost_brk_map = {
"IECC": iecc_reg_map, "levels": str([
"IECC_CZ" + str(n + 1) for n in range(8)])}
# HP conversion rates unsupported for AIA regional breakouts
self.hp_rates_reg_map = None
elif regions in ["EMM", "State"]:
if regions == "EMM":
valid_regions = [
'TRE', 'FRCC', 'MISW', 'MISC', 'MISE', 'MISS',
'ISNE', 'NYCW', 'NYUP', 'PJME', 'PJMW', 'PJMC',
'PJMD', 'SRCA', 'SRSE', 'SRCE', 'SPPS', 'SPPC',
'SPPN', 'SRSG', 'CANO', 'CASO', 'NWPP', 'RMRG', 'BASN']
self.region_cpl_mapping = ''
try:
self.ash_emm_map = numpy.genfromtxt(
path.join(base_dir, *handyfiles.ash_emm_map),
names=True, delimiter='\t', dtype=(
['<U25'] * 1 + ['<f8'] * len(valid_regions)))
except ValueError as e:
raise ValueError(
"Error reading in '" +
handyfiles.ash_emm_map + "': " + str(e)) from None
# If applicable, pull regional mapping needed to read in
# HP conversion rate data for certain measures/microsegments
if self.hp_rates:
self.hp_rates_reg_map = {
"midwest": [
"SPPN", "MISW", "SPPC", "MISC",
"PJMW", "PJMC", "MISE"],
"northeast": [
"PJME", "NYCW", "NYUP", "ISNE"],
"south": [
"SPPS", "TRE", "MISS", "SRCE", "PJMD",
"SRCA", "SRSE", "FRCC"],
"west": [
"NWPP", "BASN", "RMRG", "SRSG", "CASO", "CANO"]
}
else:
self.hp_rates_reg_map = None
else:
# Note: for now, exclude AK and HI
valid_regions = [
'AL', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FL',
'GA', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME',
'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH',
'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI',
'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI',
'WY']
self.region_cpl_mapping = {
"new england": ['CT', 'MA', 'ME', 'NH', 'RI', 'VT'],
"mid atlantic": ['NJ', 'NY', 'PA'],
"east north central": ['IL', 'IN', 'MI', 'OH', 'WI'],
"west north central": [
'IA', 'KS', 'MN', 'MO', 'ND', 'NE', 'SD'],
"south atlantic": [
'DC', 'DE', 'FL', 'GA', 'MD', 'NC', 'SC', 'VA', 'WV'],
"east south central": ['AL', 'KY', 'MS', 'TN'],
"west south central": ['AR', 'LA', 'OK', 'TX'],
"mountain": [
'AZ', 'CO', 'ID', 'MT', 'NM', 'NV', 'UT', 'WY'],
"pacific": ['AK', 'CA', 'HI', 'OR', 'WA']}
# If applicable, pull regional mapping needed to read in
# HP conversion rate data for certain measures/microsegments
if self.hp_rates:
self.hp_rates_reg_map = {
"midwest": [
"ND", "SD", "NE", "KS", "MO", "IA", "MN", "WI",
"IL", "IN", "MI", "OH"],
"northeast": [
"PA", "NY", "NJ", "CT", "RI", "MA", "VT", "NH",
"ME"],
"south": [
"TX", "OK", "AR", "LA", "MS", "AL", "GA", "FL",
"SC", "NC", "TN", "KY", "WV", "VA", "DC", "MD",
"DE"],
"west": [
"WA", "OR", "ID", "MT", "WY", "CA", "NV", "UT",
"AZ", "NM", "CO", "AK", "HI"]
}
else:
self.hp_rates_reg_map = None
regions_out = [(x, x) for x in valid_regions]
# Read in mapping for alternate performance/cost unit breakouts
# AIA -> EMM or State mapping
try:
# Hard code number of valid states at 51 (includes DC) to avoid
# potential issues later when indexing numpy columns by state
if regions == "State":
len_reg = 51
else:
len_reg = len(valid_regions)
# Read in the data
aia_altreg_map = numpy.genfromtxt(
path.join(base_dir, *handyfiles.aia_altreg_map),
names=True, delimiter='\t', dtype=(
['<U25'] * 1 + ['<f8'] * len_reg))
except ValueError as e:
raise ValueError(
"Error reading in '" +
handyfiles.aia_altreg_map + "': " + str(e)) from None
# IECC -> EMM or State mapping
try:
iecc_altreg_map = numpy.genfromtxt(
path.join(base_dir, *handyfiles.iecc_reg_map),
names=True, delimiter='\t', dtype=(
['<U25'] * 1 + ['<f8'] * len(valid_regions)))
except ValueError as e:
raise ValueError(
"Error reading in '" +
handyfiles.iecc_reg_map + "': " + str(e)) from None
# Store alternate breakout mapping in dict for later use
self.alt_perfcost_brk_map = {
"IECC": iecc_altreg_map, "AIA": aia_altreg_map,
"levels": str([
"IECC_CZ" + str(n + 1) for n in range(8)]) + " 0R " + str([
"AIA_CZ" + str(n + 1) for n in range(5)])}
self.months = ["january", "february", "march", "april", "may", "june",
"july", "august", "september", "october", "november",
"december"]
self.in_all_map = {
"climate_zone": valid_regions,
"bldg_type": {
"residential": [
"single family home", "multi family home", "mobile home"],
"commercial": [
"assembly", "education", "food sales", "food service",
"health care", "lodging", "large office", "small office",
"mercantile/service", "warehouse", "other"]},
"structure_type": ["new", "existing"],
"fuel_type": {
"residential": [
"electricity", "natural gas", "distillate", "other fuel"],
"commercial": [
"electricity", "natural gas", "distillate"]},
"end_use": {
"residential": {
"electricity": [
'drying', 'other', 'water heating',
'cooling', 'cooking', 'computers', 'lighting',
'secondary heating', 'TVs', 'heating', 'refrigeration',
'fans and pumps', 'ceiling fan'],
"natural gas": [
'drying', 'water heating', 'cooling', 'heating',
'cooking', 'secondary heating', 'other'],
"distillate": [
'water heating', 'heating', 'secondary heating',
'other'],
"other fuel": [
'water heating', 'cooking', 'heating',
'secondary heating', 'other']},
"commercial": {
"electricity": [
'ventilation', 'water heating', 'cooling',
'heating', 'refrigeration', 'MELs',
'non-PC office equipment', 'PCs', 'lighting',
'cooking'],
"natural gas": [
'cooling', 'water heating', 'cooking', 'heating'],
"distillate": ['water heating', 'heating']}},
"technology": {
"residential": {
"supply": {
"electricity": {
'other': [
'dishwasher', 'clothes washing', 'freezers',
'rechargeables', 'coffee maker',
'dehumidifier', 'electric other',
'microwave', 'pool heaters and pumps',
'security system', 'portable electric spas',
'wine coolers'],
'water heating': ['solar WH', 'electric WH'],
'cooling': [
'room AC', 'ASHP', 'GSHP', 'central AC'],
'computers': [
'desktop PC', 'laptop PC', 'network equipment',
'monitors'],
'lighting': [
'linear fluorescent (T-8)',
'linear fluorescent (T-12)',
'reflector (LED)', 'general service (CFL)',
'external (high pressure sodium)',
'general service (incandescent)',
'external (CFL)',
'external (LED)', 'reflector (CFL)',
'reflector (incandescent)',
'general service (LED)',
'external (incandescent)',
'linear fluorescent (LED)',
'reflector (halogen)'],
'secondary heating': ['secondary heater'],
'TVs': [
'home theater and audio', 'set top box',
'video game consoles', 'DVD', 'TV'],
'heating': ['GSHP', 'resistance heat', 'ASHP'],
'ceiling fan': [None],
'fans and pumps': [None],
'refrigeration': [None],
'drying': [None],
'cooking': [None]},
"natural gas": {
'cooling': ['NGHP'],
'heating': ['furnace (NG)', 'NGHP', 'boiler (NG)'],
'secondary heating': ['secondary heater'],
'drying': [None],
'water heating': [None],
'cooking': [None],
'other': ["other appliances"]},
"distillate": {
'heating': [
'boiler (distillate)', 'furnace (distillate)'],
'secondary heating': ['secondary heater'],
'water heating': [None],
'other': ["other appliances"]},
"other fuel": {
'heating': [
'furnace (kerosene)',
'stove (wood)', 'furnace (LPG)'],
'secondary heating': [
'secondary heater (wood)',
'secondary heater (coal)',
'secondary heater (kerosene)',
'secondary heater (LPG)'],
'cooking': [None],
'water heating': [None],
'other': ["other appliances"]}},
"demand": [
'roof', 'ground', 'windows solar',
'windows conduction', 'equipment gain',
'people gain', 'wall', 'infiltration']},
"commercial": {
"supply": {
"electricity": {
'ventilation': ['VAV_Vent', 'CAV_Vent'],
'water heating': [
'Solar water heater', 'HP water heater',
'elec_booster_water_heater',
'elec_water_heater'],
'cooling': [
'rooftop_AC', 'scroll_chiller',
'res_type_central_AC', 'reciprocating_chiller',
'comm_GSHP-cool', 'centrifugal_chiller',
'rooftop_ASHP-cool', 'wall-window_room_AC',
'screw_chiller'],
'heating': [
'electric_res-heat', 'comm_GSHP-heat',
'rooftop_ASHP-heat', 'elec_boiler'],
'refrigeration': [
'Commercial Beverage Merchandisers',
'Commercial Compressor Rack Systems',
'Commercial Condensers',
'Commercial Ice Machines',
'Commercial Reach-In Freezers',
'Commercial Reach-In Refrigerators',
'Commercial Refrigerated Vending Machines',
'Commercial Supermarket Display Cases',
'Commercial Walk-In Freezers',
'Commercial Walk-In Refrigerators'],
'MELs': [
'elevators', 'escalators', 'coffee brewers',
'kitchen ventilation', 'laundry',
'lab fridges and freezers', 'fume hoods',
'medical imaging', 'large video boards',
'shredders', 'private branch exchanges',
'voice-over-IP telecom', 'IT equipment',
'office UPS', 'data center UPS',
'security systems',
'distribution transformers',
'non-road electric vehicles'
],
'lighting': [
'100W A19 Incandescent',
'100W Equivalent A19 Halogen',
'100W Equivalent CFL Bare Spiral',
'100W Equivalent LED A Lamp',
'Halogen Infrared Reflector (HIR) PAR38',
'Halogen PAR38',
'LED Integrated Luminaire',
'LED PAR38',
'Mercury Vapor',
'Metal Halide',
'Sodium Vapor',
'T5 4xF54 HO High Bay',
'T5 F28',
'T8 F28',
'T8 F32',
'T8 F59',
'T8 F96'
],
'cooking': [
'electric_range_oven_24x24_griddle'],
'PCs': [None],
'non-PC office equipment': [None]},
"natural gas": {
'cooling': [
'gas_eng-driven_RTAC', 'gas_chiller',
'res_type_gasHP-cool',
'gas_eng-driven_RTHP-cool'],
'water heating': [
'gas_water_heater', 'gas_instantaneous_WH',
'gas_booster_WH'],
'cooking': [
'gas_range_oven_24x24_griddle'],
'heating': [
'gas_eng-driven_RTHP-heat',
'res_type_gasHP-heat', 'gas_boiler',
'gas_furnace']},
"distillate": {
'water heating': ['oil_water_heater'],
'heating': ['oil_boiler', 'oil_furnace']}},
"demand": [
'roof', 'ground', 'lighting gain',
'windows conduction', 'equipment gain',
'floor', 'infiltration', 'people gain',
'windows solar', 'ventilation',
'other heat gain', 'wall']}}}
# Find the full set of valid names for describing a measure's
# applicable baseline that do not begin with 'all'
mktnames_non_all = self.append_keyvals(
self.in_all_map, keyval_list=[]) + ['supply', 'demand']
# Find the full set of valid names for describing a measure's
# applicable baseline that do begin with 'all'
mktnames_all_init = ["all", "all residential", "all commercial"] + \
self.append_keyvals(self.in_all_map["end_use"], keyval_list=[])
mktnames_all = ['all ' + x if 'all' not in x else x for
x in mktnames_all_init]
self.valid_mktnames = mktnames_non_all + mktnames_all
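        # Example of the resulting naming scheme (illustrative): 'all',
        # 'all residential', and 'all commercial' pass through unchanged,
        # while end use names pulled from in_all_map are prefixed (e.g.,
        # 'heating' -> 'all heating'); individual region, building type,
        # fuel, end use, and technology names remain valid without a prefix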
self.out_break_czones = OrderedDict(regions_out)
self.out_break_bldgtypes = OrderedDict([
('Residential (New)', [
'new', 'single family home', 'multi family home',
'mobile home']),
('Residential (Existing)', [
'existing', 'single family home', 'multi family home',
'mobile home'],),
('Commercial (New)', [
'new', 'assembly', 'education', 'food sales',
'food service', 'health care', 'mercantile/service',
'lodging', 'large office', 'small office', 'warehouse',
'other']),
('Commercial (Existing)', [
'existing', 'assembly', 'education', 'food sales',
'food service', 'health care', 'mercantile/service',
'lodging', 'large office', 'small office', 'warehouse',
'other'])])
self.out_break_enduses = OrderedDict([
('Heating (Equip.)', ["heating", "secondary heating"]),
('Cooling (Equip.)', ["cooling"]),
('Heating (Env.)', ["heating", "secondary heating"]),
('Cooling (Env.)', ["cooling"]),
('Ventilation', ["ventilation"]),
('Lighting', ["lighting"]),
('Water Heating', ["water heating"]),
('Refrigeration', ["refrigeration", "other"]),
('Cooking', ["cooking"]),
('Computers and Electronics', [
"PCs", "non-PC office equipment", "TVs", "computers"]),
('Other', [
"drying", "ceiling fan", "fans and pumps",
"MELs", "other"])])
# Configure output breakouts for fuel type if user has set this option
if split_fuel is True:
self.out_break_fuels = OrderedDict([
('Electric', ["electricity"]),
('Non-Electric', ["natural gas", "distillate", "other fuel"])])
else:
self.out_break_fuels = {}
# Use the above output categories to establish a dictionary with blank
# values at terminal leaf nodes; this dict will eventually store
# partitioning fractions needed to breakout the measure results
# Determine all possible outcome category combinations
out_levels = [
self.out_break_czones.keys(), self.out_break_bldgtypes.keys(),
self.out_break_enduses.keys()]
out_levels_keys = list(itertools.product(*out_levels))
# Create dictionary using outcome category combinations as key chains
self.out_break_in = OrderedDict()
for kc in out_levels_keys:
current_level = self.out_break_in
for ind, elem in enumerate(kc):
# If fuel splits are desired and applicable for the current
# end use breakout, add the fuel splits to the dict vals
if len(self.out_break_fuels.keys()) != 0 and (elem in [
"Heating (Equip.)", "Cooling (Equip.)", "Heating (Env.)",
"Cooling (Env.)", "Water Heating", "Cooking"]) and \
elem not in current_level:
current_level[elem] = OrderedDict(
[(x, OrderedDict()) for x in
self.out_break_fuels.keys()])
# Otherwise, set dict vals to another empty dict
elif elem not in current_level:
current_level[elem] = OrderedDict()
current_level = current_level[elem]
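        # Example of the resulting structure (illustrative, assuming AIA
        # regions and split_fuel=True): out_break_in['AIA CZ1'][
        # 'Residential (New)']['Heating (Equip.)'] is an OrderedDict keyed by
        # 'Electric'/'Non-Electric', while end uses without fuel splits
        # (e.g., 'Lighting') map to an empty OrderedDict that is filled with
        # partitioning fractions later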
self.cconv_bybldg_units = [
"$/ft^2 glazing", "$/ft^2 roof", "$/ft^2 wall",
"$/ft^2 footprint", "$/ft^2 floor", "$/occupant", "$/node"]
self.cconv_bytech_units_res = ["$/ft^2 floor", "$/occupant", "$/node"]
self.cconv_topkeys_map = {
"whole building": ["$/ft^2 floor", "$/node", "$/occupant"],
"heating and cooling": [
"$/kBtu/h heating", "$/kBtu/h cooling", "$/ft^2 glazing",
"$/ft^2 roof", "$/ft^2 wall", "$/ft^2 footprint"],
"ventilation": ["$/1000 CFM"],
"lighting": ["$/1000 lm"],
"water heating": ["$/kBtu/h water heating"],
"refrigeration": ["$/kBtu/h refrigeration"],
"cooking": ["$/kBtu/h cooking"],
"PCs": ["$/computer"]}
self.cconv_htclkeys_map = {
"supply": [
"$/kBtu/h heating", "$/kBtu/h cooling"],
"demand": [
"$/ft^2 glazing", "$/ft^2 roof",
"$/ft^2 wall", "$/ft^2 footprint"]}
self.cconv_tech_htclsupply_map = {
"heating equipment": ["$/kBtu/h heating"],
"cooling equipment": ["$/kBtu/h cooling"]}
self.cconv_tech_mltstage_map = {
"windows": {
"key": ["$/ft^2 glazing"],
"conversion stages": ["windows", "walls"]},
"roof": {
"key": ["$/ft^2 roof"],
"conversion stages": ["roof", "footprint"]},
"walls": {
"key": ["$/ft^2 wall"],
"conversion stages": ["walls"]},
"footprint": {
"key": ["$/ft^2 footprint"],
"conversion stages": ["footprint"]}}
self.cconv_whlbldgkeys_map = {
"wireless sensor network": ["$/node"],
"occupant-centered sensing and controls": ["$/occupant"]}
self.tech_units_rmv = ["HHV"]
# Note: EF handling for ECMs written before scout v0.5 (AEO 2019)
self.tech_units_map = {
"COP": {"AFUE": 1, "EER": 0.2930712},
"AFUE": {"COP": 1}, "UEF": {"SEF": 1},
"EF": {"UEF": 1, "SEF": 1, "CEF": 1},
"SEF": {"UEF": 1}}
self.sf_to_house = {}
# Assume that missing technology choice parameters come from the
# appliances/MELs areas; default is thus the EIA choice parameters
# for refrigerator technologies
self.deflt_choice = [-0.01, -0.12]
# Set valid types of TSV feature types
self.tsv_feature_types = ["shed", "shift", "shape"]
# Use EMM region setting as a proxy for desired time-sensitive
# valuation (TSV) and associated need to initialize handy TSV variables
if regions == "EMM":
self.tsv_climate_regions = [
"2A", "2B", "3A", "3B", "3C", "4A", "4B",
"4C", "5A", "5B", "5C", "6A", "6B", "7"]
self.tsv_nerc_regions = [
"FRCC", "MRO", "NPCC", "RFC", "SERC", "SPP", "TRE", "WECC"]
# Set a dict that maps each ASH climate zone to an EMM region
# in the climate zone with the most representative set of
# min/max system load hour and peak/take system load hour
# windows to use for that climate zone. For most climates, two
            # of these representative regions are assumed to account for
# varying types of renewable mixes (e.g., high solar vs. low
# solar, which yield differences in net load shapes and net
# peak/take periods). In these cases, the terminal value is
# formatted as a list with the EMM region number with the
# representative load hour data stored in the first element,
# and all other EMM regions in the climate that are covered
# by that representative load hour data stored in the second
# element. NOTE: the selection of representative EMM regions
# for each ASH region is based on the plots found here:
# https://drive.google.com/drive/folders/
# 1JSoQb78LgooUD_uXqBOzAC7Nl7eLJZnc?usp=sharing
self.cz_emm_map = {
"2A": {
"set 1": [2, (1, 2, 17)],
"set 2": [6, (6, 15)]},
"2B": {
"set 1": [20, (1, 20)]},
"3A": {
"set 1": [15, (6, 13, 14, 15, 16)],
"set 2": [1, (1, 17)]},
"3B": {
"set 1": [22, (21, 22)],
"set 2": [25, (1, 17, 20, 25)]},
"3C": {
"set 1": [21, (21, 22)]},
"4A": {
"set 1": [10, (4, 8, 10, 11, 17, 18)],
"set 2": [16, (6, 13, 14, 15, 16)]},
"4B": {
"set 1": [20, (1, 17, 20, 24)],
"set 2": [21, (21, 22)]},
"4C": {
"set 1": [23, (23,)],
"set 2": [21, (21,)]},
"5A": {
"set 1": [11, (3, 4, 7, 9, 10, 11, 18, 19, 24)],
"set 2": [5, (5, 12, 14)]},
"5B": {
"set 1": [24, (20, 23, 24, 25)],
"set 2": [21, (21,)]},
"5C": {
"set 1": [23, (23,)]},
"6A": {
"set 1": [3, (3, 5, 19)],
"set 2": [7, (7, 9, 10, 24)]},
"6B": {
"set 1": [23, (3, 19, 23, 24, 25)],
"set 2": [22, (21, 22)]},
"7": {
"set 1": [3, (3, 19)],
"set 2": [24, (7, 24, 25)]}}
if tsv_metrics is not False:
# Develop weekend day flags
wknd_day_flags = [0 for n in range(365)]
current_wkdy = 1
for d in range(365):
# Flag weekend day
if current_wkdy in [1, 7]:
wknd_day_flags[d] = 1
# Advance day of week by one unless Saturday (7), in which
# case day switches back to 1 (Sunday)
if current_wkdy <= 6:
current_wkdy += 1
else:
current_wkdy = 1
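                # Note on the flags above (an interpretation): the day-of-week
                # counter starts at 1, so day index 0 is flagged along with
                # each subsequent pair of day indices 6-7, 13-14, ...; the
                # weekend pattern is thus fixed rather than tied to the
                # calendar of a specific year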
# Develop lists with seasonal day of year ranges, both with and
# without weekends
# Summer days of year
sum_days = list(range(152, 274))
sum_days_wkdy = [
x for x in sum_days if wknd_day_flags[(x - 1)] != 1]
sum_days_wknd = [
x for x in sum_days if wknd_day_flags[(x - 1)] == 1]
# Winter days of year
wint_days = (list(
range(1, 91)) + list(range(335, 366)))
wint_days_wkdy = [
x for x in wint_days if wknd_day_flags[(x - 1)] != 1]
wint_days_wknd = [
x for x in wint_days if wknd_day_flags[(x - 1)] == 1]
# Intermediate days of year
inter_days = (list(
range(91, 152)) + list(range(274, 335)))
inter_days_wkdy = [
x for x in inter_days if wknd_day_flags[(x - 1)] != 1]
inter_days_wknd = [
x for x in inter_days if wknd_day_flags[(x - 1)] == 1]
# Set column names for a dataset that includes information on
# max/min net system load hours and peak/take net system load
# hour windows by season and EMM region
peak_take_names = (
"Region", "Year", "SummerMaxHr", "SummerMinHr",
"SummerPeakStartHr", "SummerPeakEndHr",
"SummerTakeStartHr1", "SummerTakeEndHr1",
"SummerTakeStartHr2", "SummerTakeEndHr2",
"SummerTakeStartHr3", "SummerTakeEndHr3",
"WinterMaxHr", "WinterMinHr",
"WinterPeakStartHr", "WinterPeakEndHr",
"WinterTakeStartHr1", "WinterTakeEndHr1",
"WinterTakeStartHr2", "WinterTakeEndHr2",
"WinterTakeStartHr3", "WinterTakeEndHr3",
"InterMaxHr", "InterMinHr",
"InterPeakStartHr", "InterPeakEndHr",
"InterTakeStartHr1", "InterTakeEndHr1",
"InterTakeStartHr2", "InterTakeEndHr2",
"InterTakeStartHr3", "InterTakeEndHr3")
# Choose the appropriate data to use in determining peak/take
# windows (total vs. net system load under reference vs. "Low
# Renewable Cost" supply-side AEO case)
if tsv_metrics[-2] == "1":
metrics_data = handyfiles.tsv_metrics_data_tot_ref
elif tsv_metrics[-2] == "2":
metrics_data = handyfiles.tsv_metrics_data_tot_hr
elif tsv_metrics[-2] == "3":
metrics_data = handyfiles.tsv_metrics_data_net_ref
else:
metrics_data = handyfiles.tsv_metrics_data_net_hr
# Import system max/min and peak/take hour load by EMM region
sysload_dat = numpy.genfromtxt(
path.join(base_dir, *metrics_data),
names=peak_take_names, delimiter=',', dtype="<i4",
encoding="latin1", skip_header=1)
# Find unique set of projection years in system peak/take data
sysload_dat_yrs = numpy.unique(sysload_dat["Year"])
# Set a dict that maps EMM region names to region
# numbers as defined by EIA
# (https://www.eia.gov/outlooks/aeo/pdf/f2.pdf)
self.emm_name_num_map = {
name: (ind + 1) for ind, name in enumerate(valid_regions)}
# Initialize a set of dicts that will store representative
# system load data for the summer, winter, and intermediate
# seasons by projection year
sysld_sum, sysld_wint, sysld_int = ({
str(yr): {key: {key_sub: None for
key_sub in self.cz_emm_map[key].keys()} if
type(self.cz_emm_map[key]) is dict else None
for key in self.cz_emm_map.keys()} for yr in
sysload_dat_yrs} for n in range(3))
# Fill in the dicts with seasonal system load data by year
# Loop through all projection years available in the system
# peak/take period data
for sys_yr in sysload_dat_yrs:
# Convert projection year to string for dict keys
sys_yr_str = str(sys_yr)
sysload_dat_yr = sysload_dat[
numpy.where((sysload_dat["Year"] == sys_yr))]
# Loop through all climate zones
for cz in self.cz_emm_map.keys():
# Handle climate zones with one representative system
# load shape differently than those with more than one
# such shape
if type(self.cz_emm_map[cz]) == int:
# Fill in seasonal system load data
sysld_sum[sys_yr_str][cz], \
sysld_wint[sys_yr_str][cz], \
sysld_int[sys_yr_str][cz] = self.set_peak_take(
sysload_dat_yr, self.cz_emm_map[cz])
else:
# Loop through the multiple EMM regions with
# representative system load data for the current
# climate zone
for set_v in self.cz_emm_map[cz].keys():
# Fill in seasonal system load data
sysld_sum[sys_yr_str][cz][set_v], \
sysld_wint[sys_yr_str][cz][set_v], \
sysld_int[sys_yr_str][cz][set_v] = \
self.set_peak_take(
sysload_dat_yr,
self.cz_emm_map[cz][set_v][0])
self.tsv_metrics_data = {
"season days": {
"all": {
"summer": sum_days,
"winter": wint_days,
"intermediate": inter_days
},
"weekdays": {
"summer": sum_days_wkdy,
"winter": wint_days_wkdy,
"intermediate": inter_days_wkdy
},
"weekends": {
"summer": sum_days_wknd,
"winter": wint_days_wknd,
"intermediate": inter_days_wknd
}
},
"system load hours": {
"summer": sysld_sum,
"winter": sysld_wint,
"intermediate": sysld_int
},
"peak days": {
"summer": {
"2A": 199,
"2B": 186,
"3A": 192,
"3B": 171,
"3C": 220,
"4A": 192,
"4B": 206,
"4C": 241,
"5A": 199,
"5B": 178,
"5C": 206,
"6A": 186,
"6B": 220,
"7": 206},
"winter": {
"2A": 24,
"2B": 17,
"3A": 31,
"3B": 10,
"3C": 10,
"4A": 31,
"4B": 339,
"4C": 38,
"5A": 26,
"5B": 10,
"5C": 12,
"6A": 10,
"6B": 17,
"7": 31}
},
"hourly index": list(enumerate(
itertools.product(range(365), range(24))))
}
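                # Example of the "hourly index" entries above (illustrative):
                # [(0, (0, 0)), (1, (0, 1)), ..., (23, (0, 23)),
                #  (24, (1, 0)), ...], i.e., 8760 flat hour-of-year indices
                # paired with (day of year, hour of day) tuples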
else:
self.tsv_metrics_data = None
self.emm_name_num_map = {
name: (ind + 1) for ind, name in enumerate(valid_regions)}
self.tsv_hourly_price, self.tsv_hourly_emissions = ({
reg: None for reg in valid_regions
} for n in range(2))
self.tsv_hourly_lafs = {
reg: {
"residential": {
bldg: {
eu: None for eu in self.in_all_map[
"end_use"]["residential"]["electricity"]
} for bldg in self.in_all_map[
"bldg_type"]["residential"]
},
"commercial": {
bldg: {
eu: None for eu in self.in_all_map[
"end_use"]["commercial"]["electricity"]
} for bldg in self.in_all_map[
"bldg_type"]["commercial"]
}
} for reg in valid_regions
}
else:
self.tsv_hourly_lafs = None
# Condition health data scenario initialization on whether user
# has requested that public health costs be accounted for
if health_costs is True:
# For each health data scenario, set the intended measure name
# appendage (tuple element 1), type of efficiency to attach health
# benefits to (element 2), and column in the data file from which
# to retrieve these benefit values (element 3)
self.health_scn_names = [
("PHC-EE (low)", "Uniform EE", "2017cents_kWh_7pct_low"),
("PHC-EE (high)", "Uniform EE", "2017cents_kWh_3pct_high")]
# Set data file with public health benefits information
self.health_scn_data = numpy.genfromtxt(
path.join(base_dir, *handyfiles.health_data),
names=("AVERT_Region", "EMM_Region", "Category",
"2017cents_kWh_3pct_low", "2017cents_kWh_3pct_high",
"2017cents_kWh_7pct_low",
"2017cents_kWh_7pct_high"),
delimiter=',', dtype=(['<U25'] * 3 + ['<f8'] * 4))
self.heat_ls_tech_scrn = (
"windows solar", "equipment gain", "people gain",
"other heat gain")
def set_peak_take(self, sysload_dat, restrict_key):
"""Fill in dicts with seasonal system load shape data.
Args:
sysload_dat (numpy.ndarray): System load shape data.
restrict_key (int): EMM region to restrict net load data to.
Returns:
Appropriate min/max net system load hour and peak/take net
system load hour window data for the EMM region of interest,
stored in dicts that are distinguished by season.
"""
# Restrict net system load data to the representative EMM region for
# the current climate zone
peak_take_cz = sysload_dat[numpy.where(
(sysload_dat["Region"] == restrict_key))]
# Set summer max load hour, min load hour, and peak/take windows
sum_peak_take = {
"max": peak_take_cz["SummerMaxHr"][0],
"min": peak_take_cz["SummerMinHr"][0],
"peak range": list(range(peak_take_cz["SummerPeakStartHr"][0],
peak_take_cz["SummerPeakEndHr"][0] + 1)),
"take range": list(range(peak_take_cz["SummerTakeStartHr1"][0],
peak_take_cz["SummerTakeEndHr1"][0] + 1))}
# Set winter max load hour, min load hour, and peak/take windows
wint_peak_take = {
"max": peak_take_cz["WinterMaxHr"][0],
"min": peak_take_cz["WinterMinHr"][0],
"peak range": list(range(peak_take_cz["WinterPeakStartHr"][0],
peak_take_cz["WinterPeakEndHr"][0] + 1)),
"take range": list(range(peak_take_cz["WinterTakeStartHr1"][0],
peak_take_cz["WinterTakeEndHr1"][0] + 1))}
# Set intermediate max load hour, min load hour, and peak/take windows
inter_peak_take = {
"max": peak_take_cz["InterMaxHr"][0],
"min": peak_take_cz["InterMinHr"][0],
"peak range": list(range(peak_take_cz["InterPeakStartHr"][0],
peak_take_cz["InterPeakEndHr"][0] + 1)),
"take range": list(range(peak_take_cz["InterTakeStartHr1"][0],
peak_take_cz["InterTakeEndHr1"][0] + 1))}
# Handle cases where seasonal low demand periods cover two or three
# non-contiguous time segments (e.g., 2-6AM, 10AM-2PM)
# Loop through seasonal take variable names
for seas in ["SummerTake", "WinterTake", "InterTake"]:
# Loop through segment number in the variable name
for seg in ["2", "3"]:
# Sandwich start/end hour information between season and
# segment information in the variable name
st_key = seas + "StartHr" + seg
end_key = seas + "EndHr" + seg
# Check to see whether data are present for the given season
# and segment (use segment starting hour variable as indicator)
if numpy.isfinite(peak_take_cz[st_key][0]):
# Append additional low demand periods as appropriate for
# the given season
if "Summer" in seas:
sum_peak_take["take range"].extend(list(
range(peak_take_cz[st_key][0],
peak_take_cz[end_key][0])))
elif "Winter" in seas:
wint_peak_take["take range"].extend(list(
range(peak_take_cz[st_key][0],
peak_take_cz[end_key][0])))
else:
inter_peak_take["take range"].extend(list(
range(peak_take_cz[st_key][0],
peak_take_cz[end_key][0])))
return sum_peak_take, wint_peak_take, inter_peak_take
def append_keyvals(self, dict1, keyval_list):
"""Append all terminal key values in a dict to a list.
Note:
Values already in the list should not be appended.
Args:
dict1 (dict): Dictionary with terminal key values
to append.
Returns:
List including all terminal key values from dict.
Raises:
ValueError: If terminal key values are not formatted as
either lists or strings.
"""
for (k, i) in dict1.items():
if isinstance(i, dict):
self.append_keyvals(i, keyval_list)
elif isinstance(i, list):
keyval_list.extend([
x for x in i if x not in keyval_list])
elif isinstance(i, str) and i not in keyval_list:
keyval_list.append(i)
else:
raise ValueError(
"Input dict terminal key values expected to be "
"lists or strings in the 'append_keyvals' function"
"for ECM '" + self.name + "'")
return keyval_list
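        # Illustrative call (a sketch; 'handyvars' is an assumed instance
        # name): handyvars.append_keyvals(
        #     handyvars.in_all_map['end_use'], keyval_list=[])
        # returns a flat, de-duplicated list of end use names such as
        # ['drying', 'other', 'water heating', ...]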
class EPlusMapDicts(object):
"""Class of dicts used to map Scout measure definitions to EnergyPlus.
Attributes:
czone (dict): Scout-EnergyPlus climate zone mapping.
bldgtype (dict): Scout-EnergyPlus building type mapping. Shown are
the EnergyPlus commercial reference building names that correspond
to each AEO commercial building type, and the weights needed in
some cases to map multiple EnergyPlus reference building types to
a single AEO type. See 'convert_data' JSON for more details.
fuel (dict): Scout-EnergyPlus fuel type mapping.
enduse (dict): Scout-EnergyPlus end use mapping.
structure_type (dict): Scout-EnergyPlus structure type mapping.
"""
def __init__(self):
self.czone = {
"sub arctic": "BA-SubArctic",
"very cold": "BA-VeryCold",
"cold": "BA-Cold",
"marine": "BA-Marine",
"mixed humid": "BA-MixedHumid",
"mixed dry": "BA-MixedDry",
"hot dry": "BA-HotDry",
"hot humid": "BA-HotHumid"}
self.bldgtype = {
"assembly": {
"Hospital": 1},
"education": {
"PrimarySchool": 0.26,
"SecondarySchool": 0.74},
"food sales": {
"Supermarket": 1},
"food service": {
"QuickServiceRestaurant": 0.31,
"FullServiceRestaurant": 0.69},
"health care": None,
"lodging": {
"SmallHotel": 0.26,
"LargeHotel": 0.74},
"large office": {
"LargeOfficeDetailed": 0.9,
"MediumOfficeDetailed": 0.1},
"small office": {
"SmallOffice": 0.12,
"OutpatientHealthcare": 0.88},
"mercantile/service": {
"RetailStandalone": 0.53,
"RetailStripmall": 0.47},
"warehouse": {
"Warehouse": 1},
"other": None}
self.fuel = {
'electricity': 'electricity',
'natural gas': 'gas',
'distillate': 'other_fuel'}
self.enduse = {
'heating': [
'heating_electricity', 'heat_recovery_electricity',
'humidification_electricity', 'pump_electricity',
'heating_gas', 'heating_other_fuel'],
'cooling': [
'cooling_electricity', 'pump_electricity',
'heat_rejection_electricity'],
'water heating': [
'service_water_heating_electricity',
'service_water_heating_gas',
'service_water_heating_other_fuel'],
'ventilation': ['fan_electricity'],
'cooking': [
'interior_equipment_gas', 'interior_equipment_other_fuel'],
'lighting': ['interior_lighting_electricity'],
'refrigeration': ['refrigeration_electricity'],
'PCs': ['interior_equipment_electricity'],
'non-PC office equipment': ['interior_equipment_electricity'],
'MELs': ['interior_equipment_electricity']}
# Note: assumed year range for each structure vintage shown in lists
self.structure_type = {
"new": '90.1-2013',
"retrofit": {
'90.1-2004': [2004, 2009],
'90.1-2010': [2010, 2012],
'DOE Ref 1980-2004': [1980, 2003],
'DOE Ref Pre-1980': [0, 1979]}}
class EPlusGlobals(object):
"""Class of global variables used in parsing EnergyPlus results file.
Attributes:
cbecs_sh (xlrd sheet object): CBECs square footages Excel sheet.
vintage_sf (dict): Summary of CBECs square footages by vintage.
eplus_coltypes (list): Expected EnergyPlus variable data types.
eplus_basecols (list): Variable columns that should never be removed.
eplus_perf_files (list): EnergyPlus simulation output file names.
eplus_vintages (list): EnergyPlus building vintage types.
eplus_vintage_weights (dicts): Square-footage-based weighting factors
for EnergyPlus vintages.
"""
def __init__(self, eplus_dir, cbecs_sf_byvint):
# Set building vintage square footage data from CBECS
self.vintage_sf = cbecs_sf_byvint
self.eplus_coltypes = [
('building_type', '<U50'), ('climate_zone', '<U50'),
('template', '<U50'), ('measure', '<U50'), ('status', '<U50'),
('ep_version', '<U50'), ('os_version', '<U50'),
('timestamp', '<U50'), ('cooling_electricity', '<f8'),
('cooling_water', '<f8'), ('district_chilled_water', '<f8'),
('district_hot_water_heating', '<f8'),
('district_hot_water_service_hot_water', '<f8'),
('exterior_equipment_electricity', '<f8'),
('exterior_equipment_gas', '<f8'),
('exterior_equipment_other_fuel', '<f8'),
('exterior_equipment_water', '<f8'),
('exterior_lighting_electricity', '<f8'),
('fan_electricity', '<f8'),
('floor_area', '<f8'), ('generated_electricity', '<f8'),
('heat_recovery_electricity', '<f8'),
('heat_rejection_electricity', '<f8'),
('heating_electricity', '<f8'), ('heating_gas', '<f8'),
('heating_other_fuel', '<f8'), ('heating_water', '<f8'),
('humidification_electricity', '<f8'),
('humidification_water', '<f8'),
('interior_equipment_electricity', '<f8'),
('interior_equipment_gas', '<f8'),
('interior_equipment_other_fuel', '<f8'),
('interior_equipment_water', '<f8'),
('interior_lighting_electricity', '<f8'),
('net_site_electricity', '<f8'), ('net_water', '<f8'),
('pump_electricity', '<f8'),
('refrigeration_electricity', '<f8'),
('service_water', '<f8'),
('service_water_heating_electricity', '<f8'),
('service_water_heating_gas', '<f8'),
('service_water_heating_other_fuel', '<f8'), ('total_gas', '<f8'),
('total_other_fuel', '<f8'), ('total_site_electricity', '<f8'),
('total_water', '<f8')]
self.eplus_basecols = [
'building_type', 'climate_zone', 'template', 'measure']
# Set EnergyPlus data file name list, given local directory
self.eplus_perf_files = [
f for f in listdir(eplus_dir) if
isfile(join(eplus_dir, f)) and '_scout_' in f]
# Import the first of the EnergyPlus measure performance files and use
# it to establish EnergyPlus vintage categories
eplus_file = numpy.genfromtxt(
(eplus_dir + '/' + self.eplus_perf_files[0]), names=True,
dtype=self.eplus_coltypes, delimiter=",", missing_values='')
self.eplus_vintages = numpy.unique(eplus_file['template'])
# Determine appropriate weights for mapping EnergyPlus vintages to the
# 'new' and 'retrofit' building structure types of Scout
self.eplus_vintage_weights = self.find_vintage_weights()
def find_vintage_weights(self):
"""Find square-footage-based weighting factors for building vintages.
Note:
Use CBECs building vintage square footage data to derive weighting
factors that will map the EnergyPlus building vintages to the 'new'
and 'retrofit' building structure types of Scout.
Returns:
Weights needed to map each EnergyPlus vintage category to the 'new'
and 'retrofit' structure types defined in Scout.
Raises:
ValueError: If vintage weights do not sum to 1.
KeyError: If unexpected vintage names are discovered in the
EnergyPlus file.
"""
handydicts = EPlusMapDicts()
# Set the expected names of the EnergyPlus building vintages and the
# low and high year limits of each building vintage category
expected_eplus_vintage_yr_bins = [
handydicts.structure_type['new']] + \
list(handydicts.structure_type['retrofit'].keys())
# Initialize a variable meant to translate the summed square footages
# of multiple 'retrofit' building vintages into weights that sum to 1;
# also initialize a variable used to check that these weights indeed
# sum to 1
total_retro_sf, retro_weight_sum = (0 for n in range(2))
# Check for expected EnergyPlus vintage names
if sorted(self.eplus_vintages) == sorted(
expected_eplus_vintage_yr_bins):
# Initialize a dictionary with the EnergyPlus vintages as keys and
# associated square footage values starting at zero
eplus_vintage_weights = dict.fromkeys(self.eplus_vintages, 0)
# Loop through the EnergyPlus vintages and assign associated
# weights by mapping to cbecs square footage data
for k in eplus_vintage_weights.keys():
# If looping through the EnergyPlus vintage associated with the
# 'new' Scout structure type, set vintage weight to 1 (only one
# vintage category will be associated with this structure type)
if k == handydicts.structure_type['new']:
eplus_vintage_weights[k] = 1
# Otherwise, set EnergyPlus vintage weight initially to the
# square footage that corresponds to that vintage in cbecs
else:
# Loop through all cbecs vintage bins
for k2 in self.vintage_sf.keys():
# Find the limits of the cbecs vintage bin
cbecs_match = re.search(
r'(\D*)(\d*)(\s*)(\D*)(\s*)(\d*)', k2)
cbecs_t1 = cbecs_match.group(2)
cbecs_t2 = cbecs_match.group(6)
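                        # Illustrative parse, assuming CBECS bin labels like
                        # '2004 to 2007' or 'Before 1920': the first label
                        # yields cbecs_t1 = '2004' and cbecs_t2 = '2007',
                        # while the second yields cbecs_t1 = '1920' and an
                        # empty cbecs_t2, which is handled below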
# Handle a 'Before Year X' case in cbecs (e.g., 'Before
# 1920'), setting the lower year limit to zero
if cbecs_t2 == '':
cbecs_t2 = 0
# Determine a single average year that represents the
# current cbecs vintage bin
cbecs_yr = (int(cbecs_t1) + int(cbecs_t2)) / 2
# If the cbecs bin year falls within the year limits of
# the current EnergyPlus vintage bin, add the
# associated cbecs ft^2 data to the EnergyPlus
# vintage weight value
if cbecs_yr >= handydicts.structure_type[
'retrofit'][k][0] and \
cbecs_yr < handydicts.structure_type[
'retrofit'][k][1]:
eplus_vintage_weights[k] += self.vintage_sf[k2]
total_retro_sf += self.vintage_sf[k2]
# Run through all EnergyPlus vintage weights, normalizing the
# square footage-based weights for each 'retrofit' vintage to the
# total square footage across all 'retrofit' vintage categories
for k in eplus_vintage_weights.keys():
# If running through the 'new' EnergyPlus vintage bin, register
# the value of its weight (should be 1)
if k == handydicts.structure_type['new']:
new_weight_sum = eplus_vintage_weights[k]
# If running through a 'retrofit' EnergyPlus vintage bin,
# normalize the square footage for that vintage by total
# square footages across 'retrofit' vintages to arrive at the
# final weight for that EnergyPlus vintage
else:
eplus_vintage_weights[k] /= total_retro_sf
retro_weight_sum += eplus_vintage_weights[k]
# Check that the 'new' EnergyPlus vintage weight equals 1 and that
# all 'retrofit' EnergyPlus vintage weights sum to 1
if new_weight_sum != 1:
raise ValueError("Incorrect new vintage weight total when "
"instantiating 'EPlusGlobals' object")
elif retro_weight_sum != 1:
                raise ValueError(
                    "Incorrect retrofit vintage weight total when "
                    "instantiating 'EPlusGlobals' object")
else:
raise KeyError(
"Unexpected EnergyPlus vintage(s) when instantiating "
"'EPlusGlobals' object; "
"check EnergyPlus vintage assumptions in structure_type "
"attribute of 'EPlusMapDict' object")
return eplus_vintage_weights
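
# Minimal usage sketch for EPlusGlobals (the directory path and CBECS vintage
# square footage values below are illustrative assumptions, not data from
# this module):
#   eplus_globals = EPlusGlobals(
#       "./energyplus_data", {"2004 to 2007": 1000, "Before 1920": 500})
#   eplus_globals.eplus_vintage_weights  # weight per EnergyPlus template;
#                                        # 'new' = 1, 'retrofit' sums to 1
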
class Measure(object):
"""Set up a class representing efficiency measures as objects.
Attributes:
**kwargs: Arbitrary keyword arguments used to fill measure attributes
from an input dictionary.
remove (boolean): Determines whether measure should be removed from
analysis engine due to insufficient market source data.
energy_outputs (dict): Records several user command line input
selections that affect measure energy outputs.
eff_fs_splt (dict): Data needed to determine the fuel splits of
efficient case results for fuel switching measures.
handyvars (object): Global variables useful across class methods.
retro_rate (float or list): Stock retrofit rate specific to the ECM.
technology_type (string): Flag for supply- or demand-side technology.
yrs_on_mkt (list): List of years that the measure is active on market.
markets (dict): Data grouped by adoption scheme on:
a) 'master_mseg': a measure's master market microsegments (stock,
energy, carbon, cost),
            b) 'mseg_adjust': all microsegments that contribute to each master
            microsegment (required later for measure competition), and
            c) 'mseg_out_break': master microsegment breakdowns by key
            variables (climate zone, building class, end use).
        sector_shapes (dict): Sector-level hourly baseline and efficient load
            shapes by adopt scheme, EMM region, and year.
"""
def __init__(
self, base_dir, handyvars, handyfiles, site_energy,
capt_energy, regions, tsv_metrics, health_costs, split_fuel,
floor_start, exog_hp_rates, grid_decarb, adopt_scn_usr,
**kwargs):
# Read Measure object attributes from measures input JSON.
for key, value in kwargs.items():
setattr(self, key, value)
# Check to ensure that measure name is proper length for plotting;
# for now, exempt measures with public health cost adders
if len(self.name) > 45 and "PHC" not in self.name:
raise ValueError(
"ECM '" + self.name + "' name must be <= 45 characters")
self.remove = False
# Flag custom energy output settings (user-defined)
self.energy_outputs = {
"site_energy": False, "grid_decarb": False,
"captured_energy_ss": False, "alt_regions": False,
"tsv_metrics": False, "health_costs": False,
"split_fuel": False, "floor_start": False, "exog_hp_rates": False,
"adopt_scn_restrict": False}
if site_energy is True:
self.energy_outputs["site_energy"] = True
if capt_energy is True:
self.energy_outputs["captured_energy_ss"] = True
if regions != "AIA":
self.energy_outputs["alt_regions"] = regions
if tsv_metrics is not False:
if (self.fuel_type not in ["electricity", ["electricity"]]) and \
self.fuel_switch_to != "electricity":
                raise ValueError(
                    "Non-electric fuel found for measure '" + self.name +
                    "' alongside '--tsv_metrics' option. Such metrics cannot "
"be calculated for non-electric baseline segments of "
"energy use. To address this issue, restrict the "
"measure's fuel type to electricity.")
self.energy_outputs["tsv_metrics"] = tsv_metrics
if health_costs is not None:
# Look for pre-determined health cost scenario names in the
# UsefulVars class, "health_scn_names" attribute
if "PHC-EE (low)" in self.name:
self.energy_outputs["health_costs"] = "Uniform EE-low"
elif "PHC-EE (high)" in self.name:
self.energy_outputs["health_costs"] = "Uniform EE-high"
if split_fuel is True:
self.energy_outputs["split_fuel"] = True
if floor_start is not False:
self.energy_outputs["floor_start"] = floor_start
if exog_hp_rates is not False:
self.energy_outputs["exog_hp_rates"] = exog_hp_rates
if grid_decarb is not False:
self.energy_outputs["grid_decarb"] = grid_decarb
if adopt_scn_usr is not False:
self.energy_outputs["adopt_scn_restrict"] = adopt_scn_usr
self.eff_fs_splt = {a_s: {} for a_s in handyvars.adopt_schemes}
self.sector_shapes = {a_s: {} for a_s in handyvars.adopt_schemes}
# Deep copy handy vars to avoid any dependence of changes to these vars
# across other measures that use them
self.handyvars = copy.deepcopy(handyvars)
# Set the rate of baseline retrofitting for ECM stock-and-flow calcs
try:
            # Check first whether pulling up the retrofit rate raises an error
self.retro_rate[self.handyvars.aeo_years[0]]
# Accommodate retrofit rate input as a probability distribution
if type(self.retro_rate[self.handyvars.aeo_years[0]]) is list and \
isinstance(
self.retro_rate[self.handyvars.aeo_years[0]][0], str):
# Sample measure retrofit rate values
self.retro_rate = {
yr: self.rand_list_gen(
self.retro_rate[yr], self.handyvars.nsamples) for yr in
self.handyvars.aeo_years}
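            # Illustrative (assumed) input satisfying the required format:
            # 'retro_rate': {"2021": ["normal", 0.01, 0.003], ...}, i.e., a
            # distribution name string followed by its float parameters for
            # each AEO year; supported names depend on 'rand_list_gen'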
# Raise error in case where distribution is incorrectly specified
elif type(self.retro_rate[self.handyvars.aeo_years[0]]) is list:
raise ValueError(
"ECM " + self.name + " 'retro_rate' distribution must " +
"be formatted as [<distribution name> (string), " +
"<distribution parameters> (floats)]")
# If retrofit rate is set to None, use default retrofit rate value
elif self.retro_rate is None:
self.retro_rate = self.handyvars.retro_rate
# Do nothing in case where retrofit rate is specified as normal
else:
pass
except AttributeError:
# If no 'retro_rate' attribute was given for the ECM, use default
# retrofit rate value
self.retro_rate = self.handyvars.retro_rate
# Determine whether the measure replaces technologies pertaining to
# the supply or the demand of energy services
self.technology_type = None
# Measures replacing technologies in a pre-specified
# 'demand_tech' list are of the 'demand' side technology type
if (isinstance(self.technology, list) and all([
x in self.handyvars.demand_tech for x in self.technology])) or \
self.technology in self.handyvars.demand_tech:
self.technology_type = "demand"
# Measures replacing technologies not in a pre-specified
# 'demand_tech' list are of the 'supply' side technology type
else:
self.technology_type = "supply"
# Reset market entry year if None or earlier than min. year
if self.market_entry_year is None or (int(
self.market_entry_year) < int(self.handyvars.aeo_years[0])):
self.market_entry_year = int(self.handyvars.aeo_years[0])
# If a global delay to market entry of the measure set has been
# imposed by the user and the measure's market entry year is earlier
# than the delayed start year, set to delayed start year
if floor_start is not None and (
self.market_entry_year < floor_start):
self.market_entry_year = floor_start
# Reset measure market exit year if None or later than max. year
if self.market_exit_year is None or (int(
self.market_exit_year) > (int(
self.handyvars.aeo_years[-1]) + 1)):
self.market_exit_year = int(self.handyvars.aeo_years[-1]) + 1
self.yrs_on_mkt = [str(i) for i in range(
self.market_entry_year, self.market_exit_year)]
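        # For example, a market entry year of 2025 and exit year of 2030
        # yield yrs_on_mkt = ['2025', '2026', '2027', '2028', '2029'] (the
        # exit year itself is excluded)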
# Test for whether a user has set time sensitive valuation features
# for the given measure. If no "tsv_features" parameter was
# specified for the ECM, set this parameter to None
try:
# Try to access the ECM's TSV feature dict keys
self.tsv_features.keys()
# If TSV features are present, ensure that EMM regions are selected
# and that the measure only applies to electricity (and no fuel
# switching is selected)
if regions != "EMM":
raise ValueError(
"Measure '" + self.name + "' has time sensitive "
"assessment features (see 'tsv_features' attribute) but "
"regions are not set to EMM; try running 'ecm_prep.py' "
"again with the --alt_regions option included and select "
"EMM regions when prompted.")
if (self.fuel_type not in ["electricity", ["electricity"]]) and \
self.fuel_switch_to != "electricity":
                raise ValueError(
                    "Non-electric fuel found for measure '" + self.name +
                    "' alongside time sensitive valuation features. Such "
"features cannot be implemented for non-electric "
"baseline segments of energy use. To address this issue, "
"restrict the measure's fuel type to electricity.")
# If the ECM is assigned a custom savings shape, load the
# associated custom savings shape data from a CSV file
if "shape" in self.tsv_features.keys() and \
"custom_annual_savings" in \
self.tsv_features["shape"].keys():
# Determine the CSV file name
csv_shape_file_name = \
self.tsv_features["shape"]["custom_annual_savings"]
# Assuming the standard location for ECM savings shape CSV
# files, import custom savings shape data as numpy array and
# store it in the ECM's custom savings shape attribute for
# subsequent use in the 'apply_tsv' function
self.tsv_features["shape"]["custom_annual_savings"] = \
numpy.genfromtxt(
path.join(base_dir, *handyfiles.tsv_shape_data,
csv_shape_file_name),
names=True, delimiter=',', dtype=[
('Hour_of_Year', '<i4'),
('Climate_Zone', '<U25'),
('Net_Load_Version', '<i4'),
('Building_Type', '<U25'),
('End_Use', '<U25'),
('Baseline_Load', '<f8'),
('Measure_Load', '<f8'),
('Relative_Savings', '<f8')],
encoding="latin1")
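                # Per the dtype specification above, the savings shape CSV is
                # expected to carry the header columns Hour_of_Year,
                # Climate_Zone, Net_Load_Version, Building_Type, End_Use,
                # Baseline_Load, Measure_Load, and Relative_Savings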
# Retrieve custom savings shapes for all applicable
# end use, building type, and climate zone combinations
# and store within a dict for use in 'apply_tsv' function
print("Retrieving custom savings shape data for measure "
+ self.name + "...", end="", flush=True)
# Set shorthand for custom savings shape data
css_dat = self.tsv_features["shape"][
"custom_annual_savings"]
# Initialize dict to use in storing shape data
css_dict = {}
# Find all unique end uses in the shape data
euses = numpy.unique(css_dat["End_Use"])
# Loop through all end uses in the data
for eu in euses:
# Handle case where end use names in the data are
# read in with added quotes (e.g., 'heating' comes in
# as '"heating"'), or are not strings. In the first
# instance, use eval() to strip the added quotes from the
# end use name and key in the savings shape information
# by the result
try:
eu_key = eval(eu)
except (NameError, SyntaxError):
eu_key = eu
if type(eu_key) != str:
eu_key = str(eu_key)
# Restrict shape data to that of the current end use
css_dat_eu = css_dat[
numpy.in1d(css_dat["End_Use"], eu)]
# Initialize dict under the current end use key
css_dict[eu_key] = {}
# Find all unique building types and climate zones in
# the end-use-restricted shape data
bldg_types = numpy.unique(
css_dat_eu["Building_Type"])
czones = numpy.unique(
css_dat_eu["Climate_Zone"])
# Loop through all building types under the current
# end use
for bd in bldg_types:
# Handle case where building type names in the data
# are read in with added quotes, or are not strings
try:
bd_key = eval(bd)
except (NameError, SyntaxError):
bd_key = bd
if type(bd_key) != str:
bd_key = str(bd_key)
# Account for possible use of StandAlone naming in
# savings shape CSV, vs. Standalone naming in Scout's
# baseline load shapes file
if bd_key == "RetailStandAlone":
bd_key = "RetailStandalone"
# Account for possible use of MediumOffice naming
# in savings shape CSV, vs. MediumOfficeDetailed in
# Scout's baseline load shapes file
elif bd_key == "MediumOffice":
bd_key = "MediumOfficeDetailed"
                        # Account for possible use of LargeOffice naming
                        # in savings shape CSV, vs. LargeOfficeDetailed in
                        # Scout's baseline load shapes file
elif bd_key == "LargeOffice":
bd_key = "LargeOfficeDetailed"
# Initialize dict under the current end use and
# building type keys
css_dict[eu_key][bd_key] = {}
# Loop through all climate zones under the current
# end use
for cz in czones:
# Handle case where climate zone names in the
# data are read in with added quotes, or are not
# strings
try:
cz_key = eval(cz)
except (NameError, SyntaxError):
cz_key = cz
if type(cz_key) != str:
cz_key = str(cz_key)
# Account for possible use of climate 7A naming
# in savings shape CSV, vs. 7 naming in Scout's
# baseline load shapes file
if cz_key == "7A":
cz_key = "7"
# Restrict shape data to that of the current
# end use, building type, and climate zone
# combination
css_dat_eu_bldg_cz = css_dat_eu[
numpy.in1d(css_dat_eu["Building_Type"], bd) &
numpy.in1d(css_dat_eu["Climate_Zone"], cz)]
# Initialize dict under the current end use and
# building type keys
css_dict[eu_key][bd_key][cz_key] = {}
# Find all unique representative system load
# shapes for the current climate zone
sys_v = numpy.unique(
css_dat_eu_bldg_cz["Net_Load_Version"])
# If "Net_Load_Version" column is blank, set unique
# net load versions to 1
if len(sys_v) == 0 or (
len(sys_v) == 1 and sys_v[0] == -1):
sys_v = [1]
for sv in sys_v:
v_key = "set " + str(sv)
css_dict[eu_key][bd_key][cz_key][v_key] = \
css_dat_eu_bldg_cz[numpy.in1d(
css_dat_eu_bldg_cz[
"Net_Load_Version"], sv)][
"Relative_Savings"]
# Check to ensure that the resultant dict
# value is the expected 8760 elements long; if
# not, throw error
if len(css_dict[eu_key][bd_key][cz_key][
v_key]) != 8760:
                                    raise ValueError(
                                        "Measure '" + self.name +
                                        "' requires "
"custom savings shape data, but the "
"custom shape given for climate "
"zone " + cz_key +
", building type "
+ bd_key + ", and end use " + eu_key +
" has more or less than 8760 values. "
"Check that 8760 hourly savings " +
"fractions are available for all " +
"baseline market segments the " +
"measure applies to in "
"./ecm_definitions/energy_plus_data"
"/savings_shapes.")
# Set custom savings shape information to populated dict
self.tsv_features["shape"]["custom_annual_savings"] = \
css_dict
print("Data import complete")
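                # Illustrative (assumed) shape of the resulting dict, e.g.,
                # css_dict['heating']['LargeHotel']['3A']['set 1'] -> numpy
                # array of 8760 hourly relative savings fractions; keys are
                # end use, building type, climate zone, and net load version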
except AttributeError:
self.tsv_features = None
# Check to ensure that the proper EMM regions are defined in the
# measure 'climate_zone' attribute if time sensitive ECM features
# and/or output metrics are desired
valid_tsv_regions = [
'TRE', 'FRCC', 'MISW', 'MISC', 'MISE', 'MISS',
'ISNE', 'NYCW', 'NYUP', 'PJME', 'PJMW', 'PJMC',
'PJMD', 'SRCA', 'SRSE', 'SRCE', 'SPPS', 'SPPC',
'SPPN', 'SRSG', 'CANO', 'CASO', 'NWPP', 'RMRG', 'BASN']
if ((self.tsv_features is not None or tsv_metrics is not False) and ((
type(self.climate_zone) == list and any([
x not in valid_tsv_regions for x in self.climate_zone])) or
(type(self.climate_zone) != list and self.climate_zone != "all"
and (self.climate_zone not in valid_tsv_regions)))):
raise ValueError(
"Invalid 'climate_zone' attribute value(s) for ECM '" +
self.name + "' given desired time sensitive valuation "
"operations/outputs. Currently, only EMM regions are "
"supported for time sensitive valuation. This issue can "
"be addressed by ensuring all ECM 'climate_zone' values "
"reflect one of the EMM regions.")
self.markets = {}
for adopt_scheme in handyvars.adopt_schemes:
self.markets[adopt_scheme] = OrderedDict([(
"master_mseg", OrderedDict([(
"stock", {
"total": {
"all": None, "measure": None},
"competed": {
"all": None, "measure": None}}),
(
"energy", {
"total": {
"baseline": None, "efficient": None},
"competed": {
"baseline": None, "efficient": None}}),
(
"carbon", {
"total": {
"baseline": None, "efficient": None},
"competed": {
"baseline": None, "efficient": None}}),
(
"cost", {
"stock": {
"total": {
"baseline": None, "efficient": None},
"competed": {
"baseline": None, "efficient": None}},
"energy": {
"total": {
"baseline": None, "efficient": None},
"competed": {
"baseline": None, "efficient": None}},
"carbon": {
"total": {
"baseline": None, "efficient": None},
"competed": {
"baseline": None, "efficient": None}}}),
(
"lifetime", {"baseline": None, "measure": None})])),
(
"mseg_adjust", {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"sub-market": {
"original energy (total)": {},
"adjusted energy (sub-market)": {}},
"stock-and-flow": {
"original energy (total)": {},
"adjusted energy (previously captured)": {},
"adjusted energy (competed)": {},
"adjusted energy (competed and captured)": {}},
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}}),
(
"mseg_out_break", {key: {
"baseline": copy.deepcopy(self.handyvars.out_break_in),
"efficient": copy.deepcopy(self.handyvars.out_break_in),
"savings": copy.deepcopy(self.handyvars.out_break_in)} for
key in ["energy", "carbon", "cost"]})])
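        # Example of keying into the structure initialized above (adoption
        # scheme names come from handyvars.adopt_schemes):
        # self.markets[<scheme>]["master_mseg"]["energy"]["total"]["baseline"]
        # will hold total baseline energy once markets are filled in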
def fill_eplus(self, msegs, eplus_dir, eplus_coltypes,
eplus_files, vintage_weights, base_cols):
"""Fill in measure performance with EnergyPlus simulation results.
Note:
Find the appropriate set of EnergyPlus simulation results for
the current measure, and use the relative savings percentages
in these results to determine the measure performance attribute.
Args:
msegs (dict): Baseline microsegment stock/energy data to use in
validating categorization of measure performance information.
eplus_dir (string): Directory of EnergyPlus performance files.
eplus_coltypes (list): Expected EnergyPlus variable data types.
eplus_files (list): EnergyPlus performance file names.
            vintage_weights (dict): Square-footage-derived weighting factors
                for each EnergyPlus building vintage type.
            base_cols (list): EnergyPlus data columns that identify each
                simulation record (building type, climate zone, template,
                measure).
        Returns:
            Updated Measure energy_efficiency and energy_efficiency_source
            attribute values.
Raises:
ValueError: If EnergyPlus file is not matched to Measure
definition or more than one EnergyPlus file matches the
Measure definition.
"""
# Instantiate useful EnergyPlus-Scout mapping dicts
handydicts = EPlusMapDicts()
# Determine the relevant EnergyPlus building type name(s)
bldg_type_names = []
for x in self.bldg_type:
bldg_type_names.extend(handydicts.bldgtype[x].keys())
# Find all EnergyPlus files including the relevant building type
# name(s)
eplus_perf_in = [(eplus_dir + '/' + x) for x in eplus_files if any([
y.lower() in x for y in bldg_type_names])]
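        # For example, if a Scout building type maps to an EnergyPlus
        # prototype name such as 'LargeHotel' (illustrative), any file in
        # eplus_files whose name contains 'largehotel' is added to
        # eplus_perf_in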
# Import EnergyPlus input file as array and use it to fill a dict
# of measure performance data
if len(eplus_perf_in) > 0:
# Assemble the EnergyPlus data into a record array
eplus_perf_array = self.build_array(eplus_coltypes, eplus_perf_in)
# Create a measure performance dictionary, zeroed out, to
# be updated with data from EnergyPlus array
perf_dict_empty = self.create_perf_dict(msegs)
# Update measure performance based on EnergyPlus data
# (* Note: only update efficiency information for
# secondary microsegments if applicable)
if perf_dict_empty['secondary'] is not None:
self.energy_efficiency = self.fill_perf_dict(
perf_dict_empty, eplus_perf_array,
vintage_weights, base_cols, eplus_bldg_types={})
else:
self.energy_efficiency = self.fill_perf_dict(
perf_dict_empty['primary'], eplus_perf_array,
vintage_weights, base_cols, eplus_bldg_types={})
# Set the energy efficiency data source for the measure to
# EnergyPlus and set to highest data quality rating
self.energy_efficiency_source = 'EnergyPlus/OpenStudio'
else:
            raise ValueError(
                "Failure to find relevant EPlus files for " +
                "Scout building type(s) " + str(self.bldg_type) +
                " in ECM '" + self.name + "'")
def fill_mkts(self, msegs, msegs_cpl, convert_data, tsv_data_init, opts,
contrib_meas_pkg, tsv_data_nonfs):
"""Fill in a measure's market microsegments using EIA baseline data.
Args:
msegs (dict): Baseline microsegment stock and energy use.
msegs_cpl (dict): Baseline technology cost, performance, and
lifetime.
convert_data (dict): Measure -> baseline cost unit conversions.
tsv_data_init (dict): Data for time sensitive valuation of
efficiency.
opts (object): Stores user-specified execution options.
contrib_meas_pkg (list): List of measure names that contribute
to active packages in the preparation run.
tsv_data_nonfs (dict): If applicable, base-case TSV data to apply
to non-fuel switching measures under a high decarb. scenario.
Returns:
Updated measure stock, energy/carbon, and cost market microsegment
information, as stored in the 'markets' attribute.
Raises:
KeyError: If measure and baseline performance or cost units are
inconsistent, or no valid baseline market microsegments can
be found for the given measure definition.
ValueError: If an input value from the measure definition is
invalid, or if baseline market microsegment information cannot
be mapped to a valid breakout category for measure outputs.
"""
# Check that the measure's applicable baseline market input definitions
# are valid before attempting to retrieve data on this baseline market
self.check_mkt_inputs()
# Notify user that ECM is being updated; suppress new line
# if not in verbose mode ('Success' is appended to this message on
# the same line of the console upon completion of ECM update)
if opts is not None and opts.verbose is True:
print("Updating ECM '" + self.name + "'...")
else:
print("Updating ECM '" + self.name + "'...", end="", flush=True)
# If multiple runs are required to handle probability distributions on
# measure inputs, set a number to seed each random draw of cost,
        # performance, and/or lifetime with, for consistency across all
# microsegments that contribute to the measure's master microsegment
if self.handyvars.nsamples is not None:
rnd_sd = numpy.random.randint(10000)
# Initialize a counter of key chains that yield "stock" and "energy"
# keys in the baseline data dict; that have valid stock/energy data;
# that have valid cost/performance/lifetime data; and that have valid
# consumer choice data. Also initialize a cost conversion flag
valid_keys, valid_keys_stk_energy, valid_keys_cpl, \
valid_keys_consume, cost_converts = (0 for n in range(5))
# Initialize lists of technology names that have yielded warnings
# for invalid stock/energy data, cost/performance/lifetime data
# and consumer data, EIA baseline cost adjustments (in the case
# of heat pump HVAC) and a list of the climate zones, building types,
# and structure type of removed primary microsegments (used to remove
# associated secondary microsegments)
stk_energy_warn, cpl_warn, consume_warn, hp_warn, removed_primary = (
[] for n in range(5))
# Initialize flags for invalid information about sub-market fraction
# source, URL, and derivation
sbmkt_source_invalid, sbmkt_url_invalid, sbmkt_derive_invalid = (
0 for n in range(3))
# Initialize variable indicating use of ft^2 floor area as microsegment
# stock
sqft_subst = 0
# Establish a flag for a commercial lighting case where the user has
# not specified secondary end use effects on heating and cooling. In
# this case, secondary effects are added automatically by adjusting
# the "lighting gain" thermal load component in accordance with the
# lighting efficiency change (e.g., a 40% relative savings from
# efficient lighting equipment translates to a 40% increase in heating
# loads and 40% decrease in cooling load)
light_scnd_autoperf = False
# Initialize a list that tracks completed cost conversions - including
# converted values and units - for cases where the cost conversion need
# occur only once per microsegment building type
bldgs_costconverted = {}
# Fill out any "secondary" end use impact information and any climate
# zone, building type, fuel type, end use, and/or technology attributes
# marked 'all' by users
self.fill_attr()
# Fill in sector baseline/efficient 8760 shapes attribute across all
# applicable regions for the measure with a list of 8760 zeros (if
# necessary)
if opts.sect_shapes is True:
            # Find applicable region list (ensure it is in list format)
if type(self.climate_zone) is str:
grid_regions = copy.deepcopy([self.climate_zone])
else:
grid_regions = copy.deepcopy(self.climate_zone)
for a_s in self.handyvars.adopt_schemes:
self.sector_shapes[a_s] = {reg: {yr: {
"baseline": [0 for x in range(8760)],
"efficient": [0 for x in range(8760)]} for yr in
self.handyvars.aeo_years_summary} for reg in grid_regions}
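        # Illustrative (assumed) resulting structure:
        # self.sector_shapes[<scheme>]['TRE']['2030'] ->
        # {'baseline': [0]*8760, 'efficient': [0]*8760}, where 'TRE' and
        # '2030' stand in for any applicable EMM region and summary year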
# Find all possible microsegment key chains. First, determine all
# "primary" microsegment key chains, where "primary" refers to the
# baseline microsegment(s) directly affected by a measure (e.g.,
# incandescent bulb lights for an LED replacement measure). Second,
# if needed, determine all "secondary" microsegment key chains, where
# "secondary" refers to baseline microsegments that are indirectly
# affected by the measure (e.g., heating and cooling for the above
# LED replacement). Secondary microsegments are only relevant for
# energy/carbon and associated energy/carbon cost calculations, as
# they do not indicate additional equipment purchases (and thus do not
# affect stock, stock costs, or equipment lifetime calculations)
# Determine "primary" microsegment key chains
ms_iterable, ms_lists = self.create_keychain("primary")
# Insert a flag for linking heating and cooling microsegments, if
# applicable; when linked, heat pump conversion rates for heating
# microsegments will also be applied to the cooling microsegments
# affected by the measure in a given building type/region
if self.handyvars.hp_rates and all([
x in ms_lists[3] for x in ["heating", "cooling"]]):
link_htcl_fs_rates = True
else:
link_htcl_fs_rates = ""
# If needed, fill out any secondary microsegment fuel type, end use,
# and/or technology input attributes marked 'all' by users. Determine
# secondary microsegment key chains and add to the primary
# microsegment key chain list. In a commercial lighting measure case
# where no heating/cooling effects from lighting are directly
# specified, use the "lighting gain" thermal load component
# microsegments to represent secondary effects of the lighting measure
if self.end_use["secondary"] is not None:
ms_iterable_second, ms_lists_second = self.create_keychain(
"secondary")
ms_iterable.extend(ms_iterable_second)
elif "lighting" in self.end_use["primary"] and (
not opts or opts.no_scnd_lgt is not True) and any([
x not in self.end_use["primary"] for x in [
"heating", "cooling"]]) and \
any([x not in ["single family home", "multi family home",
"mobile home"] for x in self.bldg_type]):
# Set secondary lighting mseg performance flag to True
light_scnd_autoperf = True
# Set secondary energy efficiency value to "Missing"
# (used below as a flag)
self.energy_efficiency["secondary"] = \
"Missing (secondary lighting)"
# Set secondary energy efficiency units to "relative
# savings"
self.energy_efficiency_units["secondary"] = \
"relative savings (constant)"
# Set secondary fuel type to include all heating/cooling
# fuels
self.fuel_type["secondary"] = [
"electricity", "natural gas", "distillate"]
# Set relevant secondary end uses
self.end_use["secondary"] = ["heating", "cooling"]
# Set secondary technology type ("demand" as the lighting
# measure affects heating/cooling loads)
self.technology_type["secondary"] = "demand"
# Set secondary technology class to "lighting gain", which
# will access the portion of commercial heating/cooling
# demand that is attributable to waste heat from lights
self.technology["secondary"] = "lighting gain"
# Determine secondary microsegment key chains and add to
# the primary microsegment key chain list
ms_iterable_second, ms_lists_second = self.create_keychain(
"secondary")
ms_iterable.extend(ms_iterable_second)
# Loop through discovered key chains to find needed performance/cost
# and stock/energy information for measure
for ind, mskeys in enumerate(ms_iterable):
# Set building sector for the current microsegment
if mskeys[2] in [
"single family home", "mobile home", "multi family home"]:
bldg_sect = "residential"
else:
bldg_sect = "commercial"
# Adjust the key chain to be used in registering contributing
# microsegment information for cases where 'windows solar'
# or 'windows conduction' are in the key chain. Change
# such entries to just 'windows' to ensure the competition
# of 'windows conduction' and 'windows solar' contributing
# microsegments in the 'adjust_savings' function below
contrib_mseg_key = mskeys
if any([x is not None and "windows" in x for x in
contrib_mseg_key]):
contrib_mseg_key = list(contrib_mseg_key)
contrib_mseg_key[numpy.where([x is not None and "windows" in x
for x in contrib_mseg_key])[0][0]] = "windows"
contrib_mseg_key = tuple(contrib_mseg_key)
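            # e.g., a 'windows conduction' entry in the key chain is replaced
            # with 'windows', so 'windows solar' and 'windows conduction'
            # contributions later compete under a single key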
# Initialize measure performance/cost/lifetime, associated units,
# and sub-market scaling fractions/sources if: a) For loop through
# all measure mseg key chains is in first iteration, b) A switch
# has been made from updating "primary" microsegment info. to
# updating "secondary" microsegment info. (relevant to cost/
# lifetime units only), c) Any of performance/cost/lifetime units
# is a dict which must be parsed further to reach the final value,
# or d) A new cost conversion is required for the current mseg
# (relevant to cost only). * Note: cost/lifetime/sub-market
# information is not updated for "secondary" microsegments, which
# do not pertain to these variables; lifetime units are in years
if ind == 0 or (ms_iterable[ind][0] != ms_iterable[ind - 1][0]) \
or isinstance(self.energy_efficiency, dict):
perf_meas = self.energy_efficiency
if ind == 0 or (ms_iterable[ind][0] != ms_iterable[ind - 1][0]) \
or isinstance(self.energy_efficiency_units, dict):
perf_units = self.energy_efficiency_units
if mskeys[0] == "secondary":
cost_meas, life_meas = (0 for n in range(2))
cost_units = "NA"
# * Note: no unique sub-market scaling fractions for secondary
# microsegments; secondary microsegments are only scaled down
# by the sub-market fraction for their associated primary
# microsegments
mkt_scale_frac, mkt_scale_frac_source = (
None for n in range(2))
else:
# Set ECM cost attribute to value previously calculated for
# current microsegment building type, provided microsegment
# does not require re-initiating cost conversions for each
# new technology type (applicable to residential sector)
if mskeys[2] in bldgs_costconverted.keys() and (
bldg_sect != "residential" or all([
x not in self.cost_units for x in
self.handyvars.cconv_bytech_units_res])) and (
not isinstance(self.installed_cost, dict)):
cost_meas, cost_units = [x for x in bldgs_costconverted[
mskeys[2]]]
# Re-initialize ECM cost attribute for each new building
# type or technology type if required for the given cost units
elif ind == 0 or any([
x in self.cost_units for x in
self.handyvars.cconv_bybldg_units]) or (
bldg_sect == "residential" and any([
y in self.cost_units for y in
self.handyvars.cconv_bytech_units_res])):
cost_meas, cost_units = [
self.installed_cost, self.cost_units]
elif isinstance(self.installed_cost, dict) or \
isinstance(self.cost_units, dict):
cost_meas, cost_units = [
self.installed_cost, self.cost_units]
# Set lifetime attribute to initial value
if ind == 0 or isinstance(
self.product_lifetime, dict):
life_meas = self.product_lifetime
# Set market scaling attributes to initial values
if ind == 0 or isinstance(
self.market_scaling_fractions, dict):
mkt_scale_frac = self.market_scaling_fractions
if ind == 0 or isinstance(
self.market_scaling_fractions_source, dict):
mkt_scale_frac_source = \
self.market_scaling_fractions_source
# Set the appropriate carbon intensity, and energy cost data dicts
# to use for the current microsegment; when assuming a high grid
# decarbonization case, the user can choose to assess emissions
# and cost reductions in non-fuel switching microsegments using
# base-case emissions intensities and energy costs (e.g., before
# additional grid decarbonization)
if opts.grid_decarb is not False and all([x is not None for x in [
self.handyvars.carb_int_nonfs,
self.handyvars.ecosts_nonfs]]) and (
self.fuel_switch_to is None or (
self.fuel_switch_to == "electricity" and
"electricity" in mskeys)):
carb_int_dat = self.handyvars.carb_int_nonfs
cost_dat = self.handyvars.ecosts_nonfs
else:
carb_int_dat = self.handyvars.carb_int
cost_dat = self.handyvars.ecosts
# Set baseline and measure site-source conversion factors,
# accounting for any fuel switching from baseline to measure tech.
if self.fuel_switch_to is None:
# Set site-source conversions to 1 if user flagged
# site energy use outputs, to appropriate input data otherwise
if opts is not None and opts.site_energy is True:
site_source_conv_base, site_source_conv_meas = [{
yr: 1 for yr in self.handyvars.aeo_years}
for n in range(2)]
else:
site_source_conv_base, site_source_conv_meas = (
self.handyvars.ss_conv[mskeys[3]] for
n in range(2))
else:
# Set site-source conversions to 1 if user flagged
# site energy use outputs, to appropriate input data otherwise
if opts is not None and opts.site_energy is True:
site_source_conv_base, site_source_conv_meas = [{
yr: 1 for yr in self.handyvars.aeo_years}
for n in range(2)]
else:
site_source_conv_base = self.handyvars.ss_conv[
mskeys[3]]
site_source_conv_meas = self.handyvars.ss_conv[
self.fuel_switch_to]
# Set fuel type string for selection of baseline and measure
# carbon intensities and fuel prices to handle special
# technology cases
if mskeys[6] == 'furnace (kerosene)':
ftkey = 'distillate'
else:
ftkey = mskeys[3]
# Set baseline and measure carbon intensities, accounting for any
# fuel switching from baseline technology to measure technology
if self.fuel_switch_to is None:
                # Case where user has flagged site energy outputs
if opts is not None and opts.site_energy is True:
# Intensities are specified by EMM region or state based on
# site energy and require no further conversion to match
# the user's site energy setting
try:
intensity_carb_base, intensity_carb_meas = [{
yr: carb_int_dat[bldg_sect][ftkey][
mskeys[1]][yr] for
yr in self.handyvars.aeo_years} for n in range(2)]
# Intensities are specified nationally based on source
# energy and require multiplication by site-source factor
# to match the user's site energy setting
except KeyError:
intensity_carb_base, intensity_carb_meas = [{
yr: carb_int_dat[bldg_sect][
ftkey][yr] *
self.handyvars.ss_conv[ftkey][yr] for
yr in self.handyvars.aeo_years} for n in range(2)]
# Case where user has not flagged site energy outputs
else:
# Intensities are specified by EMM region or state based on
# site energy and require division by site-source factor to
# match the user's source energy setting
try:
intensity_carb_base, intensity_carb_meas = [{
yr: carb_int_dat[bldg_sect][ftkey][
mskeys[1]][yr] /
self.handyvars.ss_conv[ftkey][yr] for
yr in self.handyvars.aeo_years} for n in range(2)]
# Intensities are specified nationally based on source
# energy and require no further conversion to match the
# user's source energy setting
except KeyError:
intensity_carb_base, intensity_carb_meas = (
carb_int_dat[bldg_sect][
ftkey] for n in range(2))
else:
# Interpretation of the calculations below is the same as for
# the case above without fuel switching; the only difference
# here is that baseline vs. measure settings use different
# fuels and must therefore be calculated separately
                # Case where user has flagged site energy outputs
if opts is not None and opts.site_energy is True:
# Intensities broken out by EMM region or state
try:
# Base fuel intensity broken by region
intensity_carb_base = carb_int_dat[
bldg_sect][ftkey][mskeys[1]]
except KeyError:
# Base fuel intensity not broken by region
intensity_carb_base = {yr: carb_int_dat[
bldg_sect][ftkey][yr] *
self.handyvars.ss_conv[ftkey][yr]
for yr in self.handyvars.aeo_years}
try:
# Measure fuel intensity broken by region
intensity_carb_meas = carb_int_dat[
bldg_sect][self.fuel_switch_to][mskeys[1]]
except KeyError:
# Measure fuel intensity not broken by region
intensity_carb_meas = {yr: carb_int_dat[
bldg_sect][self.fuel_switch_to][yr] *
self.handyvars.ss_conv[self.fuel_switch_to][yr]
for yr in self.handyvars.aeo_years}
# Case where user has not flagged site energy outputs
else:
try:
# Base fuel intensity broken by region
intensity_carb_base = {yr: carb_int_dat[
bldg_sect][ftkey][mskeys[1]][yr] /
self.handyvars.ss_conv[ftkey][yr]
for yr in self.handyvars.aeo_years}
except KeyError:
# Base fuel intensity not broken by region
intensity_carb_base = carb_int_dat[
bldg_sect][ftkey]
try:
# Measure fuel intensity broken by region
intensity_carb_meas = {yr: carb_int_dat[
bldg_sect][self.fuel_switch_to][mskeys[1]][yr] /
self.handyvars.ss_conv[self.fuel_switch_to][yr]
for yr in self.handyvars.aeo_years}
except KeyError:
# Measure fuel intensity not broken by region
intensity_carb_meas = carb_int_dat[
bldg_sect][self.fuel_switch_to]
# Set baseline and measure fuel costs, accounting for any
# fuel switching from baseline technology to measure technology;
# interpretation of the calculations is the same as for the carbon
# intensity calculations above
if self.fuel_switch_to is None:
                # Case where user has flagged site energy outputs
if opts is not None and opts.site_energy is True:
# Costs broken out by EMM region or state
try:
cost_energy_base, cost_energy_meas = (
cost_dat[bldg_sect][ftkey][
mskeys[1]] for n in range(2))
# National fuel costs
except KeyError:
cost_energy_base, cost_energy_meas = [{
yr: cost_dat[bldg_sect][
ftkey][yr] * self.handyvars.ss_conv[
ftkey][yr] for yr in
self.handyvars.aeo_years} for n in range(2)]
# Case where user has not flagged site energy outputs
else:
# Costs broken out by EMM region or state
try:
cost_energy_base, cost_energy_meas = [{
yr: cost_dat[
bldg_sect][ftkey][mskeys[1]][yr] /
self.handyvars.ss_conv[ftkey][yr] for
yr in self.handyvars.aeo_years} for
n in range(2)]
# National fuel costs
except KeyError:
cost_energy_base, cost_energy_meas = (
cost_dat[bldg_sect][ftkey] for
n in range(2))
else:
                # Case where user has flagged site energy outputs
if opts is not None and opts.site_energy is True:
try:
# Base fuel cost broken out by region
cost_energy_base = cost_dat[bldg_sect][
ftkey][mskeys[1]]
except KeyError:
# Base fuel cost not broken out by region
cost_energy_base = {
yr: cost_dat[bldg_sect][
ftkey][yr] * self.handyvars.ss_conv[
ftkey][yr] for yr in
self.handyvars.aeo_years}
try:
# Measure fuel cost broken out by region
cost_energy_meas = cost_dat[bldg_sect][
self.fuel_switch_to][mskeys[1]]
except KeyError:
# Measure fuel cost not broken out by region
cost_energy_meas = {
yr: cost_dat[bldg_sect][
self.fuel_switch_to][yr] *
self.handyvars.ss_conv[self.fuel_switch_to][yr] for
yr in self.handyvars.aeo_years}
# Case where user has not flagged site energy outputs
else:
try:
# Base fuel cost broken out by region
cost_energy_base = {
yr: cost_dat[bldg_sect][ftkey][
mskeys[1]][yr] / self.handyvars.ss_conv[
ftkey][yr] for yr in
self.handyvars.aeo_years}
except KeyError:
# Base fuel cost not broken out by region
cost_energy_base = cost_dat[bldg_sect][
ftkey]
try:
# Measure fuel cost broken out by region
cost_energy_meas = {
yr: cost_dat[bldg_sect][
self.fuel_switch_to][mskeys[1]][yr] /
self.handyvars.ss_conv[self.fuel_switch_to][yr] for
yr in self.handyvars.aeo_years}
except KeyError:
# Measure fuel cost not broken out by region
cost_energy_meas = cost_dat[bldg_sect][
self.fuel_switch_to]
# For the case where the baseline technology is a wood
# stove, set the energy cost and carbon intensity to zero
if mskeys[6] == 'stove (wood)':
intensity_carb_base = dict.fromkeys(intensity_carb_base, 0)
cost_energy_base = dict.fromkeys(cost_energy_base, 0)
if self.fuel_switch_to is None:
intensity_carb_meas = dict.fromkeys(intensity_carb_meas, 0)
cost_energy_meas = dict.fromkeys(cost_energy_meas, 0)
# For electricity microsegments in measure scenarios that
# require the addition of public health cost data, retrieve
# the appropriate cost data for the given EMM region and add
if opts is not None and opts.health_costs is True and (
"PHC" in self.name and (
"electricity" in mskeys or
self.fuel_switch_to == "electricity")):
# Set row/column key information for the public health
# cost scenario suggested by the measure name
row_key = [x[1] for x in self.handyvars.health_scn_names if
x[0] in self.name][0]
col_key = [x[2] for x in self.handyvars.health_scn_names if
x[0] in self.name][0]
# Public health costs are specified in units of $/MMBtu source;
# add a multiplier to account for the case where the energy
# outputs that these data will be multiplied by are specified
# in site units; otherwise, set this multiplier to 1
if opts is not None and opts.site_energy is True:
phc_site_mult = self.handyvars.ss_conv["electricity"]
else:
phc_site_mult = {yr: 1 for yr in self.handyvars.aeo_years}
# Pull the appropriate public health cost information for
# the current health cost scenario and EMM region; convert
# from units of cents/primary kWh to $/MMBtu source and add
# source-site multiplier, if necessary
phc_dat = {yr: ((self.handyvars.health_scn_data[
numpy.in1d(
self.handyvars.health_scn_data["Category"], row_key) &
numpy.in1d(
self.handyvars.health_scn_data["EMM_Region"],
mskeys[1])][col_key])[0] / 100) * 293.07107 *
phc_site_mult[yr] for yr in self.handyvars.aeo_years}
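                # (293.07107 is the number of kWh per MMBtu, and dividing by
                # 100 converts cents to dollars, yielding $/MMBtu source)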
# Update energy costs with public health data; in fuel switch
# case, do not add to baseline as baseline was non-electric
if self.fuel_switch_to == "electricity":
# Update measure
cost_energy_meas = {yr: (val + phc_dat[yr]) for yr, val in
cost_energy_meas.items()}
else:
# Update baseline
cost_energy_base = {yr: (val + phc_dat[yr]) for yr, val in
cost_energy_base.items()}
# Update measure
cost_energy_meas = {yr: (val + phc_dat[yr]) for yr, val in
cost_energy_meas.items()}
# Initialize cost/performance/lifetime, stock/energy, square
# footage, and new building fraction variables for the baseline
# microsegment associated with the current key chain
base_cpl = msegs_cpl
mseg = msegs
mseg_sqft_stock = msegs
new_constr = {"annual new": {}, "total new": {},
"total": {}, "new fraction": {}}
# Initialize a variable for measure relative performance (broken
# out by year in modeling time horizon)
rel_perf = {}
# In cases where measure and baseline cost/performance/lifetime
# data and/or baseline stock/energy market size data are formatted
# as nested dicts, loop recursively through dict levels until
# appropriate terminal value is reached
for i in range(0, len(mskeys)):
# For use of state regions, cost/performance/lifetime data
# are broken out by census division; map the state of the
# current microsegment to the census division it belongs to,
# to enable retrieval of the cost/performance/lifetime data
if (i == 1) and self.handyvars.region_cpl_mapping:
mskeys_cpl_map = [
x[0] for x in
self.handyvars.region_cpl_mapping.items() if
mskeys[1] in x[1]][0]
# Mapping should yield single string for census division
if not isinstance(mskeys_cpl_map, str):
raise ValueError("State " + mskeys[1] +
" could not be mapped to a census "
"division for the purpose of "
"retrieving baseline cost, "
"performance, and lifetime data")
else:
mskeys_cpl_map = ''
# Check whether baseline microsegment cost/performance/lifetime
# data are in dict format and current key is in dict keys; if
# so, proceed further with the recursive loop. * Note: dict key
# hierarchies and syntax are assumed to be consistent across
# all measure and baseline cost/performance/lifetime and
# stock/energy market data, with the exception of state data,
# where cost/performance/lifetime data are broken out by
# census divisions and must be mapped to the state breakouts
# used in the stock_energy market data
if (isinstance(base_cpl, dict) and (
(mskeys[i] in base_cpl.keys()) or (
mskeys_cpl_map and mskeys_cpl_map in base_cpl.keys())) or
mskeys[i] in [
"primary", "secondary", "new", "existing", None]):
# Skip over "primary", "secondary", "new", and "existing"
# keys in updating baseline stock/energy, cost and lifetime
# information (this information is not broken out by these
# categories)
if mskeys[i] not in [
"primary", "secondary", "new", "existing", None]:
# Restrict base cost/performance/lifetime dict to key
# chain info.
if mskeys_cpl_map:
base_cpl = base_cpl[mskeys_cpl_map]
else:
base_cpl = base_cpl[mskeys[i]]
# Restrict stock/energy dict to key chain info.
mseg = mseg[mskeys[i]]
# Restrict ft^2 floor area dict to key chain info.
                        # Note: ft^2 floor area data are only broken out by
                        # the region and building type levels of the key chain
                        if i < 3:
mseg_sqft_stock = mseg_sqft_stock[mskeys[i]]
# Handle a superfluous 'undefined' key in the ECM
# cost, performance, and lifetime fields that is generated
# by the 'Add ECM' web form in certain cases *** NOTE: WILL
# FIX IN FUTURE UI VERSION ***
if (any([type(x) is dict and "undefined" in x.keys()
for x in [perf_meas, perf_units, cost_meas,
cost_units, life_meas]])):
if isinstance(perf_meas, dict) and "undefined" in \
perf_meas.keys():
perf_meas = perf_meas["undefined"]
if isinstance(perf_units, dict) and "undefined" in \
perf_units.keys():
perf_units = perf_units["undefined"]
if isinstance(cost_meas, dict) and "undefined" in \
cost_meas.keys():
cost_meas = cost_meas["undefined"]
if isinstance(cost_units, dict) and "undefined" in \
cost_units.keys():
cost_units = cost_units["undefined"]
if isinstance(life_meas, dict) and "undefined" in \
life_meas.keys():
life_meas = life_meas["undefined"]
# Check for/handle breakouts of performance, cost,
# lifetime, or market scaling information
# Determine the full set of potential breakout keys
# that should be represented in the given ECM attribute for
# the current microsegment level (used to check for
# missing information below); for region (level 2), provide
# a set of alternate breakout keys that may be used
if (any([(type(x) is dict or type(x) is list) for x in [
perf_meas, perf_units, cost_meas, cost_units,
life_meas, mkt_scale_frac,
mkt_scale_frac_source]])):
# primary/secondary level
if (i == 0):
break_keys = ["primary", "secondary"]
alt_break_keys = ''
err_message = ''
# full set of climate breakouts
elif (i == 1):
break_keys = self.climate_zone
# set of alternate regional breakout possibilities
alt_break_keys = \
self.handyvars.alt_perfcost_brk_map["levels"]
err_message = "regions the measure applies to: "
# full set of building breakouts
elif (i == 2):
break_keys = self.bldg_type
alt_break_keys = ''
err_message = "buildings the measure applies to: "
# full set of fuel breakouts
elif (i == 3):
break_keys = self.fuel_type[mskeys[0]]
alt_break_keys = ''
err_message = "fuel types the measure applies to: "
# full set of end use breakouts
elif (i == 4):
break_keys = self.end_use[mskeys[0]]
alt_break_keys = ''
err_message = "end uses the measure applies to: "
# full set of technology breakouts
elif (i == (len(mskeys) - 2)):
break_keys = self.technology[mskeys[0]]
alt_break_keys = ''
err_message = \
"technologies the measure applies to: "
# full set of vintage breakouts
elif (i == (len(mskeys) - 1)):
break_keys = self.structure_type
alt_break_keys = ''
err_message = \
"building vintages the measure applies to: "
else:
break_keys = ''
alt_break_keys = ''
err_message = ''
# Restrict any measure cost/performance/lifetime/market
# scaling info. that is a dict type to key chain info.
# - in the process, check to ensure that if there is
# breakout information provided for the given level in
# the microsegment, this breakout information is
# complete (for example, if performance is broken out
# by climate region, ALL climate regions should be
# present in the breakout keys)
# Performance data
# Case where data are broken out directly by mseg info.
if isinstance(perf_meas, dict) and break_keys and all([
x in perf_meas.keys() for x in break_keys]):
perf_meas = perf_meas[mskeys[i]]
# Case where region is being looped through in the mseg
# and performance data use alternate regional breakout
elif isinstance(perf_meas, dict) and alt_break_keys:
# Determine the alternate regions by which the
# performance data are broken out (e.g., IECC, or
# - if the analysis uses EMM regions or states -
# AIA)
alt_key_reg_typ = [
x for x in
self.handyvars.alt_perfcost_brk_map.keys()
if any([
x in y for y in perf_meas.keys()])]
# If the alternate regional breakout is supported,
# reformat the performance data for subsequent
# calculations
if len(alt_key_reg_typ) > 0:
alt_key_reg_typ = alt_key_reg_typ[0]
# Check to ensure the expected alternate
# breakout keys are provided
if sorted(perf_meas.keys()) == sorted(
self.handyvars.alt_perfcost_brk_map[
alt_key_reg_typ][alt_key_reg_typ]):
# Store data in a list, where the first
# element is a dict of performance data
# broken out by each alternate region and
# the second element is the portion of
# each alternate region that falls in the
# current mseg region
perf_meas = copy.deepcopy([
perf_meas,
self.handyvars.alt_perfcost_brk_map[
alt_key_reg_typ][mskeys[1]]])
# If unexpected keys are present, yield error
else:
raise KeyError(
self.name +
' energy performance (energy_'
'efficiency) must be broken out '
'by ALL ' + err_message +
str(break_keys) + ' OR alternate '
                                        'regions ' + str(alt_break_keys))
# Case where performance data broken out by alternate
# regions were reformatted as a list and require
# further work to finalize as a single value for the
# current mseg
elif isinstance(perf_meas, list) and \
isinstance(perf_meas[0], dict):
# Check the first element of the list for
# performance data in each of the alternate regions
# that is still in dict format and must be keyed
# in further by info. for the current mseg.
for k in perf_meas[0].keys():
if isinstance(perf_meas[0][k], dict) and \
break_keys and all([
x in perf_meas[0][k].keys()
for x in break_keys]):
perf_meas[0][k] = perf_meas[0][k][
mskeys[i]]
# If none of the performance data in the first
# element of the list needs to be keyed in further,
# perform a weighted sum of the data across the
# alternate regions into the current mseg region,
# to arrive at a final performance value for that
# region
if all([type(x) != dict for
x in perf_meas[0].values()]):
perf_meas = sum([x * y for x, y in zip(
perf_meas[0].values(), perf_meas[1])])
# If none of the above cases holds, yield error
elif isinstance(perf_meas, dict) and any(
[x in perf_meas.keys() for x in break_keys]):
raise KeyError(
self.name +
' energy performance (energy_efficiency) '
'must be broken out '
'by ALL ' + err_message + str(break_keys))
# Cost data - same approach as performance data
# Case where data are broken out directly by mseg info.
if isinstance(cost_meas, dict) and break_keys and \
all([x in cost_meas.keys() for
x in break_keys]):
cost_meas = cost_meas[mskeys[i]]
# Case where region is being looped through in the mseg
# and cost data use alternate regional breakout
elif isinstance(cost_meas, dict) and alt_break_keys:
# Determine the alternate regions by which the
# cost data are broken out (e.g., IECC, or
# - if the analysis uses EMM regions or states -
# AIA)
alt_key_reg_typ = [
x for x in
self.handyvars.alt_perfcost_brk_map.keys()
if any([
x in y for y in cost_meas.keys()])]
# If the alternate regional breakout is supported,
# reformat the cost data for subsequent
# calculations
if len(alt_key_reg_typ) > 0:
alt_key_reg_typ = alt_key_reg_typ[0]
# Check to ensure the expected alternate
# breakout keys are provided
if sorted(cost_meas.keys()) == sorted(
self.handyvars.alt_perfcost_brk_map[
alt_key_reg_typ][alt_key_reg_typ]):
# Store data in a list, where the first
# element is a dict of cost data
# broken out by each alternate region and
# the second element is the portion of
# each alternate region that falls in the
# current mseg region
cost_meas = copy.deepcopy([
cost_meas,
self.handyvars.alt_perfcost_brk_map[
alt_key_reg_typ][mskeys[1]]])
# If unexpected keys are present, yield error
else:
raise KeyError(
self.name +
' installed cost (installed_'
'cost) must be broken out '
'by ALL ' + err_message +
str(break_keys) + ' OR alternate '
                                        'regions ' + str(alt_break_keys))
# Case where cost data broken out by alternate
# regions were reformatted as a list and require
# further work to finalize as a single value for the
# current mseg
elif isinstance(cost_meas, list) and \
isinstance(cost_meas[0], dict):
# Check the first element of the list for
# cost data in each of the alternate regions
# that is still in dict format and must be keyed
# in further by info. for the current mseg.
for k in cost_meas[0].keys():
if isinstance(cost_meas[0][k], dict) and \
break_keys and all([
x in cost_meas[0][k].keys()
for x in break_keys]):
cost_meas[0][k] = cost_meas[0][k][
mskeys[i]]
# If none of the cost data in the first element of
# the list needs to be keyed in further, perform a
# weighted sum of the data across the alternate
# regions into the current mseg region, to arrive
# at a final cost value for that region
if all([type(x) != dict for
x in cost_meas[0].values()]):
cost_meas = sum([x * y for x, y in zip(
cost_meas[0].values(), cost_meas[1])])
elif isinstance(cost_meas, dict) and any(
[x in cost_meas.keys() for x in break_keys]):
if alt_break_keys:
pass
else:
raise KeyError(
self.name +
' installed cost (installed_cost) must '
'be broken out '
'by ALL ' + err_message + str(break_keys))
# Performance units data
if isinstance(perf_units, dict) and break_keys and \
all([x in perf_units.keys() for
x in break_keys]):
perf_units = perf_units[mskeys[i]]
elif isinstance(perf_units, dict) and any(
[x in perf_units.keys() for x in break_keys]):
raise KeyError(
self.name +
' energy performance units ('
'energy_efficiency_units) must be broken '
'out by ALL ' + err_message +
str(break_keys))
# Cost units data
if isinstance(cost_units, dict) and break_keys and \
all([x in cost_units.keys() for
x in break_keys]):
cost_units = cost_units[mskeys[i]]
elif isinstance(cost_units, dict) and any(
[x in cost_units.keys() for x in break_keys]):
raise KeyError(
self.name +
' installed cost units (installed_cost_'
'units) must be broken out '
'by ALL ' + err_message + str(break_keys))
# Lifetime data
if isinstance(life_meas, dict) and break_keys and all([
x in life_meas.keys() for x in break_keys]):
life_meas = life_meas[mskeys[i]]
elif isinstance(life_meas, dict) and any(
[x in life_meas.keys() for x in break_keys]):
raise KeyError(
self.name +
' lifetime (product_lifetime) must be '
'broken out '
'by ALL ' + err_message + str(break_keys))
# Market scaling fractions
if isinstance(mkt_scale_frac, dict) and break_keys \
and all([x in mkt_scale_frac.keys() for
x in break_keys]):
mkt_scale_frac = mkt_scale_frac[mskeys[i]]
elif isinstance(mkt_scale_frac, dict) and any(
[x in mkt_scale_frac.keys() for
x in break_keys]):
raise KeyError(
self.name +
' market scaling fractions (market_'
'scaling_fractions) must be '
'broken out by ALL ' + err_message +
str(break_keys))
# Market scaling fraction source
if isinstance(mkt_scale_frac_source, dict) and \
break_keys and all([
x in mkt_scale_frac_source.keys() for
x in break_keys]):
mkt_scale_frac_source = \
mkt_scale_frac_source[mskeys[i]]
elif isinstance(mkt_scale_frac_source, dict) and any(
[x in mkt_scale_frac_source.keys() for
x in break_keys]):
raise KeyError(
self.name +
' market scaling fraction source (market_'
'scaling_fraction_source) must be '
'broken out by ALL ' + err_message +
str(break_keys))
# If no key match, break the loop
else:
if mskeys[i] is not None:
mseg = {}
break
# Continue loop if key chain doesn't yield "stock"/"energy" keys
if any([x not in list(mseg.keys()) for x in ["stock", "energy"]]):
continue
# Continue loop if time-sensitive valuation is required and the
# current microsegment technology does not have the necessary
# load shape information (pertinent to internal heat gains)
elif (((self.energy_outputs["tsv_metrics"] is not False or
opts.sect_shapes is True) or self.tsv_features is not None)
and (mskeys[4] in ["heating", "secondary heating"] and
mskeys[-2] in self.handyvars.heat_ls_tech_scrn)):
continue
# Continue loop if key chain yields "stock"/"energy" keys but
# the stock or energy data are missing
elif any([x == {} for x in [mseg["stock"], mseg["energy"]]]):
if mskeys[-2] not in stk_energy_warn:
stk_energy_warn.append(mskeys[-2])
verboseprint(
opts.verbose,
"WARNING: ECM '" + self.name +
"' missing valid baseline "
"stock/energy data " +
"for technology '" + str(mskeys[-2]) +
"'; removing technology from analysis")
# Add to the overall number of key chains that yield "stock"/
# "energy" keys (but in this case, are missing data)
valid_keys += 1
continue
# Otherwise update all stock/energy/cost information for each year
else:
# Restrict count of key chains with valid stock/energy data to
# "primary" microsegment key chains only (the key chain
# count is used later in stock and stock cost calculations,
# which secondary microsegments do not contribute to)
if mskeys[0] == "primary":
valid_keys += 1
valid_keys_stk_energy += 1
# Flag use of ft^2 floor area as stock when number of stock
# units is unavailable (applicable to residential envelope
# and all commercial technologies)
if mseg["stock"] == "NA":
sqft_subst = 1
# If applicable, determine the rate of conversion from baseline
# equipment to heat pumps (including fuel switching cases and
# like-for-like replacements of e.g., resistance heating/WH).
# Currently, assume only heating/water heating end uses are
# covered by these exogenous rates, and ensure that these rates
# are only assessed for equipment microsegments (e.g., they do
# not apply to envelope component heating energy msegs);
# equipment cooling microsegments that are linked with the
# heating microsegments are subject to the rates; set to None
# otherwise
if self.handyvars.hp_rates and "demand" not in mskeys and (any(
[x in mskeys for x in ["heating", "water heating"]]) or (
link_htcl_fs_rates and "cooling" in mskeys)):
# Map the current mseg region to the regionality of the
# HP conversion rate data
reg = [r[0] for r in
self.handyvars.hp_rates_reg_map.items() if
mskeys[1] in r[1]][0]
# Pull in HP conversion rate data for the region and
# building type of the current microsegment
hp_rate_dat = self.handyvars.hp_rates[
"data"][reg][bldg_sect]
# Attempt to further restrict HP conversion data by
# fuel type, end use, technology, and building vintage;
# handle cases where data are applicable to "all"
# technologies within a given combination of fuel, end use,
# and vintage, or otherwise set the HP conversion rate to
# None if no data are available for the current mseg
try:
hp_rate = hp_rate_dat[
mskeys[3]][mskeys[4]][mskeys[-2]][mskeys[-1]]
except KeyError:
try:
hp_rate = hp_rate_dat[
mskeys[3]][mskeys[4]]["all"][mskeys[-1]]
except KeyError:
# HP conversion rates for NGHP cooling msegs are
# not directly addressed in the exogenous file
# structure but should be set to the same as
# NGHP heating
if "cooling" in mskeys and "NGHP" in mskeys:
try:
hp_rate = hp_rate_dat[mskeys[3]][
"heating"][mskeys[-2]][mskeys[-1]]
except KeyError:
hp_rate = None
# HP conversion rates for electric cooling msegs
# attached to heating msegs that are fuel
# switching from fossil to electric and subject to
# the HP rates should be subject to the same rates;
# attach cooling scaling to NG rates, and use NG
# furnaces if those rates are resolved by
# technology (NG furnaces are most prevalent
# fossil-based heating technology)
elif self.fuel_switch_to == "electricity" and \
"cooling" in mskeys:
try:
hp_rate = hp_rate_dat["natural gas"][
"heating"]["furnace (NG)"][mskeys[-1]]
except KeyError:
try:
hp_rate = hp_rate_dat["natural gas"][
"heating"]["all"][mskeys[-1]]
except KeyError:
hp_rate = None
# HP conversion rates for electric cooling msegs
# attached to electric resistance heating msegs
# that are subject to the HP rates should be
# subject to the same rates
elif self.fuel_switch_to is None and \
"cooling" in mskeys:
try:
hp_rate = hp_rate_dat["electricity"][
"heating"]["resistance heat"][
mskeys[-1]]
except KeyError:
try:
hp_rate = hp_rate_dat["electricity"][
"heating"]["all"][mskeys[-1]]
except KeyError:
hp_rate = None
else:
hp_rate = None
else:
hp_rate = None
# For cases where the measure is switching fuel to a HP
# and an external HP conversion rate has been imposed,
# and the current mseg applies to fossil fuel (e.g., is
# being switched away from), append an '-FS' to the
# contributing microsegment tech. information needed for ECM
# competition; this will ensure that the mseg is not
# directly competed with fossil msegs for other non-FS
# measures (e.g., gas efficiency), which is necessary b/c
# the overlap between such measures will have already been
# accounted for via the HP conversion rate calculations
if hp_rate and (self.fuel_switch_to == "electricity" and
"electricity" not in mskeys):
contrib_mseg_key = list(contrib_mseg_key)
# Tech info. is second to last mseg list element
try:
contrib_mseg_key[-2] += "-FS"
# Handle Nonetype on technology
except TypeError:
contrib_mseg_key[-2] = "-FS"
contrib_mseg_key = tuple(contrib_mseg_key)
# If sub-market scaling fraction is non-numeric (indicating
# it is not applicable to current microsegment), set to 1
if mkt_scale_frac is None or isinstance(mkt_scale_frac, dict):
mkt_scale_frac = 1
# If a sub-market scaling fraction is to be applied to the
# current baseline microsegment, check that the source
# information for the fraction is sufficient; if not, remove
# the measure from further analysis
if isinstance(mkt_scale_frac_source, dict) and \
"title" in mkt_scale_frac_source.keys():
# Establish sub-market fraction general source, URL, and
# derivation information
# Set general source info. for the sub-market fraction
source_info = [
mkt_scale_frac_source['title'],
mkt_scale_frac_source['organization']]
# Set URL for the sub-market fraction
url = mkt_scale_frac_source['URL']
# Set information about how sub-market fraction was derived
frac_derive = mkt_scale_frac_source['fraction_derivation']
# Check the validity of sub-market fraction source, URL,
# and derivation information
# Check sub-market fraction general source information,
# yield warning if source information is invalid and
# invalid source information flag hasn't already been
# raised for this measure
if sbmkt_source_invalid != 1 and (any([
not isinstance(x, str) or
len(x) < 2 for x in source_info]) is True):
# Print invalid source information warning
warnings.warn(
"WARNING: '" + self.name + "' has invalid "
"sub-market scaling fraction source title, author,"
" organization, and/or year information")
# Set invalid source information flag to 1
sbmkt_source_invalid = 1
# Check sub-market fraction URL, yield warning if URL is
# invalid and invalid URL flag hasn't already been raised
# for this measure
if sbmkt_url_invalid != 1:
# Parse the URL into components (addressing scheme,
# network location, etc.)
url_check = urlparse(url)
# Check for valid URL address scheme and network
# location components
if (any([len(url_check.scheme),
len(url_check.netloc)]) == 0 or
all([x not in url_check.netloc for x in
self.handyvars.valid_submkt_urls])):
# Print invalid URL warning
warnings.warn(
"WARNING: '" + self.name + "' has invalid "
"sub-market scaling fraction source URL "
"information")
# Set invalid URL flag to 1
sbmkt_url_invalid = 1
# Check sub-market fraction derivation information, yield
# warning if invalid
if not isinstance(frac_derive, str):
# Print invalid derivation warning
warnings.warn(
"WARNING: '" + self.name + "' has invalid "
"sub-market scaling fraction derivation "
"information")
# Set invalid derivation flag to 1
sbmkt_derive_invalid = 1
# If the derivation information or the general source
# and URL information for the sub-market fraction are
# invalid, yield warning that measure will be removed from
# analysis, reset the current valid contributing key chain
# count to a 999 flag, and flag the measure as inactive
# such that it will be removed from all further routines
if sbmkt_derive_invalid == 1 or (
sbmkt_source_invalid == 1 and
sbmkt_url_invalid == 1):
# Print measure removal warning
warnings.warn(
"WARNING (CRITICAL): '" + self.name + "' has "
"insufficient sub-market source information and "
"will be removed from analysis")
# Reset measure 'active' attribute to zero
self.remove = True
# Break from all further baseline stock/energy/carbon
# and cost information updates for the measure
break
# Seed the random number generator such that performance, cost,
# and lifetime draws are consistent across all microsegments
            # that contribute to a measure's master microsegment (e.g., if
# measure performance, cost, and/or lifetime distributions
# are identical relative to two contributing baseline
# microsegments, the numpy arrays yielded by the random number
# generator for these measure parameters and microsegments
# will also be identical)
numpy.random.seed(rnd_sd)
# If the measure performance/cost/lifetime variable is list
# with distribution information, sample values accordingly
if isinstance(perf_meas, list) and isinstance(perf_meas[0],
str):
# Sample measure performance values
perf_meas = self.rand_list_gen(
perf_meas, self.handyvars.nsamples)
# Set any measure performance values less than zero to
# zero, for cases where performance isn't relative
if perf_units != 'relative savings (constant)' and \
type(perf_units) is not list and any(
perf_meas < 0) is True:
                            perf_meas[numpy.where(perf_meas < 0)] = 0
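# Illustration (separate from the ECM code above): indexing with numpy.where
# clamps negative sampled draws to zero in place, which is what the block above
# does for non-relative performance units.
import numpy
draws = numpy.array([0.4, -0.1, 0.7, -0.3])
draws[numpy.where(draws < 0)] = 0
# draws is now array([0.4, 0. , 0.7, 0. ])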
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
import tvb.simulator.lab as lab
from nest_elephant_tvb.Tvb.modify_tvb.test_interface import ReducedWongWang_proxy
from nest_elephant_tvb.Tvb.modify_tvb import Interface_co_simulation
import numpy as np
# reference simulation
np.random.seed(42)
model = lab.models.ReducedWongWang(tau_s=np.random.rand(76))
connectivity = lab.connectivity.Connectivity().from_file()
connectivity.speed = np.array([4.0])
connectivity.configure()
coupling = lab.coupling.Linear(a=np.array(0.0154))
integrator = lab.integrators.HeunDeterministic(dt=0.1,bounded_state_variable_indices=np.array([0]),state_variable_boundaries=np.array([[0.0, 1.0]]))
monitors = (lab.monitors.Raw(period=0.1, variables_of_interest=np.array(0,dtype=np.int)),)
# Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim = lab.simulator.Simulator(model=model,
connectivity=connectivity,
coupling=coupling,
integrator=integrator,
monitors=monitors,
# initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1)
)
sim.configure()
result_all=sim.run(simulation_length=10.0)
result = result_all[0][1][0][0]
# The modified model without proxy
np.random.seed(42)
model = lab.models.ReducedWongWang(tau_s=np.random.rand(76))
# integrator = HeunDeterministic(dt=0.1)
# Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim_2 = lab.simulator.Simulator(model=model,
connectivity=connectivity,
coupling=coupling,
integrator=integrator,
monitors=monitors,
# initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1)
)
sim_2.configure()
model_2 = ReducedWongWang_proxy()
model_2.copy_inst(sim.model)
sim_2.model = model_2
result_2_all=sim_2.run(simulation_length=10.0)[0][1][0]
result_2= result_2_all[0]
diff = result - result_2
if np.sum(diff) == 0.0:
print('test succeeds')
else:
print('test FAIL')
# The modified model with proxy ids set but no proxy values supplied
np.random.seed(42)
id_proxy = range(11)
model = lab.models.ReducedWongWang(tau_s=np.random.rand(76))
# Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim_3 = lab.simulator.Simulator(model=model,
connectivity=connectivity,
coupling=coupling,
integrator=integrator,
monitors=monitors,
# initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1)
)
sim_3.configure()
model_3 = ReducedWongWang_proxy()
model_3.copy_inst(sim.model)
model_3.set_id_proxy(id_proxy)
sim_3.model = model_3
result_3_all=sim_3.run(simulation_length=10.0)[0][1][0]
result_3= result_3_all[0]
diff = result - result_3
if np.sum(diff) == 0.0:
print('test succeeds')
else:
print('test FAIL')
# The modified model with proxy values overridden by a constant
np.random.seed(42)
id_proxy = range(11)
model = lab.models.ReducedWongWang(tau_s=np.random.rand(76))
# Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim_4 = lab.simulator.Simulator(model=model,
connectivity=connectivity,
coupling=coupling,
integrator=integrator,
monitors=monitors,
# initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1)
)
sim_4.configure()
model_4 = ReducedWongWang_proxy()
model_4.copy_inst(sim.model)
model_4.set_id_proxy(np.array(id_proxy))
model_4.update_proxy(np.ones((11,1))*0.7)
sim_4.model = model_4
result_4_all = sim_4.run(simulation_length=10.0)[0][1][0]
result_4 = result_4_all[0]
diff = result - result_4
if np.sum(diff) != 0.0:
print('test succeeds')
else:
print('test FAIL')
# The modified model with explicit proxy values
np.random.seed(42)
id_proxy = range(11)
model = lab.models.ReducedWongWang(tau_s=np.random.rand(76))
# Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim_5 = lab.simulator.Simulator(model=model,
connectivity=connectivity,
coupling=coupling,
integrator=integrator,
monitors=monitors,
# initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1)
)
sim_5.configure()
model_5 = ReducedWongWang_proxy()
model_5.copy_inst(sim.model)
model_5.set_id_proxy(np.array(id_proxy))
model_5.update_proxy([[0.02610815369723578 ],
[0.007918682131383152 ],
[0.008257260378213565 ],
[0.023084939706151147 ],
[0.03725706591997936 ],
[0.017066023963743862 ],
[0.028114124110158213 ],
[0.010048491097557441 ],
[0.013214675199868617 ],
[0.0046064972150810365],
[0.05189135144713729 ]])
sim_5.model = model_5
result_5_all = sim_5.run(simulation_length=10.0)[0][1][0]
result_5 = result_5_all[0]
diff = result - result_5
if np.sum(diff) == 0.0:
print('test succeeds')
else:
print('test FAIL')
# New simulator with the co-simulation interface monitor
np.random.seed(42)
model_6 = lab.models.ReducedWongWang(tau_s=np.random.rand(76))
monitors_2 = (Interface_co_simulation(period=0.1, id_proxy=np.array([0, 2], dtype=np.int), time_synchronize=10.0),)
# Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim_6 = lab.simulator.Simulator(model=model_6,
connectivity=connectivity,
coupling=coupling,
integrator=integrator,
monitors=monitors_2,
# initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1)
)
sim_6.configure()
result_6_all=sim_6.run(simulation_length=10.0)[0][1][0]
result_6= result_6_all[0]
diff = result - result_6
if np.sum(diff) == 0.0:
    print('test succeeds')
else:
    print('test FAIL')
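# Note (not part of the original script): exact zero-difference checks on float
# traces are brittle; a more tolerant equivalent check would be
#   assert np.allclose(result, result_6)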
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the groupstars module.
"""
from astropy.table import Table, vstack
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
from ..groupstars import DAOGroup, DBSCANGroup
try:
import sklearn.cluster # noqa
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def assert_table_almost_equal(table1, table2):
assert table1.colnames == table2.colnames
assert table1.meta == table2.meta
for colname in table1.colnames:
assert_almost_equal(table1[colname], table2[colname])
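# Quick illustration of the helper above (not part of the test suite): tables
# with matching column names, metadata and near-identical values compare equal.
_t1 = Table([np.array([1.0, 2.0])], names=('x_0',))
_t2 = Table([np.array([1.0, 2.0 + 1e-9])], names=('x_0',))
assert_table_almost_equal(_t1, _t2)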
class TestDAOGROUP:
def test_daogroup_one(self):
"""
+---------+--------+---------+---------+--------+---------+
| * * * * |
| |
0.2 + +
| |
| |
| |
0 + * * +
| |
| |
| |
-0.2 + +
| |
| * * * * |
+---------+--------+---------+---------+--------+---------+
0 0.5 1 1.5 2
x and y axis are in pixel coordinates. Each asterisk represents
the centroid of a star.
"""
x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
-np.sqrt(2)/4])
y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
-np.sqrt(2)/4])
x_1 = x_0 + 2.0
first_group = Table([x_0, y_0, np.arange(len(x_0)) + 1,
np.ones(len(x_0), dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
second_group = Table([x_1, y_0, len(x_0) + np.arange(len(x_0)) + 1,
2*np.ones(len(x_0), dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
starlist = vstack([first_group, second_group])
daogroup = DAOGroup(crit_separation=0.6)
test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
def test_daogroup_two(self):
"""
+--------------+--------------+-------------+--------------+
3 + * +
| * |
2.5 + * +
| * |
2 + * +
| |
1.5 + +
| |
1 + * +
| * |
0.5 + * +
| * |
0 + * +
+--------------+--------------+-------------+--------------+
-1 -0.5 0 0.5 1
"""
first_group = Table([np.zeros(5), np.linspace(0, 1, 5),
np.arange(5) + 1, np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
second_group = Table([np.zeros(5), np.linspace(2, 3, 5),
6 + np.arange(5), 2*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
starlist = vstack([first_group, second_group])
daogroup = DAOGroup(crit_separation=0.3)
test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
def test_daogroup_three(self):
"""
1 +--+-------+--------+--------+--------+-------+--------+--+
| |
| |
| |
0.5 + +
| |
| |
0 + * * * * * * * * * * +
| |
| |
-0.5 + +
| |
| |
| |
-1 +--+-------+--------+--------+--------+-------+--------+--+
0 0.5 1 1.5 2 2.5 3
"""
first_group = Table([np.linspace(0, 1, 5), np.zeros(5),
np.arange(5) + 1, np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
second_group = Table([np.linspace(2, 3, 5), np.zeros(5),
6 + np.arange(5), 2*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
starlist = vstack([first_group, second_group])
daogroup = DAOGroup(crit_separation=0.3)
test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
def test_daogroup_four(self):
"""
+-+---------+---------+---------+---------+-+
1 + * +
| * * |
| |
| |
0.5 + +
| |
| |
| |
0 + * * +
| |
| |
-0.5 + +
| |
| |
| * * |
-1 + * +
+-+---------+---------+---------+---------+-+
-1 -0.5 0 0.5 1
"""
x = np.linspace(-1., 1., 5)
y = np.sqrt(1. - x**2)
xx = np.hstack((x, x))
yy = np.hstack((y, -y))
starlist = Table([xx, yy, np.arange(10) + 1,
np.ones(10, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
daogroup = DAOGroup(crit_separation=2.5)
test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
def test_daogroup_five(self):
"""
+--+--------+--------+-------+--------+--------+--------+--+
3 + * +
| * |
2.5 + * +
| * |
2 + * +
| |
1.5 + * * * * * * * * * * +
| |
1 + * +
| * |
0.5 + * +
| * |
0 + * +
+--+--------+--------+-------+--------+--------+--------+--+
0 0.5 1 1.5 2 2.5 3
"""
first_group = Table([1.5*np.ones(5), np.linspace(0, 1, 5),
np.arange(5) + 1, np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
second_group = Table([1.5*np.ones(5), np.linspace(2, 3, 5),
6 + np.arange(5), 2*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
third_group = Table([np.linspace(0, 1, 5), 1.5*np.ones(5),
11 + np.arange(5), 3*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
fourth_group = Table([np.linspace(2, 3, 5), 1.5*np.ones(5),
16 + np.arange(5), 4*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
starlist = vstack([first_group, second_group, third_group,
fourth_group])
daogroup = DAOGroup(crit_separation=0.3)
test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
def test_daogroup_six(self):
"""
+------+----------+----------+----------+----------+------+
| * * * * * * |
| |
0.2 + +
| |
| |
| |
0 + * * * +
| |
| |
| |
-0.2 + +
| |
| * * * * * * |
+------+----------+----------+----------+----------+------+
0 1 2 3 4
"""
x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
-np.sqrt(2)/4])
y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
-np.sqrt(2)/4])
x_1 = x_0 + 2.0
x_2 = x_0 + 4.0
first_group = Table([x_0, y_0, np.arange(5) + 1,
np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
second_group = Table([x_1, y_0, 6 + np.arange(5),
2*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
third_group = Table([x_2, y_0, 11 + np.arange(5),
3*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
starlist = vstack([first_group, second_group, third_group])
daogroup = DAOGroup(crit_separation=0.6)
test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
def test_isolated_sources(self):
"""
Test case when all sources are isolated.
"""
x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
-np.sqrt(2)/4])
y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
-np.sqrt(2)/4])
starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1,
np.arange(len(x_0)) + 1],
names=('x_0', 'y_0', 'id', 'group_id'))
daogroup = DAOGroup(crit_separation=0.01)
test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
def test_id_column(self):
x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
-np.sqrt(2)/4])
y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
-np.sqrt(2)/4])
starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1,
np.arange(len(x_0)) + 1],
names=('x_0', 'y_0', 'id', 'group_id'))
daogroup = DAOGroup(crit_separation=0.01)
test_starlist = daogroup(starlist['x_0', 'y_0'])
assert_table_almost_equal(starlist, test_starlist)
def test_id_column_raise_error(self):
x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
-np.sqrt(2)/4])
y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
-np.sqrt(2)/4])
starlist = Table([x_0, y_0, np.arange(len(x_0)),
np.arange(len(x_0)) + 1],
names=('x_0', 'y_0', 'id', 'group_id'))
daogroup = DAOGroup(crit_separation=0.01)
with pytest.raises(ValueError):
daogroup(starlist['x_0', 'y_0', 'id'])
@pytest.mark.skipif('not HAS_SKLEARN')
class TestDBSCANGroup:
    def test_group_stars_one(self):
x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
-np.sqrt(2)/4])
y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
-np.sqrt(2)/4])
x_1 = x_0 + 2.0
first_group = Table([x_0, y_0, np.arange(len(x_0)) + 1,
np.ones(len(x_0), dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
second_group = Table([x_1, y_0, len(x_0) + np.arange(len(x_0)) + 1,
2*np.ones(len(x_0), dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
starlist = vstack([first_group, second_group])
dbscan = DBSCANGroup(crit_separation=0.6)
test_starlist = dbscan(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
    def test_group_stars_two(self):
first_group = Table([1.5*np.ones(5), np.linspace(0, 1, 5),
np.arange(5) + 1, np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
second_group = Table([1.5*np.ones(5), np.linspace(2, 3, 5),
6 + np.arange(5), 2*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
third_group = Table([np.linspace(0, 1, 5), 1.5*np.ones(5),
11 + np.arange(5), 3*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
fourth_group = Table([np.linspace(2, 3, 5), 1.5*np.ones(5),
16 + np.arange(5), 4*np.ones(5, dtype=int)],
names=('x_0', 'y_0', 'id', 'group_id'))
starlist = vstack([first_group, second_group, third_group,
fourth_group])
dbscan = DBSCANGroup(crit_separation=0.3)
test_starlist = dbscan(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
def test_isolated_sources(self):
"""
Test case when all sources are isolated.
"""
x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
-np.sqrt(2)/4])
y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
-np.sqrt(2)/4])
starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1,
np.arange(len(x_0)) + 1],
names=('x_0', 'y_0', 'id', 'group_id'))
dbscan = DBSCANGroup(crit_separation=0.01)
test_starlist = dbscan(starlist['x_0', 'y_0', 'id'])
assert_table_almost_equal(starlist, test_starlist)
def test_id_column(self):
x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
-np.sqrt(2)/4])
y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
-np.sqrt(2)/4])
starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1,
np.arange(len(x_0)) + 1],
names=('x_0', 'y_0', 'id', 'group_id'))
dbscan = DBSCANGroup(crit_separation=0.01)
test_starlist = dbscan(starlist['x_0', 'y_0'])
assert_table_almost_equal(starlist, test_starlist)
def test_id_column_raise_error(self):
x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
-np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        starlist = Table([x_0, y_0, np.arange(len(x_0)),
                          np.arange(len(x_0)) + 1],
                         names=('x_0', 'y_0', 'id', 'group_id'))
        dbscan = DBSCANGroup(crit_separation=0.01)
        with pytest.raises(ValueError):
            dbscan(starlist['x_0', 'y_0', 'id'])
# -*- coding: utf-8 -*-
"""
Tests for Results.predict
"""
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose, assert_equal
import pandas.util.testing as pdt
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
class CheckPredictReturns(object):
def test_2d(self):
res = self.res
data = self.data
fitted = res.fittedvalues.iloc[1:10:2]
pred = res.predict(data.iloc[1:10:2])
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# plain dict
xd = dict(zip(data.columns, data.iloc[1:10:2].values.T))
pred = res.predict(xd)
assert_equal(pred.index, np.arange(len(pred)))
assert_allclose(pred.values, fitted.values, rtol=1e-13)
def test_1d(self):
# one observation
res = self.res
data = self.data
pred = res.predict(data.iloc[:1])
pdt.assert_index_equal(pred.index, data.iloc[:1].index)
assert_allclose(pred.values, res.fittedvalues[0], rtol=1e-13)
fittedm = res.fittedvalues.mean()
xmean = data.mean()
pred = res.predict(xmean.to_frame().T)
assert_equal(pred.index, np.arange(1))
assert_allclose(pred, fittedm, rtol=1e-13)
# Series
pred = res.predict(data.mean())
assert_equal(pred.index, np.arange(1))
assert_allclose(pred.values, fittedm, rtol=1e-13)
# dict with scalar value (is plain dict)
        # Note: this used to warn about dropped nans even though there were none - FIXED
pred = res.predict(data.mean().to_dict())
assert_equal(pred.index, np.arange(1))
assert_allclose(pred.values, fittedm, rtol=1e-13)
def test_nopatsy(self):
res = self.res
data = self.data
fitted = res.fittedvalues.iloc[1:10:2]
# plain numpy array
pred = res.predict(res.model.exog[1:10:2], transform=False)
assert_allclose(pred, fitted.values, rtol=1e-13)
# pandas DataFrame
x = pd.DataFrame(res.model.exog[1:10:2],
index = data.index[1:10:2],
columns=res.model.exog_names)
pred = res.predict(x)
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# one observation - 1-D
pred = res.predict(res.model.exog[1], transform=False)
assert_allclose(pred, fitted.values[0], rtol=1e-13)
# one observation - pd.Series
pred = res.predict(x.iloc[0])
pdt.assert_index_equal(pred.index, fitted.index[:1])
assert_allclose(pred.values[0], fitted.values[0], rtol=1e-13)
class TestPredictOLS(CheckPredictReturns):
@classmethod
def setup_class(cls):
nobs = 30
np.random.seed(987128)
x = np.random.randn(nobs, 3)
        y = x.sum(1) + np.random.randn(nobs)
"""
A few different backtracking line search subsolvers.
BoundsEnforceLS - Only checks bounds and enforces them by one of three methods.
ArmijoGoldsteinLS -- Like above, but terminates with the ArmijoGoldstein condition.
"""
import sys
import numpy as np
from openmdao.core.analysis_error import AnalysisError
from openmdao.solvers.solver import NonlinearSolver
from openmdao.recorders.recording_iteration_stack import Recording
def _print_violations(outputs, lower, upper):
"""
Print out which variables exceed their bounds.
Parameters
----------
outputs : <Vector>
Vector containing the outputs.
lower : <Vector>
Vector containing the lower bounds.
upper : <Vector>
Vector containing the upper bounds.
"""
start = end = 0
for name, val in outputs._abs_item_iter():
end += val.size
if upper is not None and any(val > upper[start:end]):
print("'%s' exceeds upper bounds" % name)
print(" Val:", val)
print(" Upper:", upper[start:end], '\n')
if lower is not None and any(val < lower[start:end]):
print("'%s' exceeds lower bounds" % name)
print(" Val:", val)
print(" Lower:", lower[start:end], '\n')
start = end
class LinesearchSolver(NonlinearSolver):
"""
Base class for line search solvers.
Attributes
----------
_do_subsolve : bool
Flag used by parent solver to tell the line search whether to solve subsystems while
backtracking.
_lower_bounds : ndarray or None
Lower bounds array.
_upper_bounds : ndarray or None
Upper bounds array.
"""
def __init__(self, **kwargs):
"""
Initialize all attributes.
Parameters
----------
**kwargs : dict
Options dictionary.
"""
super().__init__(**kwargs)
# Parent solver sets this to control whether to solve subsystems.
self._do_subsolve = False
self._lower_bounds = None
self._upper_bounds = None
def _declare_options(self):
"""
Declare options before kwargs are processed in the init method.
"""
super()._declare_options()
opt = self.options
opt.declare(
'bound_enforcement', default='scalar', values=['vector', 'scalar', 'wall'],
desc="If this is set to 'vector', the entire vector is backtracked together " +
"when a bound is violated. If this is set to 'scalar', only the violating " +
"entries are set to the bound and then the backtracking occurs on the vector " +
"as a whole. If this is set to 'wall', only the violating entries are set " +
"to the bound, and then the backtracking follows the wall - i.e., the " +
"violating entries do not change during the line search.")
opt.declare('print_bound_enforce', default=False,
desc="Set to True to print out names and values of variables that are pulled "
"back to their bounds.")
def _setup_solvers(self, system, depth):
"""
Assign system instance, set depth, and optionally perform setup.
Parameters
----------
system : System
pointer to the owning system.
depth : int
depth of the current system (already incremented).
"""
super()._setup_solvers(system, depth)
if system._has_bounds:
abs2meta_out = system._var_abs2meta['output']
start = end = 0
for abs_name, val in system._outputs._abs_item_iter():
end += val.size
meta = abs2meta_out[abs_name]
var_lower = meta['lower']
var_upper = meta['upper']
if var_lower is None and var_upper is None:
start = end
continue
ref0 = meta['ref0']
ref = meta['ref']
if not np.isscalar(ref0):
ref0 = ref0.ravel()
if not np.isscalar(ref):
ref = ref.ravel()
if var_lower is not None:
if self._lower_bounds is None:
self._lower_bounds = np.full(len(system._outputs), -np.inf)
if not np.isscalar(var_lower):
var_lower = var_lower.ravel()
self._lower_bounds[start:end] = (var_lower - ref0) / (ref - ref0)
if var_upper is not None:
if self._upper_bounds is None:
self._upper_bounds = np.full(len(system._outputs), np.inf)
                    if not np.isscalar(var_upper):
                        var_upper = var_upper.ravel()
                    self._upper_bounds[start:end] = (var_upper - ref0) / (ref - ref0)
                start = end
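# Illustration (not from the original solver): bounds are stored in the
# solver's scaled space, so a physical bound maps through
# (bound - ref0) / (ref - ref0).
ref0, ref = 0.0, 100.0
physical_lower = 25.0
scaled_lower = (physical_lower - ref0) / (ref - ref0)   # 0.25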
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encode tokens, entity references and predictions as numerical vectors."""
import inspect
import json
import os
import sys
from typing import Any, List, Optional, Text, Tuple, Type, Union
from absl import logging
import numpy as np
import tensorflow as tf
MAX_NUM_ENTITIES = 20
EnrefArray = Union[tf.Tensor, np.ndarray]
class Section(object):
"""Represents a section (i.e. a range) within a data array."""
def __init__(self, array, start, size):
self.array = array
self.start = start
self.size = size
def slice(self):
return self.array[Ellipsis, self.start:(self.start + self.size)]
def replace(self, array):
if isinstance(self.array, tf.Tensor):
self.array = tf.concat([
self.array[Ellipsis, :self.start], array,
self.array[Ellipsis, (self.start + self.size):]
], -1)
else:
self.array[Ellipsis, self.start:(self.start + self.size)] = array
return self.array
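# Quick illustration of the Section API above: a Section is a fixed window into
# the last axis of an array; replace() writes into it and slice() reads it back.
_vec = np.zeros(10)
_sec = Section(_vec, start=2, size=3)
_sec.replace(np.array([1.0, 2.0, 3.0]))
assert np.array_equal(_sec.slice(), np.array([1.0, 2.0, 3.0]))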
class TypeSection(Section):
"""A section which specifies the encoding type (enref, token, prediction)."""
SIZE = 3
def is_token(self):
return self.array[Ellipsis, self.start + 2]
def set_token(self):
self.array[Ellipsis, self.start] = 0.0
self.array[Ellipsis, self.start + 2] = 1.0
def is_enref(self):
return self.array[Ellipsis, self.start]
def set_enref(self):
self.array[Ellipsis, self.start] = 1.0
self.array[Ellipsis, self.start + 2] = 0.0
class EnrefMetaSection(Section):
"""Encodes whether a token is an enref and if its new or new continued."""
SIZE = 3
def is_enref(self):
return self.array[Ellipsis, self.start]
def set_is_enref(self, value):
self.array[Ellipsis, self.start] = 1.0 if value else 0.0
def is_new(self):
return self.array[Ellipsis, self.start + 1]
def set_is_new(self, value):
self.array[Ellipsis, self.start + 1] = 1.0 if value else 0.0
def is_new_continued(self):
return self.array[Ellipsis, self.start + 2]
def set_is_new_continued(self, value):
self.array[Ellipsis, self.start + 2] = 1.0 if value else 0.0
def get_is_new_slice(self):
return self.array[Ellipsis, self.start + 1:self.start + self.size]
def replace_is_new_slice(self, array):
self.array = tf.concat([
self.array[Ellipsis, :self.start + 1], array,
self.array[Ellipsis, (self.start + self.size):]
], -1)
return self.array
class EnrefIdSection(Section):
SIZE = MAX_NUM_ENTITIES
def get(self):
index = np.argmax(self.slice())
return index
def set(self, enref_id):
self.array[Ellipsis, self.start:(self.start + self.size)] = 0.0
self.array[Ellipsis, self.start + enref_id] = 1.0
class EnrefPropertiesSection(Section):
"""Encodes the grammatical gender and whether an enref is a group."""
SIZE = 6
DOMAINS = ['people', 'locations']
PROPERTIES = ['female', 'male', 'neuter']
def get_domain(self):
array = self.array[Ellipsis, self.start:self.start + 2]
if np.max(array) <= 0.0:
return 'unknown'
index = np.argmax(array)
return self.DOMAINS[index]
def set_domain(self, domain):
self.array[Ellipsis, self.start:(self.start + 2)] = 0.0
if domain == 'unknown':
return
index = self.DOMAINS.index(domain)
self.array[Ellipsis, self.start + index] = 1.0
def get_gender(self):
array = self.array[Ellipsis, (self.start + 2):(self.start + 5)]
if np.max(array) <= 0.0:
return 'unknown'
index = np.argmax(array)
return self.PROPERTIES[index]
def set_gender(self, gender):
self.array[Ellipsis, (self.start + 2):(self.start + 5)] = 0.0
if gender == 'unknown':
return
index = self.PROPERTIES.index(gender)
self.array[Ellipsis, self.start + 2 + index] = 1.0
def is_group(self):
return self.array[Ellipsis, self.start + 5]
def set_is_group(self, value):
self.array[Ellipsis, self.start + 5] = 1.0 if value else 0.0
class EnrefMembershipSection(Section):
"""Encodes the members of a group, if an enref refers to multiple entities."""
SIZE = MAX_NUM_ENTITIES
def __init__(self, array, start, size):
Section.__init__(self, array, start, size)
self.names = None
def get_ids(self):
ids = np.where(self.slice() > 0.0)[0].tolist()
return ids
def get_names(self):
return self.names
def set(self, ids, names = None):
self.names = names
self.array[Ellipsis, self.start:(self.start + self.size)] = 0.0
for enref_id in ids:
self.array[Ellipsis, self.start + enref_id] = 1.0
class EnrefContextSection(Section):
"""Encodes if an enref is a sender or recipient and the message offset."""
SIZE = 7
def is_sender(self):
return self.array[Ellipsis, self.start]
def set_is_sender(self, value):
self.array[Ellipsis, self.start] = 1.0 if value else 0.0
def is_recipient(self):
return self.array[Ellipsis, self.start + 1]
def set_is_recipient(self, value):
self.array[Ellipsis, self.start + 1] = 1.0 if value else 0.0
def get_message_offset(self):
digit = 1
message_offset = 0
for i in range(2, self.SIZE):
message_offset += int(self.array[Ellipsis, self.start + i]) * digit
digit *= 2
return message_offset
def set_message_offset(self, offset):
for i in range(2, self.SIZE):
if offset & 0x01:
self.array[Ellipsis, self.start + i] = 1.0
else:
self.array[Ellipsis, self.start + i] = 0.0
offset = offset >> 1
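# Round-trip illustration for the message-offset bit encoding above: offsets are
# written little-endian into slots 2..6, so values 0..31 are preserved exactly.
_ctx = EnrefContextSection(np.zeros(EnrefContextSection.SIZE), 0,
                           EnrefContextSection.SIZE)
_ctx.set_message_offset(13)
assert _ctx.get_message_offset() == 13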
class TokenPaddingSection(Section):
"""An empty section sized so that enref and token encodings align."""
SIZE = (
EnrefIdSection.SIZE + EnrefPropertiesSection.SIZE +
EnrefMembershipSection.SIZE + EnrefContextSection.SIZE)
class SignalSection(Section):
"""Encodes optional token signals collected during preprocessing."""
SIZE = 10
SIGNALS = {
'first_name': 0,
'sports_team': 1,
'athlete': 2,
}
def set(self, signals):
self.array[Ellipsis, self.start:(self.start + self.size)] = 0.0
for signal in signals:
index = self.SIGNALS[signal]
self.array[Ellipsis, self.start + index] = 1.0
def get(self):
signals = []
for index, signal in enumerate(self.SIGNALS):
if self.array[Ellipsis, self.start + index] > 0.0:
signals.append(signal)
return signals
class WordvecSection(Section):
"""Contains the word2vec embedding of a token."""
SIZE = 300
def get(self):
return self.slice()
def set(self, wordvec):
self.array[Ellipsis, self.start:(self.start + self.size)] = wordvec
class BertSection(Section):
"""Contains the BERT embedding of a token."""
SIZE = 768
def get(self):
return self.slice()
def set(self, bertvec):
self.array[Ellipsis, self.start:(self.start + self.size)] = bertvec
class Encoding(object):
"""Provides an API to access data within an array."""
def __init__(self, array, layout):
assert isinstance(array, (np.ndarray, tf.Tensor))
self.array = array
index = 0
for (name, section_class) in layout:
section = section_class(array, index, section_class.SIZE)
setattr(self, name, section)
index += section_class.SIZE
self.sections_size = index
class EnrefEncoding(Encoding):
"""An API to access and modify contrack entity references within an array."""
def __init__(self, array, layout):
Encoding.__init__(self, array, layout)
self.entity_name = None
self.word_span = None
self.span_text = None
def populate(self, entity_name, word_span,
span_text):
self.entity_name = entity_name
self.word_span = word_span
self.span_text = span_text
def __repr__(self):
descr = ''
if self.entity_name is not None:
descr += '%s ' % self.entity_name
descr += '(%d%s%s) ' % (self.enref_id.get(),
'n' if self.enref_meta.is_new() > 0.0 else '', 'c'
if self.enref_meta.is_new_continued() > 0.0 else '')
if self.word_span is not None:
descr += '%d-%d ' % self.word_span
if self.span_text is not None:
descr += '(%s) ' % self.span_text
if self.enref_properties is not None:
is_group = self.enref_properties.is_group() > 0.0
domain = self.enref_properties.get_domain()
descr += domain[0]
if domain == 'people' and not is_group:
descr += ':' + self.enref_properties.get_gender()
if is_group:
descr += ':g %s' % self.enref_membership.get_ids()
if self.signals is not None and self.signals.get():
descr += str(self.signals.get())
return descr
class TokenEncoding(Encoding):
"""An API to access and modify contrack tokens within an array."""
def __init__(self, array, layout):
Encoding.__init__(self, array, layout)
def populate(self, token, signals, wordvec,
bertvec):
self.token = token
self.signals.set(signals)
self.wordvec.set(wordvec)
self.bert.set(bertvec)
def __repr__(self):
signals = self.signals.get()
signals_str = str(signals) if signals else ''
return '%s%s' % (self.token, signals_str)
class PredictionEncoding(Encoding):
"""An API to access and modify prediction values stored in an array."""
def __init__(self, array, layout):
Encoding.__init__(self, array, layout)
def __repr__(self):
descr = '(%d%s%s) ' % (self.enref_id.get(),
'n' if self.enref_meta.is_new() > 0.0 else '', 'c'
if self.enref_meta.is_new_continued() > 0.0 else '')
if self.enref_properties is not None:
is_group = self.enref_properties.is_group() > 0.0
domain = self.enref_properties.get_domain()
descr += domain[0]
if domain == 'people' and not is_group:
descr += ':' + self.enref_properties.get_gender()
if is_group:
descr += ': %s' % self.enref_membership.get_ids()
return descr
class Encodings(object):
"""Organize access to data encoded in numerical vectors."""
def __init__(self):
self.enref_encoding_layout = [('type', TypeSection),
('enref_meta', EnrefMetaSection),
('enref_id', EnrefIdSection),
('enref_properties', EnrefPropertiesSection),
('enref_membership', EnrefMembershipSection),
('enref_context', EnrefContextSection),
('signals', SignalSection),
('wordvec', WordvecSection),
('bert', BertSection)]
self.enref_encoding_length = sum(
[class_name.SIZE for (_, class_name) in self.enref_encoding_layout])
logging.info('EnrefEncoding (length: %d): %s', self.enref_encoding_length,
[f'{s}: {c.SIZE}' for s, c in self.enref_encoding_layout])
self.token_encoding_layout = [('type', TypeSection),
('enref_meta', EnrefMetaSection),
('padding', TokenPaddingSection),
('signals', SignalSection),
('wordvec', WordvecSection),
('bert', BertSection)]
self.token_encoding_length = sum(
[class_name.SIZE for (_, class_name) in self.token_encoding_layout])
assert self.enref_encoding_length == self.token_encoding_length
logging.info('TokenEncoding (length: %d): %s', self.token_encoding_length,
[f'{s}: {c.SIZE}' for s, c in self.token_encoding_layout])
self.prediction_encoding_layout = [
('enref_meta', EnrefMetaSection),
('enref_id', EnrefIdSection),
('enref_properties', EnrefPropertiesSection),
('enref_membership', EnrefMembershipSection),
]
self.prediction_encoding_length = sum([
class_name.SIZE for (_, class_name) in self.prediction_encoding_layout
])
logging.info('PredictionEncoding (length: %d): %s',
self.prediction_encoding_length,
[f'{s}: {c.SIZE}' for s, c in self.prediction_encoding_layout])
@classmethod
def load_from_json(cls, path):
"""Loads the encoding layout from a json file."""
classes = inspect.getmembers(sys.modules[__name__])
with tf.io.gfile.GFile(path, 'r') as file:
encodings_dict = json.loads(file.read())
enc = Encodings()
enc.enref_encoding_layout = []
for name, cls_name in encodings_dict['enref_encoding_layout']:
section_cls = next(o for (n, o) in classes if n.endswith(cls_name))
enc.enref_encoding_layout.append((name, section_cls))
enc.enref_encoding_length = sum(
[class_name.SIZE for (_, class_name) in enc.enref_encoding_layout])
enc.token_encoding_layout = []
for name, cls_name in encodings_dict['token_encoding_layout']:
section_cls = next(o for (n, o) in classes if n.endswith(cls_name))
enc.token_encoding_layout.append((name, section_cls))
enc.token_encoding_length = sum(
[class_name.SIZE for (_, class_name) in enc.token_encoding_layout])
assert enc.enref_encoding_length == enc.token_encoding_length
enc.prediction_encoding_layout = []
for name, cls_name in encodings_dict['prediction_encoding_layout']:
section_cls = next(o for (n, o) in classes if n.endswith(cls_name))
enc.prediction_encoding_layout.append((name, section_cls))
enc.prediction_encoding_length = sum(
[class_name.SIZE for (_, class_name) in enc.prediction_encoding_layout])
return enc
def as_enref_encoding(self, array):
return EnrefEncoding(array, self.enref_encoding_layout)
def new_enref_array(self):
return np.array([0.0] * self.enref_encoding_length)
def new_enref_encoding(self):
enc = EnrefEncoding(self.new_enref_array(), self.enref_encoding_layout)
enc.type.set_enref()
return enc
def as_token_encoding(self, array):
return TokenEncoding(array, self.token_encoding_layout)
def new_token_array(self):
return np.array([0.0] * self.token_encoding_length)
def new_token_encoding(self, token, signals,
wordvec, bertvec):
enc = TokenEncoding(self.new_token_array(), self.token_encoding_layout)
enc.type.set_token()
enc.populate(token, signals, wordvec, bertvec)
return enc
def as_prediction_encoding(self, array):
return PredictionEncoding(array, self.prediction_encoding_layout)
def new_prediction_array(self):
return np.array([0.0] * self.prediction_encoding_length)
def new_prediction_encoding(self):
enc = PredictionEncoding(self.new_prediction_array(),
self.prediction_encoding_layout)
return enc
def build_enref_from_prediction(
self, token,
prediction):
"""Build new enref from prediction logits."""
if prediction.enref_meta.is_enref() <= 0.0:
return None
    new_array = np.array(token.array)
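# Usage sketch (not in the original module) for the Encodings API above; the
# word2vec/BERT vectors are dummy zeros purely for illustration.
_encodings = Encodings()
_tok = _encodings.new_token_encoding(
    'alice', ['first_name'],
    np.zeros(WordvecSection.SIZE), np.zeros(BertSection.SIZE))
assert _tok.signals.get() == ['first_name']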
def transform(dataset, SX=0.5, SY=0.5, SZ=0.5, noise=15.0):
"""Deblur Images with a Weiner Filter."""
import numpy as np
from numpy import exp, square, pi
from scipy.fftpack import fftn, ifftn, fftshift
#Import information from dataset
array = dataset.active_scalars
dim = array.shape
#Point Spread Function (Estimated with Gaussian Function)
def gaussian(SX, SY, SZ):
x = np.arange(-dim[0]/2, dim[0]/2)
y = np.arange(-dim[1]/2, dim[1]/2)
z = np.arange(-dim[2]/2, dim[2]/2)
[x, y, z] = np.meshgrid(x, y, z, indexing='ij')
Kx = x/dim[0]
Ky = y/dim[1]
Kz = z/dim[2]
r = square(Kx)+square(Ky)+square(Kz)
        # NOTE: the original source is truncated here; the exponent below is an
        # assumed anisotropic Gaussian PSF in Fourier space, not verified code.
        G = exp(-2*square(pi)*(square(SX*Kx) + square(SY*Ky) + square(SZ*Kz)))
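# Self-contained 1-D illustration (assumed form; the original transform() body
# is truncated above) of the Wiener idea: divide by the blur kernel in Fourier
# space with a noise term so small frequencies are not amplified.
import numpy as _np
from scipy.fftpack import fft, ifft
_signal = _np.sin(_np.linspace(0.0, 6.28, 64))
_kernel = _np.exp(-_np.linspace(-2.0, 2.0, 64) ** 2)
_kernel /= _kernel.sum()
_blurred = _np.real(ifft(fft(_signal) * fft(_kernel)))
_K = fft(_kernel)
_restored = _np.real(ifft(fft(_blurred) * _np.conj(_K) / (_np.abs(_K) ** 2 + 1e-3)))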
import unittest
import numpy as np
from probnum.diffeq.ode import ivp, ivp_examples
from probnum.random_variables import Constant
from tests.testing import NumpyAssertions
class TestConvenienceFunction(unittest.TestCase):
"""Test case for correct object initialization."""
def setUp(self):
self.tspan = (0.0, 4.212)
def test_logistic(self):
"""Test the logistic ODE convenience function."""
rv = Constant(0.1)
lg1 = ivp_examples.logistic(self.tspan, rv)
lg2 = ivp_examples.logistic(self.tspan, rv, params=(1.0, 1.0))
self.assertIsInstance(lg1, ivp.IVP)
self.assertIsInstance(lg2, ivp.IVP)
def test_fitzhughnagumo(self):
"""Test the FHN IVP convenience function."""
rv = Constant(np.ones(2))
lg1 = ivp_examples.fitzhughnagumo(self.tspan, rv)
lg2 = ivp_examples.fitzhughnagumo(self.tspan, rv, params=(1.0, 1.0, 1.0, 1.0))
self.assertIsInstance(lg1, ivp.IVP)
self.assertIsInstance(lg2, ivp.IVP)
def test_lotkavolterra(self):
"""Test the LV ODE convenience function."""
rv = Constant(np.ones(2))
lg1 = ivp_examples.lotkavolterra(self.tspan, rv)
lg2 = ivp_examples.lotkavolterra(self.tspan, rv, params=(1.0, 1.0, 1.0, 1.0))
self.assertIsInstance(lg1, ivp.IVP)
self.assertIsInstance(lg2, ivp.IVP)
def test_seir(self):
"""Test the SEIR ODE convenience function."""
rv = Constant(np.array([1.0, 0.0, 0.0, 0.0]))
lg1 = ivp_examples.seir(self.tspan, rv)
lg2 = ivp_examples.seir(self.tspan, rv, params=(1.0, 1.0, 1.0, 1.0))
self.assertIsInstance(lg1, ivp.IVP)
self.assertIsInstance(lg2, ivp.IVP)
def test_lorenz(self):
"""Test the Lorenz model ODE convenience function."""
rv = Constant(np.array([1.0, 1.0, 1.0]))
lg1 = ivp_examples.lorenz(self.tspan, rv)
lg2 = ivp_examples.lorenz(
self.tspan,
rv,
params=(
10.0,
28.0,
8.0 / 3.0,
),
)
self.assertIsInstance(lg1, ivp.IVP)
self.assertIsInstance(lg2, ivp.IVP)
class TestRHSEvaluation(unittest.TestCase, NumpyAssertions):
"""Test cases that check the evaluation of IVP vector fields."""
def setUp(self):
self.tspan = (0.0, 4.212)
def test_logistic_rhs(self):
rv = Constant(0.1)
lg1 = ivp_examples.logistic(self.tspan, rv)
self.assertEqual(lg1.rhs(0.1, rv).shape, rv.shape)
def test_fitzhughnagumo_rhs(self):
        rv = Constant(np.ones(2))
        # mirrors test_logistic_rhs above
        fhn = ivp_examples.fitzhughnagumo(self.tspan, rv)
        self.assertEqual(fhn.rhs(0.1, rv).shape, rv.shape)
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import pickle
from abc import ABCMeta, abstractmethod
import json_tricks as json
import numpy as np
from scipy.io import loadmat
from torch.utils.data import Dataset
from mmpose.datasets import DatasetInfo
from mmpose.datasets.pipelines import Compose
class Kpt3dMviewRgbImgDirectDataset(Dataset, metaclass=ABCMeta):
"""Base class for keypoint 3D top-down pose estimation with multi-view RGB
images as the input.
All subclasses should overwrite:
    Methods: `_get_db`, `evaluate`
Args:
ann_file (str): Path to the annotation file.
img_prefix (str): Path to a directory where images are held.
Default: None.
data_cfg (dict): config
pipeline (list[dict | callable]): A sequence of data transforms.
dataset_info (DatasetInfo): A class containing all dataset info.
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
ann_file,
img_prefix,
data_cfg,
pipeline,
dataset_info=None,
test_mode=False):
self.image_info = {}
self.ann_info = {}
self.ann_file = ann_file
self.img_prefix = img_prefix
self.pipeline = pipeline
self.test_mode = test_mode
self.ann_info['image_size'] = np.array(data_cfg['image_size'])
self.ann_info['heatmap_size'] = np.array(data_cfg['heatmap_size'])
self.ann_info['num_joints'] = data_cfg['num_joints']
self.ann_info['space_size'] = data_cfg['space_size']
self.ann_info['space_center'] = data_cfg['space_center']
self.ann_info['cube_size'] = data_cfg['cube_size']
self.ann_info['scale_aware_sigma'] = data_cfg.get(
'scale_aware_sigma', False)
if dataset_info is None:
raise ValueError(
'Check https://github.com/open-mmlab/mmpose/pull/663 '
'for details.')
dataset_info = DatasetInfo(dataset_info)
self.ann_info['flip_pairs'] = dataset_info.flip_pairs
self.ann_info['num_scales'] = 1
self.ann_info['flip_index'] = dataset_info.flip_index
self.ann_info['upper_body_ids'] = dataset_info.upper_body_ids
self.ann_info['lower_body_ids'] = dataset_info.lower_body_ids
self.ann_info['joint_weights'] = dataset_info.joint_weights
self.ann_info['skeleton'] = dataset_info.skeleton
self.sigmas = dataset_info.sigmas
self.dataset_name = dataset_info.dataset_name
self.load_config(data_cfg)
self.db = []
self.pipeline = Compose(self.pipeline)
@abstractmethod
def _get_db(self):
"""Load dataset."""
raise NotImplementedError
def load_config(self, data_cfg):
"""Initialize dataset attributes according to the config.
Override this method to set dataset specific attributes.
"""
self.num_joints = data_cfg['num_joints']
self.num_cameras = data_cfg['num_cameras']
self.seq_frame_interval = data_cfg.get('seq_frame_interval', 1)
self.subset = data_cfg.get('subset', 'train')
self.need_2d_label = data_cfg.get('need_2d_label', False)
self.need_camera_param = True
@staticmethod
def _get_mapping_id_name(imgs):
"""
Args:
imgs (dict): dict of image info.
Returns:
tuple: Image name & id mapping dicts.
- id2name (dict): Mapping image id to name.
- name2id (dict): Mapping image name to id.
"""
id2name = {}
name2id = {}
for image_id, image in imgs.items():
file_name = image['file_name']
id2name[image_id] = file_name
name2id[file_name] = image_id
return id2name, name2id
@abstractmethod
def evaluate(self, results, *args, **kwargs):
"""Evaluate keypoint results."""
@staticmethod
def _write_keypoint_results(keypoints, res_file):
"""Write results into a json file."""
with open(res_file, 'w') as f:
json.dump(keypoints, f, sort_keys=True, indent=4)
def __len__(self):
"""Get the size of the dataset."""
return len(self.db) // self.num_cameras
def __getitem__(self, idx):
"""Get the sample given index."""
results = {}
# return self.pipeline(results)
for c in range(self.num_cameras):
result = copy.deepcopy(self.db[self.num_cameras * idx + c])
result['ann_info'] = self.ann_info
results[c] = result
return self.pipeline(results)
@staticmethod
def _sort_and_unique_outputs(outputs, key='sample_id'):
"""sort outputs and remove the repeated ones."""
outputs = sorted(outputs, key=lambda x: x[key])
num_outputs = len(outputs)
for i in range(num_outputs - 1, 0, -1):
if outputs[i][key] == outputs[i - 1][key]:
del outputs[i]
return outputs
def _get_scale(self, raw_image_size):
heatmap_size = self.ann_info['heatmap_size']
image_size = self.ann_info['image_size']
assert heatmap_size[0][0] / heatmap_size[0][1] \
== image_size[0] / image_size[1]
w, h = raw_image_size
w_resized, h_resized = image_size
if w / w_resized < h / h_resized:
w_pad = h / h_resized * w_resized
h_pad = h
else:
w_pad = w
h_pad = w / w_resized * h_resized
scale = np.array([w_pad, h_pad], dtype=np.float32)
return scale
@staticmethod
def rotate_points(points, center, rot_rad):
"""Rotate the points around the center.
Args:
points: np.ndarray, N*2
center: np.ndarray, 2
rot_rad: scalar
Return:
np.ndarray (N*2)
"""
rot_rad = rot_rad * np.pi / 180.0
rotate_mat = np.array([[np.cos(rot_rad), -np.sin(rot_rad)],
[np.sin(rot_rad),
np.cos(rot_rad)]])
center = center.reshape(2, 1)
points = points.T
points = rotate_mat.dot(points - center) + center
return points.T
@staticmethod
def calc_bbox(pose, pose_vis):
"""calculate the bbox of a pose."""
index = pose_vis[:, 0] > 0
bbox = [
np.min(pose[index, 0]),
np.min(pose[index, 1]),
np.max(pose[index, 0]),
np.max(pose[index, 1])
]
return np.array(bbox)
def _get_cam(self, calib):
"""Get camera parameters.
Returns: Camera parameters.
"""
cameras = {}
for id, cam in calib.items():
sel_cam = {}
            # note the transpose operation, which differs from VoxelPose
sel_cam['R'] = np.array(cam['R'], dtype=np.float32).T
sel_cam['T'] = np.array(cam['T'], dtype=np.float32)
            sel_cam['k'] = np.array(cam['k'], dtype=np.float32)
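# Illustration of the rotate_points convention above: the angle is given in
# degrees and points rotate about `center` (values approximate due to float
# cos/sin).
_pts = Kpt3dMviewRgbImgDirectDataset.rotate_points(
    np.array([[1.0, 0.0]]), np.array([0.0, 0.0]), 90)
# _pts is approximately [[0.0, 1.0]]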
#!/usr/bin/python3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import sys
import pprint
import numpy as np
import pickle
import itertools
import config
from agents.learning import LearningAgent
from environment.bandit import Bandit
from experiments.experiment import Experiment, ParallelExperiment
from agents.gaussian import *
from agents.uniform import *
from agents.pruning import *
from agents.delegator import Delegator
from util import meeting_point
def main():
config_obj = config.Config.get_instance()
settings = config_obj.parse(sys.argv[1])
team_sizes = settings['team_sizes']
bandit_sizes = settings['bandit_sizes']
trials = settings['trials']
executions = settings['executions']
experiments = []
# values of the prob. distribution of agent generation
    # they vary in nature depending on whether the agents are gaussian or uniform
dist_params = settings['upper_bounds']
if settings['ltd_type'] == 'gaussian':
# must use list comprehension otherwise generator is consumed in 1st use
dist_params = [x for x in itertools.product(settings['mus'], settings['sigmas'])]
print('Parameters:')
pprint.PrettyPrinter().pprint(settings)
# sets up a number of experiments for each variation on bandit size,
# team size and distribution parameters
# the number of repetitions for each variation is in the 'executions' variable
for n_arms in bandit_sizes:
for team_sz in team_sizes:
for param in dist_params:
print('Preparing for %d/%d/%s' % (n_arms, team_sz, param))
# if experiment is gaussian, param has two values
mu_or_upper_bound = param if settings['ltd_type'] == 'uniform' else param[0]
# TODO: make experiments with different sigmas be written to different places
os.system(
"mkdir -p " + os.path.join(
settings['output_dir'], str(n_arms), str(team_sz), '%.2f' % mu_or_upper_bound
)
)
# identifies groups of experiments by their parameters
exp_group_name = '%d/%d/%.2f' % (n_arms, team_sz, mu_or_upper_bound)
# will have one experiment for each configuration of the parameters
experiment_batch = []
for e in range(executions):
sys.stdout.write(
'\rSetup for %d arms, |X| = %6d, u/mu = %.4f, exec=%6d' %
(n_arms, team_sz, mu_or_upper_bound, e)
)
bandit = Bandit(n_arms, None, 0.25)
learner = LearningAgent(
bandit, alpha=settings['alpha'], epsilon=settings['epsilon'],
alpha_decay=settings['alpha_decay'], epsilon_decay=settings['epsilon_decay']
)
if settings['ltd_type'] == 'uniform':
controller = Delegator(
[PruningAgentFair2(bandit, 0.95, u=mu_or_upper_bound) for _ in range(team_sz)],
alpha=settings['alpha'], epsilon=settings['epsilon'],
alpha_decay=settings['alpha_decay'], epsilon_decay=settings['epsilon_decay']
)
else:
controller = Delegator(
[GaussianAgentPrune(
bandit, 0.95, mu=mu_or_upper_bound, sigma=param[1]
) for _ in range(team_sz)],
alpha=settings['alpha'], epsilon=settings['epsilon'],
alpha_decay=settings['alpha_decay'], epsilon_decay=settings['epsilon_decay']
)
experiment_id = '%d/%d/%d/%.2f' % (e, n_arms, team_sz, mu_or_upper_bound)
lta_experiment = Experiment(bandit, learner, 'LtA/' + experiment_id)
ltd_experiment = Experiment(bandit, controller, 'LtD/' + experiment_id)
experiment_batch.append(lta_experiment)
experiment_batch.append(ltd_experiment)
# this batch of experiment is ready. run it:
print('\nSetup finished for %d experiments.' % len(experiment_batch))
manager = ParallelExperiment(experiment_batch)
manager.run(trials)
plot(manager.result, settings['output_dir'], settings['ltd_type'])
print('Plot OK for %s' % exp_group_name)
def plot(results, output_dir, ltd_type):
#exp_dict[exp_group_name] = {'LtA': [], 'LtD': []}
exp_dict = {}
print('# results:', len(results))
for r in results:
# group by n_arms, team_sz and currentMu: each exec will be an entry
index_str, execution_str, n_arms, team_sz, currentMu = r.id.split('/')
exp_group_name = '%s/%s/%s' % (n_arms, team_sz, currentMu)
exp_group = exp_dict.get(exp_group_name, {'LtA': [], 'LtD': []})
# index_str = 'LtA' if r.agent == 'LearningAgent' else 'LtD'
exp_group[index_str].append(r)
exp_dict[exp_group_name] = exp_group
print('Results organized')
for exp_group_name, exp_group in exp_dict.items():
# extracts data of training with algorithms and actions
trials = exp_group['LtA'][0].trials # TODO: check that the number of trials is the same across all experiments in the group
execution_rwd_lta = [exp.rewards for exp in exp_group['LtA']]
execution_rwd_ltd = [exp.rewards for exp in exp_group['LtD']]
p_best_lta = [exp.p_best for exp in exp_group['LtA']]
p_best_ltd = [exp.p_best for exp in exp_group['LtD']]
times_best_lta = [exp.cumulative_times_best for exp in exp_group['LtA']]
times_best_ltd = [exp.cumulative_times_best for exp in exp_group['LtD']]
cumulative_rewards_lta = [exp.cumulative_rewards for exp in exp_group['LtA']]
cumulative_rewards_ltd = [exp.cumulative_rewards for exp in exp_group['LtD']]
cumulative_regret_lta = [exp.cumulative_regrets for exp in exp_group['LtA']]
cumulative_regret_ltd = [exp.cumulative_regrets for exp in exp_group['LtD']]
cumulative_regret_exp_lta = [exp.cumulative_regrets_exp for exp in exp_group['LtA']]
cumulative_regret_exp_ltd = [exp.cumulative_regrets_exp for exp in exp_group['LtD']]
# calculates the meeting points (where learning over actions starts
# to outperform learning over algorithms) for various metrics
meeting_rewards = meeting_point(np.mean(execution_rwd_lta, 0), np.mean(execution_rwd_ltd, 0))
meeting_pbest = meeting_point(np.mean(p_best_lta, 0), np.mean(p_best_ltd, 0))
meeting_tbest = meeting_point(np.mean(times_best_lta, 0), np.mean(times_best_ltd, 0))
meeting_cumulative_reward = meeting_point(np.mean(cumulative_rewards_lta, 0), np.mean(cumulative_rewards_ltd, 0))
meeting_cumulative_regret = meeting_point(np.mean(cumulative_regret_ltd, 0), np.mean(cumulative_regret_lta, 0))
meeting_regret_exp = meeting_point(np.mean(cumulative_regret_exp_ltd, 0), np.mean(cumulative_regret_exp_lta, 0))
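# Hedged sketch (assumption -- util.meeting_point is not shown here): the meeting point is
# presumably the first index at which the first averaged curve reaches or overtakes the
# second one, i.e. roughly
#   idx = np.where(curve_a >= curve_b)[0]; meeting = idx[0] if len(idx) else -1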
ltd_name = ltd_type.capitalize() # 'Gaussian' if settings['ltd_type'] == 'gaussian' else 'Uniform'
# plots instantaneous reward
plt.figure()
plt.plot(np.mean(execution_rwd_lta, 0), label="Actions")
plt.plot(np.mean(execution_rwd_ltd, 0), label=ltd_name)
plt.plot(np.convolve(np.mean(execution_rwd_lta, 0), np.ones((100,))/100, mode='valid'))
plt.plot(np.convolve(np.mean(execution_rwd_ltd, 0), np.ones((100,)) / 100, mode='valid'))
plt.xlabel("Iteration")
plt.ylabel("Reward")
plt.legend()
plt.savefig(os.path.join(output_dir, exp_group_name, "reward.pdf"))
plt.close()
# plots pbest (probability of selecting the best action)
plt.figure()
plt.plot(np.mean(p_best_lta, 0), color="#1f77b4", label="Actions")
plt.plot(np.mean(p_best_ltd, 0), color="#ff7f0e", label=ltd_name)
plt.errorbar(
range(0, trials, 50), np.mean(p_best_lta, 0)[0:trials:50],
yerr=np.std(p_best_lta, 0)[0:trials:50],
color="#1f77b4", fmt=".", capsize=3
)
plt.errorbar(
range(0, trials, 50), np.mean(p_best_ltd, 0)[0:trials:50],
yerr=np.std(p_best_ltd, 0)[0:trials:50],
color="#ff7f0e", fmt=".", capsize=3
)
plt.xlabel("Iteration")
plt.ylabel(r"$p_{a^*} (pbest)$")
plt.legend()
plt.savefig(os.path.join(output_dir, exp_group_name, 'pbest.pdf'))
plt.close()
# plots the number of times the best action has been selected
plt.figure()
plt.plot(np.mean(times_best_lta, 0), color="#1f77b4", label="Actions")
plt.plot(np.mean(times_best_ltd, 0), color="#ff7f0e", label=ltd_name)
plt.errorbar(
range(0, trials, 50), np.mean(times_best_lta, 0)[0:trials:50],
yerr=np.std(times_best_lta, 0)[0:trials:50],
color="#1f77b4", fmt=".", capsize=3
)
plt.errorbar(
range(0, trials, 50), np.mean(times_best_ltd, 0)[0:trials:50],
yerr=np.std(times_best_ltd, 0)[0:trials:50],
color="#ff7f0e", fmt=".", capsize=3
)
plt.xlabel("Iteration")
plt.ylabel(r"# $a^*$ (#times a* was played)")
plt.legend()
plt.savefig(os.path.join(output_dir, exp_group_name, "timesBest.pdf"))
plt.close()
# plots the cumulative reward
plt.figure()
plt.plot(np.mean(cumulative_rewards_lta, 0), color="#1f77b4", label="Actions")
plt.plot(np.mean(cumulative_rewards_ltd, 0), color="#ff7f0e", label=ltd_name)
plt.errorbar(
range(0, trials, 50), np.mean(cumulative_rewards_lta, 0)[0:trials:50],
yerr=np.std(cumulative_rewards_lta, 0)[0:trials:50],
color="#1f77b4", fmt=".", capsize=3
)
plt.errorbar(
range(0, trials, 50), np.mean(cumulative_rewards_ltd, 0)[0:trials:50],
yerr=np.std(cumulative_rewards_ltd, 0)[0:trials:50],
color="#ff7f0e", fmt=".", capsize=3
)
plt.xlabel("Iteration")
plt.ylabel("Cumulative reward")
plt.legend()
plt.savefig(os.path.join(output_dir, exp_group_name, "cumulativeRewards.pdf"))
plt.close()
# plots the cumulative regret
plt.figure()
plt.plot(np.mean(cumulative_regret_lta, 0), color="#1f77b4", label="Actions")
plt.plot(np.mean(cumulative_regret_ltd, 0), color="#ff7f0e", label="Delegate")
plt.errorbar(
range(0, trials, 50), np.mean(cumulative_regret_lta, 0)[0:trials:50],
yerr=np.std(cumulative_regret_lta, 0)[0:trials:50],
color="#1f77b4", fmt=".", capsize=3
)
plt.errorbar(
range(0, trials, 50), np.mean(cumulative_regret_ltd, 0)[0:trials:50],
yerr=np.std(cumulative_regret_ltd, 0)[0:trials:50],
color="#ff7f0e", fmt=".", capsize=3
)
plt.xlabel("Iteration")
plt.ylabel(r"$\sum $Regret")
plt.legend()
plt.savefig(os.path.join(output_dir, exp_group_name, "cumulativeRegret.pdf"))
plt.close()
# plots the expected cumulative regret
plt.figure()
plt.plot(np.mean(cumulative_regret_exp_lta, 0), color="#1f77b4", label="Actions")
plt.plot(np.mean(cumulative_regret_exp_ltd, 0), color="#ff7f0e", label="Delegate")
plt.errorbar(
range(0, trials, 50), np.mean(cumulative_regret_exp_lta, 0)[0:trials:50],
yerr=
|
np.std(cumulative_regret_exp_lta, 0)
|
numpy.std
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 AGB
# Full license can be found in LICENSE.txt
# ---------------------------------------------------------------------------
""" Perform OCB gridding for SuperMAG data
Functions
---------
supermag2ascii_ocb(smagfile, outfile, kwargs)
Write an ASCII file with SuperMAG data and the OCB coordinates for each
data point
load_supermag_ascii_data(filename)
Load SuperMAG ASCII data files
Data
----
SuperMAG data available at: http://supermag.jhuapl.edu/
"""
from __future__ import absolute_import, unicode_literals
import datetime as dt
import numpy as np
import ocbpy
import ocbpy.ocb_scaling as ocbscal
def supermag2ascii_ocb(smagfile, outfile, hemisphere=0, ocb=None,
ocbfile='default', instrument='', max_sdiff=600,
min_sectors=7, rcent_dev=8.0, max_r=23.0, min_r=10.0):
""" Coverts and scales the SuperMAG data into OCB coordinates
Parameters
----------
smagfile : (str)
file containing the required SuperMAG file sorted by time
outfile : (str)
filename for the output data
hemisphere : (int)
Hemisphere to process (can only do one at a time). 1=Northern,
-1=Southern, 0=Determine from data (default=0)
ocb : (OCBoundary or NoneType)
OCBoundary object with data loaded from an OC boundary data file.
If None, looks to ocbfile
ocbfile : (str)
file containing the required OC boundary data sorted by time, or
'default' to load default file for time and hemisphere. Only used if
no OCBoundary object is supplied (default='default')
instrument : (str)
Instrument providing the OCBoundaries. Requires 'image' or 'ampere'
if a file is provided. If using filename='default', also accepts
'amp', 'si12', 'si13', 'wic', and ''. (default='')
max_sdiff : (int)
maximum seconds between OCB and data record in sec (default=600)
min_sectors : (int)
Minimum number of MLT sectors required for good OCB (default=7).
rcent_dev : (float)
Maximum number of degrees between the new centre and the AACGM pole
(default=8.0)
max_r : (float)
Maximum radius for open-closed field line boundary in degrees
(default=23.0)
min_r : (float)
Minimum radius for open-closed field line boundary in degrees
(default=10.0)
Notes
-----
May only process one hemisphere at a time. Scales the magnetic field
observations using `ocbpy.ocb_scaling.normal_curl_evar`.
"""
if not ocbpy.instruments.test_file(smagfile):
raise IOError("SuperMAG file cannot be opened [{:s}]".format(smagfile))
if not isinstance(outfile, str):
raise IOError("output filename is not a string [{:}]".format(outfile))
# Read the superMAG data and calculate the magnetic field magnitude
header, mdata = load_supermag_ascii_data(smagfile)
# Load the OCB data for the SuperMAG data period
if ocb is None or not isinstance(ocb, ocbpy.ocboundary.OCBoundary):
mstart = mdata['DATETIME'][0] - dt.timedelta(seconds=max_sdiff+1)
mend = mdata['DATETIME'][-1] + dt.timedelta(seconds=max_sdiff+1)
# If hemisphere isn't specified, set it here
if hemisphere == 0:
hemisphere = np.sign(np.nanmax(mdata['MLAT']))
# Ensure that all data is in the same hemisphere
if hemisphere == 0:
hemisphere = np.sign(np.nanmin(mdata['MLAT']))
elif hemisphere != np.sign(np.nanmin(mdata['MLAT'])):
raise ValueError("".join(["cannot process observations from "
"both hemispheres at the same time;"
"set hemisphere=+/-1 to choose."]))
# Initialize the OCBoundary object
ocb = ocbpy.OCBoundary(ocbfile, stime=mstart, etime=mend,
hemisphere=hemisphere, instrument=instrument)
elif hemisphere == 0:
# If the OCBoundary object is specified and hemisphere isn't use
# the OCBoundary object to specify the hemisphere
hemisphere = ocb.hemisphere
# Test the OCB data
if ocb.filename is None or ocb.records == 0:
ocbpy.logger.error("no data in OCB file {:}".format(ocb.filename))
return
# Remove the data with NaNs/Inf and from the opposite hemisphere/equator
igood = np.where((np.isfinite(mdata['MLT'])) & (np.isfinite(mdata['MLAT']))
& (np.isfinite(mdata['BE'])) & (np.isfinite(mdata['BN']))
& (np.isfinite(mdata['BZ']))
& (np.sign(mdata['MLAT']) == hemisphere))[0]
if igood.shape != mdata['MLT'].shape:
for k in mdata.keys():
mdata[k] = mdata[k][igood]
# Recalculate the number of stations if some data was removed
for tt in np.unique(mdata['DATETIME']):
itimes = np.where(mdata['DATETIME'] == tt)[0]
mdata['NST'][itimes] = len(itimes)
# Open and test the file to ensure it can be written
with open(outfile, 'w') as fout:
# Write the output line
outline = "#DATE TIME NST STID "
optional_keys = ["<KEY>SZA"]
for okey in optional_keys:
if okey in mdata.keys():
outline = "{:s}{:s} ".format(outline, okey)
outline = "".join([outline, "MLAT MLT BMAG BN BE BZ OCB_MLAT OCB_MLT ",
"OCB_BMAG OCB_BN OCB_BE OCB_BZ\n"])
fout.write(outline)
# Initialise the ocb and SuperMAG indices
imag = 0
nmag = mdata['DATETIME'].shape[0]
# Cycle through the data, matching SuperMAG and OCB records
while imag < nmag and ocb.rec_ind < ocb.records:
imag = ocbpy.match_data_ocb(ocb, mdata['DATETIME'], idat=imag,
max_tol=max_sdiff,
min_sectors=min_sectors,
rcent_dev=rcent_dev, max_r=max_r,
min_r=min_r)
if imag < nmag and ocb.rec_ind < ocb.records:
# Set this value's AACGM vector values
vdata = ocbscal.VectorData(imag, ocb.rec_ind,
mdata['MLAT'][imag],
mdata['MLT'][imag],
aacgm_n=mdata['BN'][imag],
aacgm_e=mdata['BE'][imag],
aacgm_z=mdata['BZ'][imag],
scale_func=ocbscal.normal_curl_evar)
vdata.set_ocb(ocb)
# Format the output line:
# DATE TIME NST [SML SMU] STID [SZA] MLAT MLT BMAG BN BE BZ
# OCB_MLAT OCB_MLT OCB_BMAG OCB_BN OCB_BE OCB_BZ
outline = "{:} {:d} {:s} ".format(mdata['DATETIME'][imag],
mdata['NST'][imag],
mdata['STID'][imag])
for okey in optional_keys:
if okey == "SZA":
outline = "{:s}{:.2f} ".format(outline,
mdata[okey][imag])
else:
outline = "{:s}{:d} ".format(outline,
mdata[okey][imag])
outline = "".join([outline, "{:.2f} ".format(vdata.aacgm_lat),
"{:.2f} {:.2f} ".format(vdata.aacgm_mlt,
vdata.aacgm_mag),
"{:.2f} {:.2f} ".format(vdata.aacgm_n,
vdata.aacgm_e),
"{:.2f} {:.2f} {:.2f} {:.2f}".format(
vdata.aacgm_z, vdata.ocb_lat,
vdata.ocb_mlt, vdata.ocb_mag),
" {:.2f} {:.2f} {:.2f}\n".format(
vdata.ocb_n, vdata.ocb_e, vdata.ocb_z)])
fout.write(outline)
# Move to next line
imag += 1
return
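# Hedged usage sketch (both file names below are hypothetical):
#   supermag2ascii_ocb("supermag_north_2000.txt", "supermag_north_2000_ocb.txt",
#                      hemisphere=1, instrument="image")
# converts a single SuperMAG ASCII file into OCB coordinates using the default boundary file.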
def load_supermag_ascii_data(filename):
"""Load a SuperMAG ASCII data file
Parameters
----------
filename : (str)
SuperMAG ASCII data file name
Returns
-------
out : (dict of numpy.arrays)
The dict keys are specified by the header data line, the data
for each key are stored in the numpy array
"""
fill_val = 999999
header = list()
ind = {"SMU": fill_val, "SML": fill_val}
out = {"YEAR": list(), "MONTH": list(), "DAY": list(), "HOUR": list(),
"MIN": list(), "SEC": list(), "DATETIME": list(), "NST": list(),
"SML": list(), "SMU": list(), "STID": list(), "BN": list(),
"BE": list(), "BZ": list(), "MLT": list(), "MLAT": list(),
"DEC": list(), "SZA": list()}
if not ocbpy.instruments.test_file(filename):
return header, dict()
# Open the datafile and read the data
with open(filename, "r") as f:
hflag = True
n = -1
for line in f.readlines():
if hflag:
# Fill the header list
header.append(line)
if line.find("=========================================") >= 0:
hflag = False
else:
# Fill the output dictionary
if n < 0:
# This is a date line
n = 0
lsplit = np.array(line.split(), dtype=int)
dtime = dt.datetime(lsplit[0], lsplit[1], lsplit[2],
lsplit[3], lsplit[4], lsplit[5])
snum = lsplit[-1]
else:
lsplit = line.split()
if len(lsplit) == 2:
# This is an index line
ind[lsplit[0]] = int(lsplit[1])
else:
# This is a station data line
out['YEAR'].append(dtime.year)
out['MONTH'].append(dtime.month)
out['DAY'].append(dtime.day)
out['HOUR'].append(dtime.hour)
out['MIN'].append(dtime.minute)
out['SEC'].append(dtime.second)
out['DATETIME'].append(dtime)
out['NST'].append(snum)
for k in ind.keys():
out[k].append(ind[k])
out['STID'].append(lsplit[0])
out['BN'].append(float(lsplit[1]))
out['BE'].append(float(lsplit[2]))
out['BZ'].append(float(lsplit[3]))
out['MLT'].append(float(lsplit[4]))
out['MLAT'].append(float(lsplit[5]))
out['DEC'].append(float(lsplit[6]))
out['SZA'].append(float(lsplit[7]))
n += 1
if n == snum:
n = -1
ind = {"SMU": fill_val, "SML": fill_val}
# Recast data as numpy arrays and replace fill value with np.nan
for k in out:
if k == "STID":
out[k] =
|
np.array(out[k], dtype=str)
|
numpy.array
|
# test_basic.py: some basic tests of the code
import numpy
numpy.random.seed(2)
from extreme_deconvolution import extreme_deconvolution
def test_single_gauss_1d_nounc():
# Generate data from a single Gaussian, recover mean and variance
ndata= 3001
ydata= numpy.atleast_2d(numpy.random.normal(size=ndata)).T
ycovar= numpy.zeros_like(ydata)
# initialize fit
K= 1
initamp= numpy.ones(K)
initmean= numpy.atleast_2d(numpy.mean(ydata)+1.)
initcovar= numpy.atleast_3d(3.*numpy.var(ydata))
# Run XD
extreme_deconvolution(ydata,ycovar,initamp,initmean,initcovar)
# Test
tol= 10./numpy.sqrt(ndata)
assert numpy.fabs(initmean-0.) < tol, 'XD does not recover correct mean for single Gaussian w/o uncertainties'
assert numpy.fabs(initcovar-1.) < tol, 'XD does not recover correct variance for single Gaussian w/o uncertainties'
return None
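# Note: extreme_deconvolution updates initamp, initmean and initcovar in place, which is why
# the assertions above (and in the tests below) inspect the init* arrays after the call.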
def test_single_gauss_1d_constunc():
# Generate data from a single Gaussian, recover mean and variance
ndata= 3001
ydata= numpy.atleast_2d(numpy.random.normal(size=ndata)).T
ycovar= numpy.ones_like(ydata)*0.25
ydata+= numpy.atleast_2d(numpy.random.normal(size=ndata)).T\
*numpy.sqrt(ycovar)
# initialize fit
K= 1
initamp= numpy.ones(K)
initmean= numpy.atleast_2d(numpy.mean(ydata)+1.5)
initcovar= numpy.atleast_3d(3.*numpy.var(ydata))
# Run XD
extreme_deconvolution(ydata,ycovar,initamp,initmean,initcovar)
# Test
tol= 10./numpy.sqrt(ndata)
assert numpy.fabs(initmean-0.) < tol, 'XD does not recover correct mean for single Gaussian w/ constant uncertainties'
assert numpy.fabs(initcovar-1.) < tol, 'XD does not recover correct variance for single Gaussian w/ constant uncertainties'
return None
def test_single_gauss_1d_varunc():
# Generate data from a single Gaussian, recover mean and variance
ndata= 3001
ydata= numpy.atleast_2d(numpy.random.normal(size=ndata)).T
ycovar= numpy.ones_like(ydata)*\
numpy.atleast_2d(numpy.random.uniform(size=ndata)).T
ydata+= numpy.atleast_2d(numpy.random.normal(size=ndata)).T\
*numpy.sqrt(ycovar)
# initialize fit
K= 1
initamp= numpy.ones(K)
initmean= numpy.atleast_2d(numpy.mean(ydata)+numpy.std(ydata))
initcovar= numpy.atleast_3d(3.*numpy.var(ydata))
# Run XD
extreme_deconvolution(ydata,ycovar,initamp,initmean,initcovar)
# Test
tol= 10./numpy.sqrt(ndata)
assert numpy.fabs(initmean-0.) < tol, 'XD does not recover correct mean for single Gaussian w/ uncertainties'
assert
|
numpy.fabs(initcovar-1.)
|
numpy.fabs
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
# Identify pixels above the threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh(img, rgb_thresh=(160, 160, 160)):
# Create an array of zeros same xy size as img, but single channel
color_select =
|
np.zeros_like(img[:,:,0])
|
numpy.zeros_like
|
import cv2
import numpy as np
import calib.corner_kernels as ck
from math import pi, tan
from skimage.feature import peak_local_max
def compute_corner_kernel_whole(size, offset=0.0):
kern = np.zeros((size, size), dtype=np.uint8)
half = size // 2
factor = 255 / ((half - offset) * (half - offset))
for row in range(0, half):
for col in range(0, half):
kern[row, col] = round((row + 1 - offset) * (col + 1 - offset) * factor)
for col in range(half, size):
kern[row, col] = round((row + 1 - offset) * (size - col - offset) * factor)
for row in range(half, size):
for col in range(0, half):
kern[row, col] = round((size - row - offset) * (col + 1 - offset) * factor)
for col in range(half, size):
kern[row, col] = round((size - row - offset) * (size - col - offset) * factor)
return kern
def compute_inital_corner_likelihood(image):
likelihoods = []
for prototype in ck.CORNER_KERNEL_PROTOTYPES:
filter_responses = [cv2.filter2D(image, ddepth=cv2.CV_64F, kernel=kernel) for kernel in prototype]
fA, fB, fC, fD = filter_responses
mean_response = (fA + fB + fC + fD) / 4.
minAB = np.minimum(fA, fB)
minCD = np.minimum(fC, fD)
diff1 = minAB - mean_response
diff2 = minCD - mean_response
# For an ideal corner, the response of {A,B} should be greater than the mean response of {A,B,C,D},
# while the response of {C,D} should be smaller, and vice versa for flipped corners.
likelihood1 = np.minimum(diff1, -diff2)
likelihood2 = np.minimum(-diff1, diff2) # flipped case
likelihoods.append(likelihood1)
likelihoods.append(likelihood2)
corner_likelihood = np.max(likelihoods, axis=0)
return corner_likelihood
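# Note: each prototype in ck.CORNER_KERNEL_PROTOTYPES is unpacked above into four quadrant
# filters (A, B, C, D); the final likelihood is the element-wise maximum over all prototypes
# and both corner polarities.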
def __find_dominant_directions(hist64, cutoff=10):
# find clusters
clusters = []
cur_cluster = []
val_angle = 0
angle_increment = pi / 64
for val in hist64:
if val > cutoff:
cur_cluster.append((val, val_angle))
else:
if len(cur_cluster) > 0:
clusters.append(cur_cluster)
cur_cluster = []
val_angle += angle_increment
if len(cur_cluster) > 0:
clusters.append(cur_cluster)
# if the first and last values are above the threshold, join the first and last clusters
if hist64[0] > cutoff and hist64[63] > cutoff:
clusters[0] = clusters[len(clusters) - 1] + clusters[0]
clusters = [np.array(cluster) for cluster in clusters[:-1]]
else:
clusters = [np.array(cluster) for cluster in clusters]
if len(clusters) < 2:
return None
# find the two dominant clusters
cluster_areas = [cluster[:, 0].sum() for cluster in clusters]
biggest_at = np.argmax(cluster_areas)
biggest_cluster_area = cluster_areas[biggest_at]
cluster_areas[biggest_at] = -1.0
second_biggest_at = np.argmax(cluster_areas)
cluster_areas = [biggest_cluster_area, cluster_areas[second_biggest_at]]
clusters = [clusters[biggest_at], clusters[second_biggest_at]]
angles = []
for i_cluster in range(0, 2):
cluster = clusters[i_cluster]
area = cluster_areas[i_cluster]
mode = area / 2.0
running_total = 0.
for i_bin in range(0, len(cluster)):
hist_bin = cluster[i_bin]
new_total = running_total + hist_bin[0]
if new_total > mode:
# linear interpolation between bin angles
if i_bin > 0:
angle_1 = cluster[i_bin - 1][1]
angle_2 = cluster[i_bin][1]
frac = (mode - running_total) / hist_bin[0]
else:
angle_1 = cluster[0][1]
angle_2 = cluster[1][1]
frac = mode / new_total
if angle_1 > angle_2:
angle_2 += pi
angle = angle_1 + frac * (angle_2 - angle_1)
break
running_total = new_total
angles.append((-angle + (pi / 2)) % pi)
angles.sort()
return tuple(angles)
def __build_corner_template(size, directions):
template = np.zeros((size, size), dtype=np.float32)
a45 = pi / 4
a90 = pi / 2
a135 = pi / 2 + pi / 4
s = size // 2
for direction in directions:
on_vertical_border = True
sign = 1.0
if 0. <= direction < a45:
beta = direction
elif a45 <= direction < a90:
beta = a90 - direction
on_vertical_border = False
elif a90 <= direction < a135:
beta = direction - a90
on_vertical_border = False
sign = -1.0
elif a135 <= direction < pi:
beta = pi - direction
sign = -1.0
else:
raise ValueError("Illegal direction value: {:.3f}. Direction must be within [0, pi)".format(direction))
s_tan_beta = s * tan(beta)
p0c0 = 0
p0c1 = int(0 + s + sign * s_tan_beta)
p1c0 = 2 * s
p1c1 = int(0 + s - sign * s_tan_beta)
if on_vertical_border:
p0 = (p0c0, p0c1)
p1 = (p1c0, p1c1)
else:
p0 = (p0c1, p0c0)
p1 = (p1c1, p1c0)
cv2.line(template, p0, p1, 1, 3, cv2.LINE_AA)
return template
def __filter_candidate(greyscale_image, coord, neighborhood_size):
window = greyscale_image[coord[0] - neighborhood_size:coord[0] + neighborhood_size + 1,
coord[1] - neighborhood_size:coord[1] + neighborhood_size + 1]
grad_x = cv2.Sobel(window, cv2.CV_32FC1, dx=1, dy=0, ksize=3)
grad_y = cv2.Sobel(window, cv2.CV_32FC1, dx=0, dy=1, ksize=3)
grad_mag = np.abs(grad_x) + np.abs(grad_y)
grad_mag_flat = grad_mag.flatten()
orientations_flat = (cv2.phase(grad_x, grad_y) % pi).flatten() # phase accuracy: about 0.3 degrees
hist = (np.histogram(orientations_flat, bins=64, range=(0, pi), weights=grad_mag_flat)[0] /
(neighborhood_size * neighborhood_size))
return hist, grad_mag
def find_candidates(greyscale_image, neighborhood_size=20, candidate_threshold=.5):
corner_likelihood = compute_inital_corner_likelihood(greyscale_image)
# TODO: the absolute threshold should be statistically determined based on actual checkerboard images
candidates = peak_local_max(corner_likelihood, neighborhood_size, corner_likelihood.max() * candidate_threshold)
return candidates
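# Hedged usage sketch (the image file name is hypothetical):
#   img = cv2.imread("checkerboard.png", cv2.IMREAD_GRAYSCALE).astype(np.float64)
#   candidates = find_candidates(img)
# i.e. candidates are the local maxima of the corner likelihood above a relative threshold.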
def prep_img_save(img, b=5):
return cv2.normalize(cv2.copyMakeBorder(img, b, b, b, b, cv2.BORDER_CONSTANT, value=0), 0, 255,
cv2.NORM_MINMAX).astype(np.uint8)
def find_chessboard_corners(greyscale_image, neighborhood_size=10, candidate_threshold=.5):
candidates = find_candidates(greyscale_image, neighborhood_size, candidate_threshold)
bordered_image = cv2.copyMakeBorder(greyscale_image, neighborhood_size, neighborhood_size, neighborhood_size,
neighborhood_size, cv2.BORDER_CONSTANT, value=0)
detected_corners = []
windows = []
grad_mags = []
templates = []
ix_candidate = 0
for candidate in candidates:
print(ix_candidate)
coord = candidate
window = greyscale_image[coord[0] - neighborhood_size:coord[0] + neighborhood_size + 1,
coord[1] - neighborhood_size:coord[1] + neighborhood_size + 1]
hist, grad_mag = __filter_candidate(bordered_image, candidate, neighborhood_size)
win_b = cv2.copyMakeBorder(window, 5, 5, 5, 5, cv2.BORDER_CONSTANT, value=0)
windows.append(win_b)
grad_mags.append(prep_img_save(grad_mag))
angles = __find_dominant_directions(hist)
if angles is not None:
template = __build_corner_template(neighborhood_size * 2 + 1, angles)
templates.append(prep_img_save(template))
else:
templates.append(np.zeros_like(win_b))
ix_candidate += 1
# if __filter_candidate(bordered_image, candidate, neighborhood_size):
# detected_corners.append(candidate)
ch_test = np.vstack((np.hstack(windows), np.hstack(grad_mags), np.hstack(templates)))
cv2.imwrite("~/Desktop/TMP/ch_test01.png", ch_test)
detected_corners =
|
np.array(detected_corners)
|
numpy.array
|
import numpy as np
import pandas as pd
import scipy.interpolate as si
import chart_studio.plotly as py
# import plotly.graph_objs as go
from plotly.graph_objs import *
from rho_factor.config import *
class rho:
def __init__(self, df=None, wl=None):
self.df = df
self.aot = 0.1
self.ws = 2
self.wl = wl
self.rhosoaa_fine_file = rhosoaa_fine_file
self.rhosoaa_coarse_file = rhosoaa_coarse_file
self.M1999_file = M1999_file
self.M2015_file = M2015_file
#self.load_rho_lut()
def load_rho_lut(self):
self.rhosoaa_fine = pd.read_csv(self.rhosoaa_fine_file, index_col=[0, 1, 2, 3, 4, 5])
self.rhosoaa_coarse = pd.read_csv(self.rhosoaa_coarse_file, index_col=[0, 1, 2, 3, 4, 5])
self.rhoM1999 = pd.read_csv(self.M1999_file, skiprows=7)
self.rhoM2015 = pd.read_csv(self.M2015_file, skiprows=8)
def get_rho_values(self, ws=2, aot=0.1, sza=[30], wl=None):
if wl is not None:
self.wl = wl
grid = self.rho.rho.index.levels
# convert pandas dataframe into 6D array of the tabulated rho values for interpolation
rho_ = reshape().df2ndarray(self.rho, 'rho')
rho_wl = calc().spline_4d(grid, rho_[:, :, :, :, 1, 1], ([ws], [aot], self.wl, sza))
return rho_wl
def process(self, ws=2, aot=0.1):
wl = self.wl
df = self.df
rho = self.get_rho_values(wl=wl, sza=df['sza'].values.mean())
self.Rrs = (df.loc[:, ("Lt")] - rho * df.loc[:, ("Lsky")]) / df.loc[:, ("Ed")]
self.Rrs.columns = pd.MultiIndex.from_product([['Rrs(awr)'], self.Rrs.columns], names=['param', 'wl'])
return self.Rrs, rho
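# Note: the above-water remote-sensing reflectance is computed as Rrs = (Lt - rho * Lsky) / Ed,
# with rho interpolated from the look-up table for the given wind speed, aerosol optical
# thickness, solar zenith angle and wavelengths.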
class calc:
def __init__(self):
pass
def earth_sun_correction(self, dayofyear):
'''
Earth-Sun distance correction factor for adjustment of mean solar irradiance
:param dayofyear:
:return: correction factor
'''
theta = 2. * np.pi * dayofyear / 365
d2 = 1.00011 + 0.034221 * np.cos(theta) + 0.00128 * np.sin(theta) + \
0.000719 *
|
np.cos(2 * theta)
|
numpy.cos
|
import time
from typing import List, Dict, Optional, Tuple
import numpy as np
import pickle
import rl_env
import os
import torch
import torchvision
import rulebased_agent as ra
from internal_agent import InternalAgent
from outer_agent import OuterAgent
from iggi_agent import IGGIAgent
from legal_random_agent import LegalRandomAgent
from flawed_agent import FlawedAgent
from piers_agent import PiersAgent
from van_den_bergh_agent import VanDenBerghAgent
import random
from collections import namedtuple
from typing import NamedTuple
import database as db
import traceback
print(rl_env.__file__)
class Agent(NamedTuple):
name: str
instance: ra.RulebasedAgent
AGENT_CLASSES = {'InternalAgent': InternalAgent,
'OuterAgent': OuterAgent, 'IGGIAgent': IGGIAgent, 'FlawedAgent': FlawedAgent,
'PiersAgent': PiersAgent, 'VanDenBerghAgent': VanDenBerghAgent}
# AGENT_CLASSES = {'PiersAgent': PiersAgent, 'VanDenBerghAgent': VanDenBerghAgent}
COLORS_INV = ['B', 'W', 'G', 'Y', 'R']
RANKS_INV = [4, 3, 2, 1, 0]
# color_offset = (2 * hand_size)
# rank_offset = color_offset + (num_players - 1) * num_colors
def to_int(cfg, action_dict):
try:
action_type = action_dict['action_type']
except Exception:
traceback.print_exc()
print(f'got action = {action_dict}')
exit(1)
if action_type == 'DISCARD':
return action_dict['card_index']
elif action_type == 'PLAY':
return cfg['hand_size'] + action_dict['card_index']
elif action_type == 'REVEAL_COLOR':
color_offset = (2 * cfg['hand_size'])
return color_offset + action_dict['target_offset'] * cfg['colors'] - (COLORS_INV.index(action_dict['color'])) - 1
elif action_type == 'REVEAL_RANK':
rank_offset = 2 * cfg['hand_size'] + (cfg['players'] - 1) * cfg['colors']
return rank_offset + action_dict['target_offset'] * cfg['ranks'] - (RANKS_INV[action_dict['rank']]) - 1
else:
raise ValueError(f'action_dict was {action_dict}')
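# Hedged example (config values assumed purely for illustration): with
# cfg = {'hand_size': 5, 'colors': 5, 'ranks': 5, 'players': 2},
#   to_int(cfg, {'action_type': 'PLAY', 'card_index': 2})  ->  5 + 2 = 7
# i.e. PLAY actions occupy the integer slots directly after the DISCARD actions.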
class Runner:
def __init__(self, hanabi_game_config: Dict, num_players):
# self.hanabi_game_config = hanabi_game_config
self.num_players = hanabi_game_config['players']
self.env_config = hanabi_game_config
self.environment = rl_env.HanabiEnv(hanabi_game_config)
self.agent_config = {'players': self.num_players} # same for all ra.RulebasedAgent instances
@staticmethod
def _initialize_replay_dict(agents):
team = [] # will be a database column containing the team member classnames
replay_dict = {}
for agent in agents:
team.append(agent.name)
try:
# used when writing to database
replay_dict[agent.name] = { # 'states': [],
'int_actions': [],
'dict_actions': [],
'turns': [], # integer indicating which turn of the game it is
'obs_dicts': []}
# used in online collection mode, e.g. when evaluating a NN, otherwise remains empty
replay_dict['states'] = []
replay_dict['actions'] = []
replay_dict['obs_dicts'] = []
except:
# goes here if we have more than one copy of the same agent(key)
pass
return replay_dict, team
def update_replay_dict(self,
replay_dict,
agent,
observation,
current_player_action,
drop_actions,
agent_index,
turn_in_game_i,
keep_obs_dict=True,
keep_agent=True,
pickle_pyhanabi=True):
if pickle_pyhanabi:
observation = pickle.dumps(observation, pickle.HIGHEST_PROTOCOL)
if keep_obs_dict: # more information is saved, when intending to write to database
if keep_agent:
replay_dict[agent.name]['turns'].append(turn_in_game_i)
replay_dict[agent.name]['obs_dicts'].append(observation)
if not drop_actions:
replay_dict[agent.name]['int_actions'].append(to_int(self.env_config, current_player_action)) # to_int_action() currently bugged
replay_dict[agent.name]['dict_actions'].append(current_player_action)
else:
replay_dict['obs_dicts'].append(observation)
else: # less information is saved, e.g. when in online collection mode
replay_dict['states'].append(observation['vectorized'])
if not drop_actions:
replay_dict['actions'].append(current_player_action)
return replay_dict
def run(self,
agents: List[NamedTuple], # Agent('name', 'ra.RulebasedAgent()')
max_games=1,
target_agent: Optional[str] = None,
drop_actions=False,
keep_obs_dict=False,
keep_agent=True,
pickle_pyhanabi=True
):
"""
agents: Agent instances used to play game
max_games: number of games to collect at maximum
target_agent: if provided, only state-action pairs for the corresponding agent are returned
drop_actions: if True, only states will be returned without corresponding actions
keep_obs_dict: If true, returned replay_dict will also contain the observation_dict
With keep_obs_dict=True the replay dictionary carries complete information (database mode);
with keep_obs_dict=False only vectorized states/actions are stored (online collection mode)
"""
def _is_target_agent(agent):
return target_agent is None or target_agent == agent.name
i_game = 0
turns_played = 0
replay_dict, team = self._initialize_replay_dict(agents)
# loop many games
while i_game < max_games:
observations = self.environment.reset()
done = False
turn_in_game_i = 0
# loop one game
while not done:
# play game
for agent_index, agent in enumerate(agents):
observation = observations['player_observations'][agent_index]
try:
action = agent.instance.act(observation)
except Exception as e:
print(traceback.print_exc())
print(f'agent that failed = {agent}')
print(f'observation = {observation}')
exit(1)
if observation['current_player'] == agent_index: # step env on current player action only
assert action is not None
current_player_action = action
if _is_target_agent(agent): # save observation & action to replay_dictionary
replay_dict = self.update_replay_dict(replay_dict=replay_dict,
agent=agent,
observation=observation,
current_player_action=current_player_action,
drop_actions=drop_actions,
agent_index=agent_index,
turn_in_game_i=turn_in_game_i,
keep_obs_dict=keep_obs_dict,
keep_agent=keep_agent,
pickle_pyhanabi=pickle_pyhanabi)
turns_played += 1
turn_in_game_i += 1
else:
assert action is None
# end of turn
observations, reward, done, unused_info = self.environment.step(current_player_action)
# end loop one game
i_game += 1
# end loop many games
return replay_dict, turns_played
class StateActionCollector:
def __init__(self,
hanabi_game_config,
agent_classes: Dict[str, ra.RulebasedAgent],
# num_players: int,
target_agent: Optional[str] = None
):
self.agent_classes = agent_classes # pool of agents used to play
self.num_players = hanabi_game_config['players']
self._target_agent = target_agent
self.runner = Runner(hanabi_game_config, self.num_players)
self.initialized_agents = {} # Dict[str, namedtuple]
self._replay_dict = {}
def _initialize_all_agents(self):
"""
set self.initialized_agents, so that run() calls won't re-initialize them every time
and instead, their instances can be sampled for each game
"""
initialized_agents = {}
for agent_str, agent_cls in self.agent_classes.items():
initialized_agents[agent_str] = Agent(name=agent_str,
instance=agent_cls({'players': self.num_players}))
self.initialized_agents = initialized_agents # Dict[str, NamedTuple]
def _get_players(self, k, target_agent: Optional[str] = None) -> List[NamedTuple]:
"""
If target_agent is specified, it will be one of the players
"""
players = []
if target_agent:
players.append(self.initialized_agents[target_agent])
players += random.choices(list(self.initialized_agents.values()), k=k)
return players
statelist = List[List]
actionlist = List
def write_to_database(self, path, replay_dictionary, team, with_obs_dict):
# | num_players | agent | turn | state | action | team |
# current_player: 0
# current_player_offset: 0
# deck_size: 40
# discard_pile: []
# fireworks: {}
# information_tokens: 8
# legal_moves: [{}, ..., {}]
# life_tokens: 3
# observed_hands: [[{},...,{}], ..., [{},...,{}]]
# num_players: 2
# vectorized:
# pyhanabi
if not with_obs_dict:
# later we may implement smaller databases, where we drop the observation dictionary
raise NotImplementedError("Database layout requires observation dictionary")
# creates database at path, if it does not exist already
conn = db.create_connection(path)
# if table exists, appends dictionary data, otherwise it creates the table first and then inserts
replay_dictionary['team'] = [agent.name for agent in team]
replay_dictionary['num_players'] = self.num_players
db.insert_data(conn, replay_dictionary, with_obs_dict)
@staticmethod
def _accumulate_states_maybe_actions(source, target):
""" Assumes target has keys 'states' and 'actions' """
if not isinstance(target['states'], torch.FloatTensor) or isinstance(target['states'], np.ndarray):
target['states'] = np.array(source['states'])
target['actions'] =
|
np.array(source['actions'])
|
numpy.array
|
import argparse
import numpy as np
import os
import tabulate
import torch
import torch.nn.functional as F
from torch.nn.utils import vector_to_parameters, parameters_to_vector
import data
import models
import curves
import utils
parser = argparse.ArgumentParser(description='DNN curve evaluation')
parser.add_argument('--dir', type=str, default='/tmp/eval', metavar='DIR',
help='training directory (default: /tmp/eval)')
parser.add_argument('--num_points', type=int, default=5, metavar='N',
help='number of points on the curve (default: 5)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default=None, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num_workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--ckpt1', type=str, default=None, metavar='CKPT',
help='checkpoint 1 to eval (default: None)')
parser.add_argument('--ckpt2', type=str, default=None, metavar='CKPT',
help='checkpoint 2 to eval (default: None)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
torch.backends.cudnn.benchmark = True
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test,
shuffle_train=False
)
architecture = getattr(models, args.model)
model1 = architecture.base(num_classes=num_classes)
if torch.cuda.is_available():
model1.cuda()
model1.load_state_dict(torch.load(args.ckpt1))
else:
model1.load_state_dict(torch.load(args.ckpt1, map_location=torch.device('cpu')))
model2 = architecture.base(num_classes=num_classes)
if torch.cuda.is_available():
model2.cuda()
model2.load_state_dict(torch.load(args.ckpt2))
else:
model2.load_state_dict(torch.load(args.ckpt2, map_location=torch.device('cpu')))
mid_model = architecture.base(num_classes=num_classes)
criterion = F.cross_entropy
regularizer = curves.l2_regularizer(args.wd)
T = args.num_points
ts = np.linspace(0.0, 1.0, T)
tr_loss = np.zeros(T)
tr_nll = np.zeros(T)
tr_acc = np.zeros(T)
te_loss = np.zeros(T)
te_nll = np.zeros(T)
te_acc = np.zeros(T)
tr_err = np.zeros(T)
te_err = np.zeros(T)
dl = np.zeros(T)
previous_weights = None
columns = ['t', 'Train loss', 'Train nll', 'Train error (%)', 'Test nll', 'Test error (%)']
# t = torch.FloatTensor([0.0])
# if torch.cuda.is_available():
# t = t.cuda()
for i, t_value in enumerate(ts):
#t.data.fill_(t_value)
mid_update = t_value*parameters_to_vector(model1.parameters()) + (1-t_value)*parameters_to_vector(model2.parameters())
vector_to_parameters(mid_update, mid_model.parameters())
utils.update_bn(loaders['train'], mid_model)
tr_res = utils.test(loaders['train'], mid_model, criterion, regularizer)
te_res = utils.test(loaders['test'], mid_model, criterion, regularizer)
tr_loss[i] = tr_res['loss']
tr_nll[i] = tr_res['nll']
tr_acc[i] = tr_res['accuracy']
tr_err[i] = 100.0 - tr_acc[i]
te_loss[i] = te_res['loss']
te_nll[i] = te_res['nll']
te_acc[i] = te_res['accuracy']
te_err[i] = 100.0 - te_acc[i]
values = [t_value, tr_loss[i], tr_nll[i], tr_err[i], te_nll[i], te_err[i]]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='10.4f')
if i % 40 == 0:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
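# The loop above evaluates the straight line segment t*theta_1 + (1 - t)*theta_2 between the
# two checkpoints in weight space, refreshing the BatchNorm running statistics at every t
# before measuring train/test loss, NLL and error.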
def stats(values, dl):
min =
|
np.min(values)
|
numpy.min
|
def etiquetar():
rotulo_Liquido = "Composición de fase líquido"
rotulo_Vapor = "Composición de fase vapor"
rotulo_Separador = "-" * 11
etiqueta_liquido = "{0}{1}{0}".format(rotulo_Separador, rotulo_Liquido)
etiqueta_vapor = "{0}{1}{0}".format(rotulo_Separador, rotulo_Vapor)
return etiqueta_liquido, etiqueta_vapor
print(etiquetar()[0])
def function():
print("Metano, Butano, Hexano")
etiqueta_liquido = "Composición de fase líquida"
etiqueta_vapor = "Composición de fase vapor"
print(" -*{} {etiqueta_liquido}".format(etiqueta_liquido))
print("xi = ", xy[0])
print("Sxi = ", np.sum(xy[0]))
print("-" * 20)
print("yi = ", xy[1])
print("Syi = ", np.sum(xy[1]))
pass
class ClassName(object):
"""docstring for ClassName"""
def __init__(self, arg):
super(ClassName, self).__init__()
self.arg = arg
def function_0():
pass
def function_1():
pass
def wilson(self, Pc, Tc, w, T):
# Wilson equation
lnKi = np.log(Pc / self.P) + 5.373 * (1 + w) * (1 - Tc / self.T)
self.Ki = np.exp(lnKi)
return self.Ki
def beta(self, zi):
self.zi = zi
self.Ki = self.wilson(Pc, Tc, w, T)
Bmin = np.divide((self.Ki * self.zi - 1), (self.Ki - 1))
#print (("Bmin_inter = ", Bmin))
Bmax = np.divide((1 - self.zi), (1 - self.Ki))
#print (("Bmax_inter = ", Bmax))
self.Bini = (np.max(Bmin) + np.min(Bmax)) / 2
return self.Bini
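# rice() below evaluates the Rachford-Rice objective g(B) = sum_i zi*(Ki - 1)/(1 - B + B*Ki)
# and its derivative dg/dB; flash_ideal() then drives g(B) to zero with Newton steps.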
def rice(self, zi, Ki, Bini):
self.zi = zi
self.Bini = Bini
self.Ki = Ki
self.fg = np.sum(self.zi * (self.Ki - 1) / (1 - self.Bini + self.Bini * self.Ki))
self.dfg = - np.sum(self.zi * (self.Ki - 1) ** 2 / (1 - self.Bini + self.Bini * self.Ki) ** 2)
#print g, dg
return self.fg, self.dfg
def flash_ideal(self):
self.Bini = self.beta(zi)
self.Ki = self.wilson(self.Pc, self.Tc, self.w, self.T)
print("Ki_(P, T) = ", self.Ki)
Eg = self.rice(zi, self.Ki, self.Bini)
errorEq = abs(Eg[0])
i, s = 0, 1
while errorEq > ep:
Eg = self.rice(zi, self.Ki, self.Bini)
self.Bini = self.Bini - s * Eg[0] / Eg[1]
errorEq = abs(Eg[0])
i += 1
if i >= 50:
break
xy = self.composicion_xy(zi, self.Ki, self.Bini)
print ("Metano, Butano, Hexano")
print ("-------------Composición de fase líquida------------------------")
print ("xi = ", xy[0])
print ("Sxi = ", np.sum(xy[0]))
print ("-------------Composición de fase vapor------------------------")
print ("yi = ", xy[1])
print ("Syi = ", np.sum(xy[1]))
return Eg[0], Eg[1], self.Bini
def flash_PT(self):
flashID = self.flash_ideal()
print ("flash (P, T, zi)")
print ("g, dg, B = ", flashID)
print ("---------------------------------------------------------------")
self.Bini = flashID[2]
print ("Beta_r ini = ", self.Bini)
moles = self.composicion_xy(zi, self.Ki, self.Bini)
self.xi, self.yi = moles[0], moles[1]
nil, niv = moles[2], moles[3]
fi_F = self.fugac()
#print "nil = ", nil, np.sum(nil)
#print "Snil = ", np.sum(nil)
#print "niv", niv, np.sum(niv)
#print "Sniv = ", np.sum(niv)
#nF = 2
#CoeFugi = np.ones((nF, nC))
#for i in range(nF):
# if i == 1:
# self.ni = nil
# self.nT = np.sum(self.ni)
# elif i == 2:
# self.ni = niv
# self.nT = np.sum(self.ni)
# Flug_i = self.fluido(self.P)
# CoeFugi[i, :] = Flug_i[1]
#print CoeFugi
#self.Ki = CoeFugi[0, :] / CoeFugi[1, :]
self.Ki = fi_F[0] / fi_F[1]
L = 1.0
self.Ki = self.Ki * L
Ki_1 = self.Ki
print ("Ki_(P, T, ni) primera = ", self.Ki)
print ("----------------------------------------------------------------")
#self.Ki = np.array([1.729, 0.832, 0.640])
#self.Ki = self.wilson(self.Pc, self.Tc, self.w, self.T)
#print "Ki_(P, T) = ", self.Ki
while 1:
i, s = 0, 0.1
while 1:
Eg = self.rice(zi, self.Ki, self.Bini)
print (Eg)
self.Bini = self.Bini - s * Eg[0] / Eg[1]
print (self.Bini)
errorEq = abs(Eg[0])
i += 1
#print i
#if self. Bini < 0 or self.Bini > 1:
#break
# self.Bini = 0.5
if i >= 50:
pass
#break
if errorEq < 1e-5:
break
print ("Resultado Real = ", Eg)
print (" Beta r = ", self.Bini)
moles = self.composicion_xy(zi, self.Ki, self.Bini)
self.xi, self.yi = moles[0], moles[1]
#xy = self.composicion_xy(zi, self.Ki, self.Bini)
print ("C1 -i-C4 n-C4")
print ("-----------Composición de fase líquida----------------------")
print ("xi = ", moles[0])
print ("Sxi = ", np.sum(moles[0]))
print ("-----------Composición de fase vapor------------------------")
print ("yi = ", moles[1])
print ("Syi = ", np.sum(moles[1]))
fi_F = self.fugac()
self.Ki = fi_F[0] / fi_F[1]
Ki_2 = self.Ki
dKi = abs(Ki_1 - Ki_2)
Ki_1 = Ki_2
print ("Ki_(P, T, ni) = ", self.Ki)
fun_Ki = np.sum(dKi)
print ("fun_Ki = ", fun_Ki)
if fun_Ki < 1e-5:
break
return flashID
def fugac(self):
'''
This function, called "fugacidad" (fugacity), computes the fugacity coefficients
with an equation of state for multicomponent mixtures.
T = temperature in Kelvin
Y = mole fraction of component i in the vapour phase
X = mole fraction of component i in the liquid phase
'''
self.T
self.P
#----------------------------------------------------------------------
self.Fw = 0.48 + (1.574 * self.w) - (0.176 * self.w ** 2)
a = ((0.42748 * (self.R * self.Tc) ** 2) / self.Pc) * ((1 + self.Fw * (1 - (self.Tr ** 0.5))) ** 2)
b = (0.08664 * self.R * self.Tc) / self.Pc
#----------------------------------------------------------------------
#print Fw, "Parametro a:", a, b
#Yf = np.array([Y4,Y5,Y6])
Yf = self.yi
Xf = self.xi
#print Yf, Xf
#----------------------- Vapor -----------------------------------------
amv = np.sum(Yf * a ** 0.5) ** 2
aml = np.sum(Xf * a ** 0.5) ** 2
#-------------------------------
#print "amv = ", amv
#print "aml = ", aml
bmv = np.sum(Yf * b)
bml = np.sum(Xf * b)
#print "bmv = ", bmv
#print "bml = ", bml
Av = (amv * self.P) / ((self.R * self.T) ** 2)
Bv = (bmv * self.P) / (self.R * self.T)
#-------------------- Liquido -------------------
Al = (aml * self.P) / ((self.R * self.T) ** 2)
Bl = (bml * self.P) / (self.R * self.T)
#print "Av", Av
#print "Bv", Bv
#print "Al", Al
#print "Av", Bl
Zfv = [1, -1, (Av - Bv - Bv ** 2), (- Av * Bv)]
ZfvR = np.roots(Zfv)
Zv =
|
np.max(ZfvR)
|
numpy.max
|
import numpy as np
import time
from keras.utils import np_utils
from keras.preprocessing.image import transform_matrix_offset_center, apply_transform
np.random.seed(int((time.time()*1e6)%1e6))
def rotation(x, theta, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='wrap', cval=0.):
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[
|
np.sin(theta)
|
numpy.sin
|
# this script trains 3 different classifiers
# stores the model weights in pickle files
PATH_TO_FACES = './models/eigenfaces.pickle'
PATH_TO_TRAINING_PERSON_1 = './data/training_faces/person_1/'
PATH_TO_TRAINING_PERSON_2 = './data/training_faces/person_2/'
PATH_TO_KNN_MODEL = './models/knn_model.pickle'
PATH_TO_SVM_MODEL = './models/svm_model.pickle'
PATH_TO_RANDOM_FOREST_MODEL = './models/random_forest_model.pickle'
PATH_TO_TEST_FACES_PERSON_1 = './data/test_faces/person_1/'
PATH_TO_TEST_FACES_PERSON_2 = './data/test_faces/person_2/'
KNN_K = 5
import pickle
import os
import cv2
import numpy as np
import copy
import math
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
# perform KNN classifier
def _importFace(path):
img = cv2.imread(path)
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR, matching the BGR2GRAY conversions used elsewhere in this file
def _subtractMean(faces1, faces2, meanFace):
face1New = copy.deepcopy(faces1)
face2New = copy.deepcopy(faces2)
face1New = [item-meanFace for item in face1New]
face2New = [item-meanFace for item in face2New]
return face1New, face2New
def _generic_evaluate(classifierFunction):
person1 = [PATH_TO_TEST_FACES_PERSON_1+item for item in os.listdir(PATH_TO_TEST_FACES_PERSON_1)]
person2 = [PATH_TO_TEST_FACES_PERSON_2+item for item in os.listdir(PATH_TO_TEST_FACES_PERSON_2)]
predictions1 = [classifierFunction(item) for item in person1]
predictions2 = [classifierFunction(item) for item in person2]
correct = 0
for item in predictions1:
if item == 1:
correct+=1
for item in predictions2:
if item == 2:
correct+=1
accuracy = float(correct)/(len(predictions1)+len(predictions2))
return accuracy
def _generic_training_preprocessing():
pickle_in = open(PATH_TO_FACES,'rb')
data= pickle.load(pickle_in)
meanFace = data["mean_face"]
eigenFaces = data["eigen_faces"]
facePaths1 = [(PATH_TO_TRAINING_PERSON_1+item) for item in os.listdir(PATH_TO_TRAINING_PERSON_1)]
facePaths2 = [(PATH_TO_TRAINING_PERSON_2+item) for item in os.listdir(PATH_TO_TRAINING_PERSON_2)]
faces1 = [_importFace(item) for item in facePaths1 ]
faces2 = [_importFace(item) for item in facePaths2 ]
faces1Norm, faces2Norm = _subtractMean(faces1, faces2, meanFace)
#convert the faces into vectors
x,y = faces1Norm[0].shape
length = x*y
reshapedFaces1 = [np.reshape(item, (length,1)) for item in faces1Norm]
reshapedFaces2 = [np.reshape(item, (length,1)) for item in faces2Norm]
numEigenFaces = len(eigenFaces)
x,y = eigenFaces[0].shape
length = x*y
eigenFaceMatrix = np.zeros((length,numEigenFaces))
for i in range(numEigenFaces):
faceColumn = np.reshape(eigenFaces[i], (length,1))
eigenFaceMatrix[:,i] = np.squeeze(faceColumn)
eigenFaceMatrix = np.transpose(eigenFaceMatrix)
componentFaces1 = [np.matmul(eigenFaceMatrix, item) for item in reshapedFaces1]
componentFaces2 = [np.matmul(eigenFaceMatrix, item) for item in reshapedFaces2]
return meanFace, eigenFaces,componentFaces1,componentFaces2,eigenFaceMatrix
def knnTrain():
meanFace, eigenFaces,componentFaces1,componentFaces2,eigenFaceMatrix = _generic_training_preprocessing()
mergedItems = []
for item in componentFaces1:
mergedItems+= [(item,1)]
for item in componentFaces2:
mergedItems+= [(item,2)]
model = {}
model['items'] = mergedItems
model['mean_face'] = meanFace
model['pca_components'] = eigenFaceMatrix
with open(PATH_TO_KNN_MODEL, 'wb') as handle:
pickle.dump(model,handle)
return True
def _computeDistance(vect1, vect2):
if len(vect1) != len(vect2):
raise TypeError('length mismatch')
dist = 0.0
for i in range(len(vect1)):
dist += (vect1[i][0] - vect2[i][0])**2
dist = math.sqrt(dist)
return dist
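# Note: _computeDistance is a plain Euclidean distance between column vectors; an equivalent
# one-liner (not used above) would be np.linalg.norm(np.asarray(vect1) - np.asarray(vect2)).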
def knnClassify(imagePath):
pickle_in = open(PATH_TO_KNN_MODEL,'rb')
data= pickle.load(pickle_in)
trainItems = data['items']
meanFace = data['mean_face']
pcaComponents = data['pca_components']
image = cv2.imread(imagePath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
imageNorm = image - meanFace
x,y = imageNorm.shape
length = x*y
vectorized = np.reshape(imageNorm,(length,1))
pcaComps = np.matmul(pcaComponents, vectorized)
distances = []
for item in trainItems:
weights, label = item
distance = _computeDistance(weights, pcaComps)
distances +=[(distance ,label)]
distances.sort(key=lambda tup: tup[0], reverse=False)
distances = distances[0:KNN_K]
vote_1 = 0
vote_2 = 0
for item in distances:
_,label = item
if label == 1:
vote_1 +=1
else:
vote_2 +=1
if vote_1 > vote_2:
return 1
else:
return 2
def knnEvaluate():
# get the accuracy of the knn classifier
accuracy = _generic_evaluate(knnClassify)
return accuracy
def svmTrain():
# first we need to run PCA on the training data
# to obtain an array of PCA component vectors
meanFace, eigenFaces,componentFaces1,componentFaces2,eigenFaceMatrix = _generic_training_preprocessing()
componentFaces1 = [np.transpose(item).tolist()[0] for item in componentFaces1]
componentFaces2 = [np.transpose(item).tolist()[0] for item in componentFaces2]
mergedItems = []
labels = []
for item in componentFaces1:
mergedItems+= [item]
labels+=[1]
for item in componentFaces2:
mergedItems+= [item]
labels+=[2]
# perform the training
classifier = svm.SVC(gamma='scale')
classifier.fit(mergedItems,labels)
# save the classifier
model = {}
model['mean_face'] = meanFace
model['pca_components'] = eigenFaceMatrix
model['classifier']=classifier
with open(PATH_TO_SVM_MODEL, 'wb') as handle:
pickle.dump(model,handle)
return True
def svmPredict(imgPath):
# load the classifier
pickle_in = open(PATH_TO_SVM_MODEL,'rb')
data= pickle.load(pickle_in)
svmClassifier = data['classifier']
meanFace = data['mean_face']
pcaComponents = data['pca_components']
image = cv2.imread(imgPath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
imageNorm = image - meanFace
x,y = imageNorm.shape
length = x*y
vectorized = np.reshape(imageNorm,(length,1))
pcaComps = np.transpose(np.matmul(pcaComponents, vectorized)).tolist()
# pipeline the test item
prediction = svmClassifier.predict(pcaComps)
# prediction
return prediction[0]
def svmEvaluate():
accuracy = _generic_evaluate(svmPredict)
return accuracy
def boostedTreeTrain():
meanFace, eigenFaces,componentFaces1,componentFaces2,eigenFaceMatrix = _generic_training_preprocessing()
componentFaces1 = [np.transpose(item).tolist()[0] for item in componentFaces1]
componentFaces2 = [
|
np.transpose(item)
|
numpy.transpose
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 11:14:16 2018
@author: SilverDoe
"""
# Import `datasets` from `sklearn`
from sklearn import datasets,svm,metrics
from pprint import pprint
# Load in the `digits` data
digits = datasets.load_digits()
# Print the `digits` data
pprint(digits)
# Get the keys of the `digits` data
print(digits.keys())
# Print out the data
print(digits.data)
# Print out the target values
print(digits.target)
# Print out the description of the `digits` data
print(digits.DESCR)
'''============================================================================'''
# Isolate the `digits` data
digits_data = digits.data
# Inspect the shape
print(digits_data.shape)
# Isolate the target values with `target`
digits_target = digits.target
# Inspect the shape
print(digits_target.shape)
import numpy as np
# Print the number of unique labels
number_digits = len(
|
np.unique(digits.target)
|
numpy.unique
|
import pytest
import numpy as np
from scipy.interpolate import interpn
from ardent.utilities import _validate_xyz_resolution
from ardent.utilities import _compute_axes
from ardent.utilities import _compute_coords
from ardent.preprocessing.resampling import _resample
from ardent.preprocessing.resampling import _downsample_along_axis
from ardent.preprocessing.resampling import downsample_image
from ardent.preprocessing.resampling import change_resolution_to
from ardent.preprocessing.resampling import change_resolution_by
"""
Test _resample.
"""
def test__resample():
# Test proper use.
shape = (3,4)
resolution = 1.5
origin = 'zero'
attempted_scales = 1
image = np.arange(np.prod(shape)).reshape(shape)
real_axes = _compute_axes(image.shape, resolution, origin=origin)
new_shape = np.floor(np.multiply(image.shape, attempted_scales))
real_scales = np.divide(new_shape, image.shape)
new_shape = np.floor(np.multiply(np.subtract(image.shape, 1), real_scales)) + 1
new_real_coords = _compute_coords(new_shape, resolution / real_scales, origin=origin)
correct_output = interpn(points=real_axes, values=image, xi=new_real_coords)
assert np.array_equal(_resample(image, real_axes, new_real_coords), correct_output)
shape = (3)
resolution = 1
origin = 'zero'
attempted_scales = 1
image = np.arange(np.prod(shape)).reshape(shape)
real_axes = _compute_axes(image.shape, resolution, origin=origin)
new_shape = np.floor(np.multiply(image.shape, attempted_scales))
real_scales = np.divide(new_shape, image.shape)
new_shape = np.floor(np.multiply(np.subtract(image.shape, 1), real_scales)) + 1
new_real_coords = _compute_coords(new_shape, resolution / real_scales, origin=origin)
correct_output = interpn(points=real_axes, values=image, xi=new_real_coords)
assert np.array_equal(_resample(image, real_axes, new_real_coords), correct_output)
shape = (3,4,5)
resolution = (0.5,1,1.5)
origin = 'center'
attempted_scales = (1/2,1/3,1/4)
image = np.arange(np.prod(shape)).reshape(shape)
real_axes = _compute_axes(image.shape, resolution, origin=origin)
new_shape = np.floor(np.multiply(image.shape, attempted_scales))
real_scales = np.divide(new_shape, image.shape)
new_shape = np.floor(np.multiply(np.subtract(image.shape, 1), real_scales)) + 1
new_real_coords = _compute_coords(new_shape, resolution / real_scales, origin=origin)
correct_output = interpn(points=real_axes, values=image, xi=new_real_coords)
assert np.array_equal(_resample(image, real_axes, new_real_coords), correct_output)
shape = (6,7,8,9)
resolution = (0.5,1,1.5,2.5)
origin = 'center'
attempted_scales = (2,3,3.5,np.pi)
image = np.arange(np.prod(shape)).reshape(shape)
real_axes = _compute_axes(image.shape, resolution, origin=origin)
new_shape = np.floor(np.multiply(image.shape, attempted_scales))
real_scales = np.divide(new_shape, image.shape)
new_shape = np.floor(np.multiply(np.subtract(image.shape, 1), real_scales)) + 1
new_real_coords = _compute_coords(new_shape, resolution / real_scales, origin=origin)
correct_output = interpn(points=real_axes, values=image, xi=new_real_coords)
assert np.array_equal(_resample(image, real_axes, new_real_coords), correct_output)
# # Test uniform resolutions.
# real_axes = _compute_axes(image.shape, resolution, origin='center')
# new_shape = np.floor(np.multiply(image.shape, xyz_scales))
# real_scales = np.divide(new_shape, image.shape)
# new_real_coords = _compute_coords(new_shape, resolution / real_scales, origin='center')
# correct_output = interpn(points=real_axes, values=image, xi=new_real_coords)
# assert np.array_equal(_resample(image, real_axes, new_real_coords), correct_output)
# # Test non-uniform resolutions.
# dynamic_resolution = np.arange(1, image.ndim + 1) * resolution
# real_axes = _compute_axes(image.shape, dynamic_resolution, origin='center')
# new_shape = np.floor(np.multiply(image.shape, xyz_scales))
# real_scales = np.divide(new_shape, image.shape)
# new_real_coords = _compute_coords(new_shape, dynamic_resolution / real_scales, origin='center')
# correct_output = interpn(points=real_axes, values=image, xi=new_real_coords)
# assert np.array_equal(_resample(image, real_axes, new_real_coords), correct_output)
# image = np.arange(3*4).reshape(3,4)
# resolution = 0.5
# xyz_scales = 1/3
# test__resample(image=image, resolution=resolution, xyz_scales=xyz_scales)
"""
Test _downsample_along_axis.
"""
def test__downsample_along_axis():
# Test proper use.
# Test identity.
kwargs = dict(
image=np.arange(3*4).reshape(3,4),
axis=0,
scale_factor=1,
truncate=False,
)
correct_output = np.arange(3*4).reshape(3,4)
assert np.array_equal(_downsample_along_axis(**kwargs), correct_output)
# Test basic use with negative axis.
kwargs = dict(
image=np.arange(3*4).reshape(3,4),
axis=-1,
scale_factor=2,
truncate=False,
)
correct_output = np.arange(0.5,12,2).reshape(3,2)
assert np.array_equal(_downsample_along_axis(**kwargs), correct_output)
# Test uneven case.
kwargs = dict(
image=np.arange(9*3,dtype=float).reshape(9,3),
axis=0,
scale_factor=4,
truncate=False,
)
correct_output = np.stack(
(
np.average(np.arange(9*3).reshape(9,3).take(range(3),0),0),
np.average(np.arange(9*3).reshape(9,3).take(range(3,3+4),0),0),
np.average(np.arange(9*3).reshape(9,3).take(range(3+4,9),0),0),
),0)
assert np.array_equal(_downsample_along_axis(**kwargs), correct_output)
# Test uneven case with dtype=int.
kwargs = dict(
image=np.arange(9*3,dtype=int).reshape(9,3),
axis=0,
scale_factor=4,
truncate=False,
)
image = np.arange(9*3,dtype=int).reshape(9,3)
intermediate_padded_image = np.concatenate((
# np.pad(image.astype(int), mode='mean') rounds half to even before casting to int.
np.round(np.mean(image.take(range(3),0),0,keepdims=True)),
np.arange(9*3,dtype=int).reshape(9,3),
np.round(np.mean(image.take(range(7,9),0),0,keepdims=True)),
np.round(np.mean(image.take(range(7,9),0),0,keepdims=True)),
),0)
correct_output = np.stack(
(
np.average(intermediate_padded_image.take(range(4),0),0),
np.average(intermediate_padded_image.take(range(4,4+4),0),0),
np.average(intermediate_padded_image.take(range(4+4,12),0),0),
),0)
assert np.array_equal(_downsample_along_axis(**kwargs), correct_output)
# Test uneven case with truncation.
kwargs = dict(
image=np.arange(3*11,dtype=float).reshape(3,11),
axis=-1,
scale_factor=4,
truncate=True,
)
correct_output = np.stack(
(
np.average(
|
np.arange(3*11)
|
numpy.arange
|
import numpy as np
np.set_printoptions(precision=4, suppress=True)
from BN import *
ev = (1,1,1,1,1)
p1 = Node(
|
np.array([.001])
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 11:55:07 2020
@author: ja17375
"""
import EnsembleVisualiser
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import mplstereonet
from numpy import deg2rad, rad2deg, sin, cos
import numpy as np
from skimage import measure
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
FIG_DIR = '/Users/ja17375/SWSTomo/Figures'
def plot_alpha_gamma_pdf(Ensemble, pdf):
'''Plots 2D pdf for the parameters alpha, gamma from model ensemble'''
    # sample PDF at sample points, reshape to fit grid, take transpose of array so it's the right way round
fig, ax = plt.subplots()
C = ax.contourf(pdf, cmap=plt.cm.gist_earth_r,
extent=[Ensemble.model_config['alpha_min'], Ensemble.model_config['alpha_max'],
Ensemble.model_config['gamma_min'], Ensemble.model_config['gamma_max']],
)
plt.colorbar(C)
# ax.plot(Ensemble.models.alpha, Ensemble.models.gamma, '.')
def plot_3d_pdf(Ensemble):
alpha, gamma, strength = np.meshgrid(Ensemble.alpha_samples,
Ensemble.gamma_samples,
Ensemble.strength_samples)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(alpha, gamma, strength, c=Ensemble.pdf_3d)
plt.show()
def plot_model_slices(Ensemble, model, method, nsamps = 100, save=False):
    '''Plot 2-D slices through the PDF for an input model point'''
# Check model
if len(model) == 3:
print('Plotting 2-D sections through model')
print(f'alpha = {model[0]}')
print(f'gamma = {model[1]}')
print(f'strength = {model[2]}')
elif model == 'most-likely':
Ensemble.find_most_likely()
model = Ensemble.most_likely_model
print('Plotting ...')
else:
raise ValueError('Model not understood')
fig = plt.figure(figsize= (16, 8))
# Plot gamma v strength, polar plot (Alpha is fixed to mode value)
points, slc = Ensemble.slice_3d_volume('alpha', model[0], nsamps, method)
gg = deg2rad(points[:,1].reshape(nsamps, nsamps))
ss = points[:,2].reshape(nsamps, nsamps)
ax1 = fig.add_subplot(131, projection='polar')
ax1.plot(deg2rad(model[1]), model[2], 'kx')
ax1.set_theta_zero_location('N')
ax1.set_theta_direction(-1)
C1 = ax1.contour(gg, ss, slc, 10)
ax1.clabel(C1, C1.levels, inline=True, fontsize=10)
ax1.set_title(f'Slice through alpha axis. alpha = {model[0]:0.3f}')
# Plot alpha v strength, polar plot (Gamma is fixed)
points, slc = Ensemble.slice_3d_volume('gamma', model[1], nsamps, method)
aa = deg2rad(points[:,0].reshape(nsamps, nsamps))
ss = points[:,2].reshape(nsamps, nsamps)
ax2 = fig.add_subplot(132, projection='polar')
C2 = ax2.contour(aa, ss, slc, 10)
ax2.clabel(C2, C2.levels, inline=True, fontsize=10)
ax2.plot(deg2rad(model[0]), model[2], 'kx')
ax2.set_theta_zero_location('N')
ax2.set_theta_direction(-1)
ax2.set_thetamin(0)
ax2.set_thetamax(90)
ax2.set_title(f'Slice through Gamma axis, gamma = {model[1]:0.3f}')
# Plot alpha and gamma, stereonet/ sterographic projection plot
# Remember that alpha == dip and gamma == strike
# To project dip onto polar plot using Lambert equal area:
# R = 2*cos(alpha/2)
# alpha = 2*arccos(R/2)
points, slc = Ensemble.slice_3d_volume('strength', model[2], nsamps, method)
gg = deg2rad(points[:,1].reshape(nsamps, nsamps))
a = points[:,0].reshape(nsamps, nsamps)
aa = (2*cos(deg2rad(a)/2))
ax3 = fig.add_subplot(133, projection='polar')
C3 = ax3.contour(gg, aa, slc, 10)
ax3.clabel(C3, C3.levels, inline=True, fontsize=10)
ax3.plot(deg2rad(model[1]), (2*cos(deg2rad(model[0])/2)), 'kx')
ax3.set_theta_zero_location('N')
ax3.set_theta_direction(-1)
ax3.set_title(f'Slice through strength axis, s = {model[2]:0.3f}')
if save is True:
fname = input('Enter filename (without extension) to save as >')
plt.savefig(f'{FIG_DIR}/{fname}.png', dpi=500)
plt.show()
def ags_to_xyz(alpha, gamma, s):
'''Function that maps the natively "spherical" alpha, gamma, strength samples to
cartesian co-ordinates for plotting
'''
radius = s
theta = deg2rad(90 - alpha)
phi = deg2rad(gamma)
x = radius * sin(theta) * cos(phi)
y = radius * sin(theta) * sin(phi)
z = radius * cos(theta)
return x, y, z
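# A minimal sketch (not part of the original module) of the inverse mapping, assuming the same
# convention as ags_to_xyz above: radius = strength, theta = 90 - alpha, phi = gamma. The helper
# name xyz_to_ags is hypothetical and only illustrates the coordinate convention.
def xyz_to_ags(x, y, z):
    '''Map cartesian plotting co-ordinates back to (alpha, gamma, strength)'''
    radius = np.sqrt(x**2 + y**2 + z**2)
    theta = np.arccos(z / radius)        # polar angle, in radians
    alpha = 90 - rad2deg(theta)          # alpha is measured up from the horizontal plane
    gamma = rad2deg(np.arctan2(y, x))    # gamma is the azimuth
    return alpha, gamma, radius
# Round trip for a single sample, e.g. alpha=30, gamma=45, strength=0.01:
# x, y, z = ags_to_xyz(30, 45, 0.01)
# assert np.allclose(xyz_to_ags(x, y, z), (30, 45, 0.01))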
def isosurface_view(Ensemble, level):
'''
Uses the marching cubes method to find an isosurface in the 3D PDF.
This isosurface is plotted using matplotlib's 3D toolkit
'''
# Normalise PDF to max likelihood
pdf = Ensemble.pdf_3d / Ensemble.pdf_3d.max()
a_spacing = Ensemble.alpha_samples[1] - Ensemble.alpha_samples[0]
g_spacing = Ensemble.gamma_samples[1] - Ensemble.gamma_samples[0]
s_spacing = Ensemble.strength_samples[1] - Ensemble.strength_samples[0]
space = (a_spacing, g_spacing, s_spacing)
verts, faces, normals, values = measure.marching_cubes(pdf, level, spacing=space)
    verts[:,1] -= 180 # Map gamma vertices back to the range -180 to 180
xx, yy, zz = ags_to_xyz(verts[:,0], verts[:,1], verts[:,2])
sphe_verts = np.vstack((xx, yy, zz)).T
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
mesh = Poly3DCollection(sphe_verts[faces])
mesh.set_edgecolor('k')
ax.add_collection3d(mesh)
add_str_scale(ax)
# Add the most likely model
model = Ensemble.find_most_likely(ret=True)
xm, ym, zm = ags_to_xyz(model[0], model[1], model[2])
ax.plot([0, xm], [0, ym], [0, zm],'-')
ax.plot(xm, ym, zm, 'x')
# ax.scatter(xx, yy, zz, c=values)
ax.set_xlim([-0.02, 0.02])
ax.set_xlabel('X')
ax.set_ylim([-0.02, 0.02])
ax.set_ylabel('Y')
ax.set_zlim([0, 0.02])
# Set initial view angle to be top down and so that 0 degrees points North
ax.view_init(90, 180)
plt.savefig(f'test_isosurface_view_level{level}.png', dpi=400)
plt.show()
def cartesian_isosurface_view(Ensemble, level, save=False):
    '''
    Plot an isosurface of the 3-D PDF (found with marching cubes) on cartesian
    alpha, gamma, strength axes.
    '''
# Normalise PDF to max likelihood
pdf = Ensemble.pdf_3d / Ensemble.pdf_3d.max()
a_spacing = Ensemble.alpha_samples[1] - Ensemble.alpha_samples[0]
g_spacing = Ensemble.gamma_samples[1] - Ensemble.gamma_samples[0]
s_spacing = Ensemble.strength_samples[1] - Ensemble.strength_samples[0]
space = (a_spacing, g_spacing, s_spacing)
verts, faces, normals, values = measure.marching_cubes(pdf, level, spacing=space)
    verts[:,1] -= 180 # Map gamma vertices back to the range -180 to 180
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
mesh = Poly3DCollection(verts[faces])
mesh.set_edgecolor('k')
ax.add_collection3d(mesh)
# Add the most likely model
model = Ensemble.find_most_likely(ret=True)
ax.plot([0, model[0]], [0, model[1]], [0, model[2]],'-')
ax.plot(model[0], model[1], model[2], 'x')
ax.set_xlabel('Alpha')
ax.set_ylabel('Gamma')
ax.set_zlabel('Strength')
ax.set_xlim([0, 90])
ax.set_ylim([-180, 180])
ax.set_zlim([0, 0.02])
if save is True:
        uid = input('Enter an inversion uid to add to the filename >')
plt.savefig(f'{FIG_DIR}/{uid}_cart_isosurf_lvl_{level}.png', dpi=500)
plt.show()
def add_str_scale(ax):
'''
    Draws circles on the 3-D pyplot axes to show the strength parameter (the radius of the PDF)
'''
t = np.linspace(0, 2*np.pi, 50)
    strs = [0.005, 0.01, 0.015, 0.02] # strength params to use
for s in strs:
x = s*np.cos(t)
y = s*np.sin(t)
z = np.zeros(50)
ax.plot(x, y, z,'r-')
ax.text(s*cos(np.pi/4), s*sin(np.pi/4),0, s=str(s))
ax.plot([0, 0.02], [0, 0], [0, 0], 'k-')
def plot_param_histograms(Ensemble):
'''
    Makes a histogram for each model parameter.
'''
models = Ensemble.models
fig = plt.figure(figsize=(21,7))
    # histogram for alpha
abins = np.linspace(0, 91, 50)
ax1 = fig.add_subplot(131)
ax1.hist(models.alpha, bins=abins)
ax1.set_xlim([0, 90])
ax1.set_xlabel(r'alpha ($\degree$)')
# histogram for gamma
gbins = np.linspace(-180, 181, 50)
ax2 = fig.add_subplot(132)
ax2.hist(models.gamma, bins=gbins)
ax2.set_xlim([-180, 180])
ax2.set_xlabel(r'gamma ($\degree$)')
# histogram for strength
sbins =
|
np.linspace(0, 0.02, 50)
|
numpy.linspace
|
from coopihc.space.State import State
from coopihc.space.StateElement import StateElement
from coopihc.space.Space import Space
from coopihc.space.utils import discrete_space, multidiscrete_space, continuous_space
import numpy
from collections import OrderedDict
state = State()
substate = State()
substate["x1"] = StateElement(values=1, spaces=discrete_space([1, 2, 3]))
substate["x2"] = StateElement(
values=[1, 2, 3],
spaces=multidiscrete_space(
[
[0, 1, 2],
[1, 2, 3],
[
0,
1,
2,
3,
],
]
),
)
substate["x3"] = StateElement(
values=1.5 * numpy.ones((3, 3)),
spaces=continuous_space(numpy.ones((3, 3)), 2 *
|
numpy.ones((3, 3))
|
numpy.ones
|
# __author__ = 'Hochikong'
from __future__ import absolute_import
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Embedding, LSTM
from keras.utils import plot_model
from configparser import ConfigParser
import matplotlib.pyplot as plt
# import os
# import sys
# PROJECT_PATH = os.path.abspath('.')
# sys.path.append(PROJECT_PATH)
from ETL import get_db, get_collection
import numpy
import pandas
CONFIG = 'config.ini'
SECTION = 'MODEL'
NLP = 'NLP'
DB = 'ETL'
FEATURES_FILE = 'features.csv'
cfg = ConfigParser()
cfg.read(CONFIG)
max_length = int(cfg.get(NLP, 'max_length'))
features = pandas.read_csv(FEATURES_FILE, header=None)
OUTPUT_DIM = cfg.get(SECTION, 'output_dim')
LSTM_UNITS = cfg.get(SECTION, 'lstm_units')
DROPOUT = cfg.get(SECTION, 'dropout')
DENSE = cfg.get(SECTION, 'dense')
ACTIVATION = cfg.get(SECTION, 'activation')
LOSS = cfg.get(SECTION, 'loss')
OPTIMIZER = cfg.get(SECTION, 'optimizer')
METRICS = cfg.get(SECTION, 'metrics')
BATCH_SIZE = cfg.get(SECTION, 'batch_size')
TRAIN_PERCENT = cfg.get(SECTION, 'train_percent')
EPOCH = cfg.get(SECTION, 'epoch')
VALIDATION_SPLIT = cfg.get(SECTION, 'validation_split')
addr = cfg.get(DB, 'address')
port = int(cfg.get(DB, 'port'))
database = cfg.get(DB, 'database')
collection = cfg.get(DB, 'collection')
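# For reference, an illustrative (made-up) config.ini that would satisfy the reads above:
#
#   [NLP]
#   max_length = 100
#
#   [MODEL]
#   output_dim = 128
#   lstm_units = 64
#   dropout = 0.5
#   dense = 1
#   activation = sigmoid
#   loss = binary_crossentropy
#   optimizer = rmsprop
#   metrics = accuracy
#   batch_size = 32
#   train_percent = 0.8
#   epoch = 10
#   validation_split = 0.1
#
#   [ETL]
#   address = localhost
#   port = 27017
#   database = corpus
#   collection = sentences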
model = Sequential()
model.add(Embedding(len(features), int(OUTPUT_DIM), input_length=max_length))
model.add(LSTM(int(LSTM_UNITS)))
model.add(Dropout(float(DROPOUT)))
model.add(Dense(int(DENSE)))
model.add(Activation(ACTIVATION))
model.compile(loss=LOSS, optimizer=OPTIMIZER, metrics=[METRICS])
if __name__ == "__main__":
print('Connecting to MongoDB')
db = get_db(addr, port, database)
col = get_collection(db, collection)
query_result = [l for l in col.find()] # Add lines into query_result
indx = list(set([l['label'] for l in query_result])) # Labels set
data = [[] for i in indx]
for l in query_result:
if l['label'] in indx:
p = indx.index(l['label'])
            data[p].append(l['sentence'])  # Store according to indx
# labels = numpy.array([l['label'] for l in query_result])
tmpl = [l['label'] for l in query_result] # A list only contains labels
train_percent = float(TRAIN_PERCENT)
percent = list(map(lambda x: x * train_percent,
[tmpl.count(i) for i in indx]))
    # Choose how many lines of data are used as train data
percent = list(map(lambda x: int(x), percent))
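    # Illustrative example of this split arithmetic (made-up counts): with two labels occurring
    # 10 and 6 times and train_percent = 0.5, percent becomes [5, 3], i.e. the first 5 and 3
    # sentences of each label go to the training set and the remainder to the test set below.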
unit = zip(indx, percent)
train_X = []
train_Y = []
test_X = []
test_Y = []
for i, v in unit:
p = indx.index(i)
train_X.extend(data[p][:v])
test_X.extend(data[p][v:])
train_Y.extend(len(data[p][:v]) * [i])
test_Y.extend(len(data[p][v:]) * [i])
train_Y = numpy.array(train_Y, dtype=int)
test_Y =
|
numpy.array(test_Y, dtype=int)
|
numpy.array
|
#!/usr/bin/env python3
"""
Scrapes the HF logs passed in via a glob string and calculates the amount of core hours lost to thread idling
Takes in a single or glob path to HF log files.
"""
import argparse
from datetime import datetime
from glob import glob
import numpy as np
def get_duration_and_dead_ratio(file_name):
"""Takes a log filename and extracts the amount of wasted core hours.
    :returns: A tuple containing the sim duration, the fraction of time lost, the number of cores with valid times and the time lost for each core"""
with open(file_name) as f:
lines = f.readlines()
final_times = {}
j = 1
release = lines[-j].split(":")
while (
len(release) < 6
or not release[5].startswith("Simulation completed.")
and j < len(lines)
):
j += 1
release = lines[-j].split(":")
try:
assert j < len(lines)
end_time = datetime.strptime(":".join(release[:3]), "%Y-%m-%d %H:%M:%S,%f")
except:
if verbose:
            print(file_name, "Couldn't find the simulation end time")
return None, None, None, None
for line in lines[-(j + 1) :: -1]:
line = line.split(":")
if len(line) < 6 or not line[5].startswith("Process "):
continue
try:
time = datetime.strptime(":".join(line[:3]), "%Y-%m-%d %H:%M:%S,%f")
except:
continue
rank = line[3].split("_")[-1]
if rank not in final_times.keys():
final_times.update({rank: time})
if len(final_times) < 2:
if verbose:
print(file_name, "Not enough times")
return None, None, None, None
times = sorted(final_times.values())
first_time = times[0]
duration = (end_time - first_time).total_seconds()
total_dead_time = 0
lost_times = []
for time in times:
lost_times.append((end_time - time).total_seconds())
if very_verbose:
print("adding {}".format((end_time - time).total_seconds() / duration))
total_dead_time += (end_time - time).total_seconds() / duration
total_dead_time *= 1 / len(times)
return duration, total_dead_time, len(times), lost_times
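# Toy illustration (made-up timestamps) of the idle-fraction arithmetic above: with an end time
# of 10:00:40 and per-rank finish times of 10:00:00 and 10:00:30, the duration is 40 s and the
# dead-time fraction is mean(40/40, 10/40) = 0.625.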
parser = argparse.ArgumentParser()
parser.add_argument(
"log_glob", help='log file selection expression. eg: "Runs/*/*/HF/Acc/HF.log"'
)
parser.add_argument(
"-o", "--out_file", help="The file to write the data to", default="hf_ch_burn.csv"
)
parser.add_argument("-v", "--verbose", help="Additional print statements enabled.")
parser.add_argument(
"-vv",
"--very_verbose",
help="Even more print statements enabled. Intended for use with a single realisation, or part log.",
)
args = parser.parse_args()
log_files = glob(args.log_glob)
verbose = args.verbose or args.very_verbose
very_verbose = args.very_verbose
rels = len(log_files)
values = np.ndarray(
rels,
dtype=[
("f_name", "U128"),
("fault", "U32"),
("duration", "f8"),
("efficiency", "f8"),
("ch_burned", "f8"),
],
)
for i, file in enumerate(log_files):
(
dead_duration,
decimal_dead_time,
node_count,
dead_times,
) = get_duration_and_dead_ratio(file)
if dead_duration is None:
dead_times = [0]
parts = file.split("/")
runs_idex = parts.index("Runs")
fault = parts[runs_idex + 1]
if node_count is not None and node_count % 40 != 0:
if verbose:
print(file, "Nodes off: {}".format(node_count))
        extra_nodes = int(np.ceil(node_count / 80) * 80)  # cast to int so it can be used with range() below
dead_times.extend(
[np.mean(dead_times) for i in range(extra_nodes - node_count)]
)
if verbose:
print(
file,
dead_duration,
sum(dead_times) / 3600,
node_count * dead_duration * (decimal_dead_time / 3600),
)
values[i] = (file, fault, dead_duration, decimal_dead_time, sum(dead_times) / 3600)
faults = np.unique(values["fault"])
for fault in faults:
fault_mask = values["fault"] == fault
avg = np.mean(values[fault_mask]["duration"])
stdev = np.std(values[fault_mask]["duration"])
outliers = values["duration"] > avg * 2
values = values[~np.logical_and(fault_mask, outliers)]
if sum(np.logical_and(fault_mask, outliers)) > 0:
print(
"Removing {} outliers for fault {}".format(
sum(
|
np.logical_and(fault_mask, outliers)
|
numpy.logical_and
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# ------------------------------------
import seaborn as sns
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as pp # noqa: E402
from matplotlib import cm # noqa: E402
import numpy as np # noqa: E402
import pandas as pd # noqa: E402
from vihds import utils # noqa: E402
def plot_prediction_summary(
device_names, signal_names, times, OBS, MU, STD, device_ids, predict_style, fixYaxis=False,
):
"""Compare the simulation against the data for the highest weighted sample"""
nplots = MU.shape[1]
unique_devices = np.unique(device_ids)
ndevices = len(unique_devices)
f, axs = pp.subplots(ndevices, nplots, sharex=True, figsize=(10, 2 * ndevices))
for iu, device_id in enumerate(unique_devices):
locs = np.where(device_ids == device_id)[0]
for idx in range(nplots):
if ndevices > 1:
ax = axs[iu, idx]
else:
ax = axs[idx]
w_mu = MU[locs, idx, :]
w_std = STD[locs, idx, :]
for mu, std in zip(w_mu, w_std):
ax.fill_between(times, mu - 2 * std, mu + 2 * std, color="grey", alpha=0.1)
ax.plot(times, OBS[locs, idx, :].T, "r-", lw=1, alpha=1)
ax.plot(times, w_mu.T, predict_style, lw=1, alpha=0.75, color="k")
if fixYaxis:
ax.set_ylim(-0.2, 1.2)
if iu == ndevices - 1:
ax.set_xlabel("Time (h)")
if iu == 0:
ax.set_title(signal_names[idx])
if idx == 0:
ax.set_ylabel(device_names[device_id])
pp.tight_layout()
sns.despine()
return f
def plot_weighted_theta(
theta_names,
TR_iws,
TR_theta,
TR_device_ids,
VL_iws,
VL_theta,
VL_device_ids,
columns2use,
sample=True,
nsamples=100,
):
# make a dataframe so we can call seaborn scatter plot
order_ids = np.argsort(theta_names)
n_train, n_train_samples = TR_iws.shape
n_val, n_val_samples = VL_iws.shape
# resample with replacement
TR_samples = []
for iws in TR_iws:
if sample:
# sub-sample according to iws
samples = np.random.choice(n_train_samples, nsamples, p=iws)
else:
# sub-sample uniformly
samples = np.random.choice(n_train_samples, nsamples)
TR_samples.append(samples)
VL_samples = []
for iws in VL_iws:
if sample:
# sub-sample according to iws
samples = np.random.choice(n_val_samples, nsamples, p=iws)
else:
# sub-sample uniformly
samples = np.random.choice(n_val_samples, nsamples)
VL_samples.append(samples)
TR_devices = np.tile(TR_device_ids.reshape((n_train, 1)), [1, nsamples])
VL_devices = np.tile(VL_device_ids.reshape((n_val, 1)), [1, nsamples])
names = []
train_thetas = []
val_thetas = []
for theta_idx in order_ids:
theta_name = theta_names[theta_idx]
train_theta = []
val_theta = []
for samples, values in zip(TR_samples, TR_theta[theta_idx]):
train_theta.append(values[samples])
for samples, values in zip(VL_samples, VL_theta[theta_idx]):
val_theta.append(values[samples])
names.append(theta_name)
train_thetas.append(np.array(train_theta).flatten())
val_thetas.append(np.array(val_theta).flatten())
names.append("device")
train_thetas.append(TR_devices.flatten())
val_thetas.append(VL_devices.flatten())
train_thetas = np.array(train_thetas, dtype=float).T
tr_df = pd.DataFrame(train_thetas, columns=names)
sns.set(style="ticks")
g = sns.PairGrid(tr_df, hue="device", vars=columns2use)
g = g.map_diag(sns.kdeplot, shade=True, alpha=0.5)
g = g.map_offdiag(sns.scatterplot, s=20, alpha=0.25, edgecolor="k", linewidth=0.5)
g = g.add_legend()
return g.fig
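# Note on the importance-weighted sub-sampling used above (illustrative numbers): with
# iws = [0.7, 0.2, 0.1], np.random.choice(3, 100, p=iws) draws index 0 roughly 70 times, so
# samples from high-weight posterior draws dominate the scatter plots.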
def species_summary(
species_names, treatments, device_ids, times, iw_states, devices, settings, normalise=True,
):
"""Plot the simulated latent species"""
ndevices = len(devices)
nplots = iw_states.shape[1]
fs = 14
colors = "grbcmyk"
divisors = [np.max(iw_states[:, idx, :]) if normalise else 1.0 for idx in range(nplots)]
f, axs = pp.subplots(ndevices, nplots, sharex=True, sharey=normalise, figsize=(14, 2 * ndevices))
for iu, device_id in enumerate(devices):
for idx in range(nplots):
if ndevices == 1:
ax = axs[idx]
else:
ax = axs[iu, idx]
if settings.separate_conditions is True:
for i, _ in enumerate(settings.conditions):
locs = np.where((device_ids == device_id) & (treatments[:, i] > 0.0))[0]
mus = iw_states[locs, idx, :] / divisors[idx]
# alphas = treatments[locs,i] / treat_max
alphas = 0.5
ax.plot(
np.tile(times, [len(locs), 1]).T, mus.T, "-", lw=1, alpha=alphas, color=colors[i],
)
else:
locs = np.where(device_ids == device_id)[0]
mus = iw_states[locs, idx, :] / divisors[idx]
ax.plot(np.tile(times, [len(locs), 1]).T, mus.T, "-", lw=1, color="k")
if normalise:
ax.set_ylim(-0.1, 1.1)
if iu == 0:
if idx < len(species_names):
ax.set_title(species_names[idx])
else:
ax.set_title("Latent %d" % (idx - len(species_names)))
ax.set_xticks([0, 4, 8, 12, 16])
if ndevices == 1:
ax = axs[0]
else:
ax = axs[iu, 0]
ax.set_ylabel(
settings.pretty_devices[device_id], labelpad=20, fontweight="bold", fontsize=fs,
)
sns.despine()
pp.tight_layout()
# Global axis labels: add a big axis, then hide frame
f.add_subplot(111, frameon=False)
pp.tick_params(labelcolor="none", top=False, bottom=False, left=False, right=False)
pp.xlabel("Time (h)", fontsize=fs, labelpad=7)
if ndevices > 1:
pp.ylabel("Normalized output", fontsize=fs, labelpad=0)
else:
pp.ylabel("Norm. output", fontsize=fs, labelpad=0)
return f
def xval_treatments(res, devices):
"""Compare the final simulated points against the equivalent data-points to establish functional response"""
nplots = len(res.settings.signals)
ndev = len(devices)
ms = 5
fs = 14
obs_mk = "x"
pred_mk = "o"
colors = ["g", "r", "b"]
edges = ["darkgreen", "darkred", "darkblue"]
f, axs = pp.subplots(ndev, nplots, sharex=True, sharey=True, figsize=(9, 2.2 * ndev))
for iu, device_id in enumerate(devices):
locs = np.where(res.devices == device_id)[0]
input_values = []
for ci, _ in enumerate(res.settings.conditions):
vs = np.exp(res.treatments[:, ci]) - 1
input_values.append(vs[locs])
for j, signal in enumerate(res.settings.signals):
if ndev > 1:
ax = axs[iu, j]
else:
ax = axs[j]
mu = res.iw_predict_mu[locs, j, -1]
std = res.iw_predict_std[locs, j, -1]
for ci, cvalues in enumerate(input_values):
ax.errorbar(
cvalues, mu, yerr=std, fmt=pred_mk, ms=ms, lw=1, mec=edges[ci], color=colors[ci], zorder=ci,
)
ax.semilogx(
cvalues, res.X_obs[locs, j, -1], "k" + obs_mk, ms=ms, lw=1, color=edges[ci], zorder=ci + 20,
)
ax.set_ylim(-0.1, 1.1)
ax.tick_params(axis="both", which="major", labelsize=fs)
ax.set_xticks(np.logspace(0, 4, 3))
if j == 0:
ax.set_ylabel(
res.settings.devices[iu], labelpad=25, fontweight="bold", fontsize=fs,
)
if iu == 0:
ax.set_title(signal, fontsize=fs)
# Add legend to one of the panels
if ndev > 1:
ytext = "Normalized fluorescence"
ax = axs[0, nplots - 1]
else:
ytext = "Norm. fluorescence"
ax = axs[nplots - 1]
dstr = list(map(lambda s: s + " (data)", res.settings.conditions))
mstr = list(map(lambda s: s + " (model)", res.settings.conditions))
ax.legend(labels=dstr + mstr)
# Global axis labels: add a big axis, then hide frame
f.add_subplot(111, frameon=False)
pp.tick_params(labelcolor="none", top=False, bottom=False, left=False, right=False)
pp.xlabel(" / ".join(res.settings.conditions), fontsize=fs, labelpad=7)
# pp.xlabel("C$_6$ / C$_{12}$ (nM)", fontsize=fs, labelpad=7)
pp.ylabel(ytext, fontsize=fs, labelpad=7)
sns.despine()
return f
def xval_fit_summary(res, device_id, separatedInputs=False):
"""Summary plot of model-data fit for cross-validation results"""
nplots = len(res.settings.signals)
fs = 14
all_locs = []
if separatedInputs is True:
nrows = len(res.settings.conditions)
for i in range(nrows):
dev_locs = np.where((res.devices == device_id) & (res.treatments[:, i] > 0.0))[0]
_, indices = np.unique(res.treatments[dev_locs, i], return_index=True)
all_locs.append(dev_locs[indices])
f, axs = pp.subplots(nrows, nplots, sharex=True, sharey=True, figsize=(2.2 * nplots, 1.6 * nrows + 1.2),)
else:
nrows = 1
dev_locs = np.where(res.devices == device_id)[0]
_, indices = np.unique(res.treatments[dev_locs, :], return_index=True, axis=0)
all_locs.append(dev_locs[indices])
f, axs = pp.subplots(1, nplots, sharey=True, figsize=(2.2 * nplots, 2.8))
for i, locs in enumerate(all_locs):
colors = [cm.rainbow(x) for x in np.linspace(0, 1, np.shape(locs)[0])] # pylint: disable=no-member
for idx in range(nplots):
if nrows > 1:
ax = axs[i, idx]
else:
ax = axs[idx]
w_mu = res.iw_predict_mu[locs, idx, :]
w_std = res.iw_predict_std[locs, idx, :]
ax.set_prop_cycle("color", colors)
for mu, std in zip(w_mu, w_std):
ax.fill_between(res.times, mu - 2 * std, mu + 2 * std, alpha=0.1)
ax.plot(res.times, res.X_obs[locs, idx, :].T, ".", alpha=1, markersize=2)
ax.plot(res.times, w_mu.T, "-", lw=2, alpha=0.75)
ax.set_xlim(0.0, 17)
ax.set_xticks([0, 5, 10, 15])
ax.set_ylim(-0.2, 1.2)
if (idx == 0) & (nrows > 1):
ax.set_ylabel(
res.settings.conditions[i] + " dilution", labelpad=25, fontweight="bold", fontsize=fs,
)
if i == 0:
ax.set_title(res.settings.signals[idx], fontsize=fs)
# Global axis labels: add a big axis, then hide frame
f.add_subplot(111, frameon=False)
pp.tick_params(labelcolor="none", top=False, bottom=False, left=False, right=False)
pp.xlabel("Time (h)", fontsize=fs, labelpad=7)
pp.ylabel("Normalized output", fontsize=fs, labelpad=7)
pp.tight_layout()
sns.despine()
return f
def gen_treatment_str(conditions, treatments, unit=None):
vstr_list = []
for k, v in zip(conditions, treatments):
val = np.exp(v) - 1.0
if (val > 0.0) & (val < 1.0):
vstr = "%s = %1.1f" % (k, val)
else:
vstr = "%s = %1.0f" % (k, val)
if unit is not None:
vstr = "%s %s" % (vstr, unit)
vstr_list.append(vstr)
return "\n".join(vstr_list)
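# Example of the log1p inverse applied above (illustrative value): a stored treatment of
# np.log(101.0) corresponds to 100 nM, so gen_treatment_str(["C6"], [np.log(101.0)], unit="nM")
# returns "C6 = 100 nM".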
def xval_individual(res, device_id):
nplots = res.X_obs.shape[1]
colors = ["tab:gray", "r", "y", "c"]
maxs = np.max(res.X_obs, axis=(0, 2))
fs = 14
locs = np.where(res.devices == device_id)[0]
ids = np.argsort(res.ids[locs])
locs = locs[ids]
ntreatments = len(locs)
nrows = int(np.ceil(ntreatments / 2.0))
f = pp.figure(figsize=(12, 1.2 * nrows))
for col in range(2):
left = 0.1 + col * 0.5
bottom = 0.4 / nrows
width = 0.33 / nplots
dx = 0.38 / nplots
dy = (1 - bottom) / nrows
height = 0.8 * dy
for i in range(nrows):
loc = locs[i + col * nrows]
treatment_str = gen_treatment_str(res.settings.conditions, res.treatments[loc])
for idx, maxi in enumerate(maxs):
ax = f.add_subplot(nrows, 2 * nplots, col * nplots + (nrows - i - 1) * 2 * nplots + idx + 1,)
ax.set_position([left + idx * dx, bottom + (nrows - i - 1) * dy, width, height])
mu = res.iw_predict_mu[loc, idx, :]
std = res.iw_predict_std[loc, idx, :]
ax.fill_between(
res.times, (mu - 2 * std) / maxi, (mu + 2 * std) / maxi, alpha=0.25, color=colors[idx],
)
ax.plot(res.times, res.X_obs[loc, idx, :] / maxi, "k.", markersize=2)
ax.plot(res.times, mu / maxi, "-", lw=2, alpha=0.75, color=colors[idx])
ax.set_xlim(0.0, 17)
ax.set_xticks([0, 5, 10, 15])
ax.set_ylim(-0.2, 1.2)
ax.tick_params(axis="both", which="major", labelsize=fs)
if i == 0:
pp.title(res.settings.signals[idx], fontsize=fs)
# if i<nrows-1:
ax.set_xticklabels([])
if idx == 0:
ax.set_ylabel(treatment_str, labelpad=25, fontsize=fs - 2)
else:
ax.set_yticklabels([])
# Add labels
f.text(
left - 0.35 * dx, 0.5, "Normalized output", ha="center", va="center", rotation=90, fontsize=fs,
)
f.text(left + 2 * dx, 0, "Time (h)", ha="center", va="bottom", fontsize=fs)
sns.despine()
return f
def xval_individual_2treatments(res, device_id):
"""Multi-panel plot for each sample, with treatments separated into 2 groups"""
nplots = res.X_obs.shape[1]
colors = ["tab:gray", "r", "y", "c"]
maxs = np.max(res.X_obs, axis=(0, 2))
fs = 14
both_locs = []
for col in range(2):
all_locs = np.where((res.devices == device_id) & (res.treatments[:, col] > 0.0))[0]
indices = np.argsort(res.treatments[all_locs, col])
both_locs.append(all_locs[indices])
ntreatments = max(map(len, both_locs))
f = pp.figure(figsize=(12, 1.5 * ntreatments))
for col, locs in enumerate(both_locs):
left = 0.1 + col * 0.5
bottom = 0.4 / ntreatments
width = 0.33 / nplots
dx = 0.38 / nplots
dy = (1 - bottom) / ntreatments
height = 0.8 * dy
for i, loc in enumerate(locs[:ntreatments]):
# TODO(ndalchau): Incorporate units into conditions specification (here we assume nM)
treatment_str = gen_treatment_str(res.settings.conditions, res.treatments[loc], unit="nM")
for idx, maxi in enumerate(maxs):
ax = f.add_subplot(
ntreatments, 2 * nplots, col * nplots + (ntreatments - i - 1) * 2 * nplots + idx + 1,
)
ax.set_position(
[left + idx * dx, bottom + (ntreatments - i - 1) * dy, width, height]
)
mu = res.iw_predict_mu[loc, idx, :]
std = res.iw_predict_std[loc, idx, :]
ax.fill_between(
res.times, (mu - 2 * std) / maxi, (mu + 2 * std) / maxi, alpha=0.25, color=colors[idx],
)
ax.plot(res.times, res.X_obs[loc, idx, :] / maxi, "k.", markersize=2)
ax.plot(res.times, mu / maxi, "-", lw=2, alpha=0.75, color=colors[idx])
ax.set_xlim(0.0, 17)
ax.set_xticks([0, 5, 10, 15])
ax.set_ylim(-0.2, 1.2)
ax.tick_params(axis="both", which="major", labelsize=fs)
if i == 0:
pp.title(res.settings.signals[idx], fontsize=fs)
if i < ntreatments - 1:
ax.set_xticklabels([])
if idx == 0:
ax.set_ylabel(treatment_str, labelpad=25, fontsize=fs - 2)
else:
ax.set_yticklabels([])
sns.despine()
# Add labels
f.text(
left - 0.35 * dx, 0.5, "Normalized output", ha="center", va="center", rotation=90, fontsize=fs,
)
f.text(left + 2 * dx, 0, "Time (h)", ha="center", va="bottom", fontsize=fs)
return f
def combined_treatments(results, devices):
"""Compare model-data functional responses to inputs for multiple models"""
ndev = len(devices)
nres = len(results)
ms = 5
fs = 14
obs_mk = "x"
pred_mk = "o"
width = 0.2
lefts = [0.05, 0.57]
bottom = 0.3 / ndev
dx = 0.23
dy = (1 - bottom) / ndev
height = 0.9 * dy
c6_idx = 1
c12_idx = 0
ids = [2, 3]
colors = ["y", "c"]
f, ax = pp.subplots(ndev, 2 * nres, sharex=True, figsize=(9, 2.2 * ndev + 0.5))
for iu, device_id in enumerate(devices):
if ndev == 1:
row = ax
ytext = "Norm. fluorescence"
else:
row = ax[iu]
ytext = "Normalized fluorescence"
row[0].set_ylabel(results[0].pretty_devices[iu], labelpad=25, fontweight="bold", fontsize=fs)
for ir, res in enumerate(results):
locs = np.where(res.devices == device_id)[0]
OBS = np.transpose(res.X_obs[locs, -1, :], [1, 0])
IW = res.importance_weights[locs]
PREDICT =
|
np.transpose(res.PREDICT[locs, :], [2, 0, 1])
|
numpy.transpose
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard
from functools import partial
import paddle.fluid.optimizer as optimizer
class TestAPICase(unittest.TestCase):
def test_return_single_var(self):
def fn_1():
return layers.fill_constant(shape=[4, 2], dtype='int32', value=1)
def fn_2():
return layers.fill_constant(shape=[4, 2], dtype='int32', value=2)
def fn_3():
return layers.fill_constant(shape=[4, 3], dtype='int32', value=3)
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)
pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1
pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3
# call fn_1
out_0 = layers.case(pred_fn_pairs=[(pred_1, fn_1), (pred_1, fn_2)],
default=fn_3)
# call fn_2
out_1 = layers.case(pred_fn_pairs=[(pred_2, fn_1), (pred_1, fn_2)],
default=fn_3)
# call default fn_3
out_2 = layers.case(pred_fn_pairs=((pred_2, fn_1), (pred_2, fn_2)),
default=fn_3)
# no default, call fn_2
out_3 = layers.case(pred_fn_pairs=[(pred_1, fn_2)])
# no default, call fn_2. but pred_2 is false
out_4 = layers.case(pred_fn_pairs=[(pred_2, fn_2)])
place = fluid.CUDAPlace(
0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
res = exe.run(main_program,
fetch_list=[out_0, out_1, out_2, out_3, out_4])
self.assertTrue(np.allclose(res[0], 1))
self.assertTrue(np.allclose(res[1], 2))
self.assertTrue(np.allclose(res[2], 3))
self.assertTrue(np.allclose(res[3], 2))
self.assertTrue(np.allclose(res[4], 2))
def test_return_var_tuple(self):
def fn_1():
return layers.fill_constant(shape=[1, 2], dtype='int32',
value=1), layers.fill_constant(
shape=[2, 3],
dtype='float32',
value=2)
def fn_2():
return layers.fill_constant(shape=[3, 4], dtype='int32',
value=3), layers.fill_constant(
shape=[4, 5],
dtype='float32',
value=4)
def fn_3():
return layers.fill_constant(shape=[5], dtype='int32',
value=5), layers.fill_constant(
shape=[5, 6],
dtype='float32',
value=6)
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=1)
y = layers.fill_constant(shape=[1], dtype='float32', value=1)
z = layers.fill_constant(shape=[1], dtype='float32', value=3)
pred_1 = layers.equal(x, y) # true
pred_2 = layers.equal(x, z) # false
out = layers.case(((pred_1, fn_1), (pred_2, fn_2)), fn_3)
place = fluid.CUDAPlace(
0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
ret = exe.run(main_program, fetch_list=out)
self.assertTrue(
np.allclose(np.asarray(ret[0]), np.full((1, 2), 1, np.int32)))
self.assertTrue(
np.allclose(np.asarray(ret[1]), np.full((2, 3), 2, np.float32)))
class TestAPICase_Nested(unittest.TestCase):
def test_nested_case(self):
def fn_1(x=1):
var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5)
var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6)
out = layers.case(pred_fn_pairs=[
(var_5 < var_6,
partial(
layers.fill_constant, shape=[1], dtype='int32', value=x)),
(var_5 == var_6,
partial(
layers.fill_constant, shape=[2], dtype='int32', value=x))
])
return out
def fn_2(x=2):
var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5)
var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6)
out = layers.case(pred_fn_pairs=[
(var_5 < var_6, partial(fn_1, x=x)),
(var_5 == var_6,
partial(
layers.fill_constant, shape=[2], dtype='int32', value=x))
])
return out
def fn_3():
var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5)
var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6)
out = layers.case(pred_fn_pairs=[
(var_5 < var_6, partial(fn_2, x=3)),
(var_5 == var_6,
partial(
layers.fill_constant, shape=[2], dtype='int32', value=7))
])
return out
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)
pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1
pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3
out_1 = layers.case(pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)],
default=fn_3)
out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_1), (pred_1, fn_2)],
default=fn_3)
out_3 = layers.case(pred_fn_pairs=[(x == y, fn_1), (x == z, fn_2)],
default=fn_3)
place = fluid.CUDAPlace(
0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
res = exe.run(main_program, fetch_list=[out_1, out_2, out_3])
self.assertTrue(np.allclose(res[0], 1))
self.assertTrue(
|
np.allclose(res[1], 2)
|
numpy.allclose
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
import tkinter as tk
from tkinter import filedialog
import matplotlib.ticker as ticker
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from scipy.interpolate import interp1d
from scipy.signal import convolve
from bcdi.utils import image_registration as reg
helptext = """
Template for figures of the following article:
Carnis et al. Scientific Reports 9, 17357 (2019)
https://doi.org/10.1038/s41598-019-53774-2
Open the simulated amp_dist_strain.npz file and the reconstructed
amp_dist_strain.npz, and plot difference maps
"""
scan = 2227 # spec scan number
datadir = (
"G:/review paper/BCDI_isosurface/S"
+ str(scan)
+ "/simu/crop400phase/no_apodization/avg1/"
)
savedir = (
"G:/review paper/BCDI_isosurface/New figures/isosurface/no_apodization/avg1_new/"
)
voxel_size = 3.0 # in nm
tick_spacing = 50 # for plots, in nm
planar_dist = 0.2269735 # in nm, for strain calculation
field_of_view = (
500 # in nm, should not be larger than the total width at the moment (no padding)
)
tick_direction = "in" # 'out', 'in', 'inout'
tick_length = 6 # in plots
tick_width = 2 # in plots
strain_range = 0.002 # for plots
phase_range = np.pi # for plots
support_threshold = 0.7 # threshold for support determination
min_amp = 0.01 # everything with lower amplitude will be set to np.nan in plots
debug = 0 # 1 to show all plots
save_YZ = 0 # 1 to save the strain in YZ plane
save_XZ = 1 # 1 to save the strain in XZ plane
save_XY = 1 # 1 to save the strain in XY plane
comment = "_iso" + str(support_threshold) # should start with _
comment = comment + "_strainrange_" + str(strain_range)
######################################
# define a colormap
cdict = {
"red": (
(0.0, 1.0, 1.0),
(0.11, 0.0, 0.0),
(0.36, 0.0, 0.0),
(0.62, 1.0, 1.0),
(0.87, 1.0, 1.0),
(1.0, 0.0, 0.0),
),
"green": (
(0.0, 1.0, 1.0),
(0.11, 0.0, 0.0),
(0.36, 1.0, 1.0),
(0.62, 1.0, 1.0),
(0.87, 0.0, 0.0),
(1.0, 0.0, 0.0),
),
"blue": (
(0.0, 1.0, 1.0),
(0.11, 1.0, 1.0),
(0.36, 1.0, 1.0),
(0.62, 0.0, 0.0),
(0.87, 0.0, 0.0),
(1.0, 0.0, 0.0),
),
}
my_cmap = LinearSegmentedColormap("my_colormap", cdict, 256)
my_cmap.set_bad(color="0.7")
def calc_coordination(mysupport, debugging=0):
"""Calculate the coordination number of the support using a 3x3x3 kernel."""
nbz, nby, nbx = mysupport.shape
mykernel = np.ones((3, 3, 3))
mycoord = np.rint(convolve(mysupport, mykernel, mode="same"))
mycoord = mycoord.astype(int)
if debugging == 1:
plt.figure(figsize=(18, 15))
plt.subplot(2, 2, 1)
plt.imshow(mycoord[:, :, nbx // 2])
plt.colorbar()
plt.axis("scaled")
plt.title("Coordination matrix in middle slice in YZ")
plt.subplot(2, 2, 2)
plt.imshow(mycoord[:, nby // 2, :])
plt.colorbar()
plt.title("Coordination matrix in middle slice in XZ")
plt.axis("scaled")
plt.subplot(2, 2, 3)
plt.imshow(mycoord[nbz // 2, :, :])
plt.gca().invert_yaxis()
plt.colorbar()
plt.title("Coordination matrix in middle slice in XY")
plt.axis("scaled")
plt.pause(0.1)
return mycoord
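# Quick sanity check (illustrative only, not executed here): an isolated voxel has coordination 1,
# while the centre voxel of a solid 3x3x3 block sees the full 27-voxel neighbourhood.
# _support = np.zeros((5, 5, 5)); _support[2, 2, 2] = 1
# assert calc_coordination(_support)[2, 2, 2] == 1
# _support[1:4, 1:4, 1:4] = 1
# assert calc_coordination(_support)[2, 2, 2] == 27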
def crop_pad(myobj, myshape, debugging=0):
"""
Crop or pad my obj depending on myshape.
:param myobj: 3d complex array to be padded
:param myshape: list of desired output shape [z, y, x]
:param debugging: to plot myobj before and after rotation
:return: myobj padded with zeros
"""
nbz, nby, nbx = myobj.shape
newz, newy, newx = myshape
if debugging == 1:
plt.figure(figsize=(18, 15))
plt.subplot(2, 2, 1)
plt.imshow(abs(myobj)[:, :, nbx // 2], vmin=0, vmax=1)
plt.colorbar()
plt.axis("scaled")
plt.title("Middle slice in YZ before padding")
plt.subplot(2, 2, 2)
plt.imshow(abs(myobj)[:, nby // 2, :], vmin=0, vmax=1)
plt.colorbar()
plt.title("Middle slice in XZ before padding")
plt.axis("scaled")
plt.subplot(2, 2, 3)
plt.imshow(abs(myobj)[nbz // 2, :, :], vmin=0, vmax=1)
plt.gca().invert_yaxis()
plt.colorbar()
plt.title("Middle slice in XY before padding")
plt.axis("scaled")
plt.pause(0.1)
# z
if newz >= nbz: # pad
temp_z = np.zeros((myshape[0], nby, nbx), dtype=myobj.dtype)
temp_z[(newz - nbz) // 2 : (newz + nbz) // 2, :, :] = myobj
else: # crop
temp_z = myobj[(nbz - newz) // 2 : (newz + nbz) // 2, :, :]
# y
if newy >= nby: # pad
temp_y = np.zeros((newz, newy, nbx), dtype=myobj.dtype)
temp_y[:, (newy - nby) // 2 : (newy + nby) // 2, :] = temp_z
else: # crop
temp_y = temp_z[:, (nby - newy) // 2 : (newy + nby) // 2, :]
# x
if newx >= nbx: # pad
newobj = np.zeros((newz, newy, newx), dtype=myobj.dtype)
newobj[:, :, (newx - nbx) // 2 : (newx + nbx) // 2] = temp_y
else: # crop
newobj = temp_y[:, :, (nbx - newx) // 2 : (newx + nbx) // 2]
if debugging == 1:
plt.figure(figsize=(18, 15))
plt.subplot(2, 2, 1)
plt.imshow(abs(newobj)[:, :, newx // 2], vmin=0, vmax=1)
plt.colorbar()
plt.axis("scaled")
plt.title("Middle slice in YZ after padding")
plt.subplot(2, 2, 2)
plt.imshow(abs(newobj)[:, newy // 2, :], vmin=0, vmax=1)
plt.colorbar()
plt.title("Middle slice in XZ after padding")
plt.axis("scaled")
plt.subplot(2, 2, 3)
plt.imshow(abs(newobj)[newz // 2, :, :], vmin=0, vmax=1)
plt.gca().invert_yaxis()
plt.colorbar()
plt.title("Middle slice in XY after padding")
plt.axis("scaled")
plt.pause(0.1)
return newobj
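# Illustrative round trip (not executed here): a 2x2x2 object padded to [4, 4, 4] is centred in
# a zero array, and cropping back to [2, 2, 2] recovers the original values.
# _obj = np.ones((2, 2, 2), dtype=complex)
# _padded = crop_pad(_obj, [4, 4, 4])
# assert np.array_equal(crop_pad(_padded, [2, 2, 2]), _obj)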
plt.ion()
root = tk.Tk()
root.withdraw()
#######################################
pixel_spacing = tick_spacing / voxel_size
pixel_FOV = int(
np.rint((field_of_view / voxel_size) / 2)
) # half-number of pixels corresponding to the FOV
##########################
# open simulated amp_phase_strain.npz
##########################
file_path = filedialog.askopenfilename(
initialdir=datadir, title="Select simulation file", filetypes=[("NPZ", "*.npz")]
)
print("Opening ", file_path)
npzfile = np.load(file_path)
amp_simu = npzfile["amp"]
bulk_simu = npzfile["bulk"]
strain_simu = npzfile["strain"]
phase_simu = npzfile["phase"] # ['displacement']
numz, numy, numx = amp_simu.shape
print("SIMU: Initial data size: (", numz, ",", numy, ",", numx, ")")
strain_simu[amp_simu == 0] = np.nan
phase_simu[amp_simu == 0] = np.nan
##########################
# open phased amp_phase_strain.npz
##########################
file_path = filedialog.askopenfilename(
initialdir=datadir, title="Select phased file", filetypes=[("NPZ", "*.npz")]
)
print("Opening ", file_path)
npzfile = np.load(file_path)
amp = npzfile["amp"]
phase = npzfile["phase"]
obj = amp * np.exp(1j * phase)
del amp, phase
numz, numy, numx = obj.shape
print("Phased: Initial data size: (", numz, ",", numy, ",", numx, ")")
obj = crop_pad(obj, amp_simu.shape)
numz, numy, numx = obj.shape
print("Cropped/padded size: (", numz, ",", numy, ",", numx, ")")
plt.figure()
plt.imshow(
np.angle(obj)[numz // 2, :, :], cmap=my_cmap, vmin=-phase_range, vmax=phase_range
)
plt.title("Phase before subpixel shift")
plt.pause(0.1)
##############################
# align datasets
##############################
# dft registration and subpixel shift (see Matlab code)
shiftz, shifty, shiftx = reg.getimageregistration(amp_simu, abs(obj), precision=1000)
obj = reg.subpixel_shift(obj, shiftz, shifty, shiftx)
print(
"Shift calculated from dft registration: (",
str("{:.2f}".format(shiftz)),
",",
str("{:.2f}".format(shifty)),
",",
str("{:.2f}".format(shiftx)),
") pixels",
)
new_amp = abs(obj)
new_phase = np.angle(obj)
del obj
_, new_strain, _ = np.gradient(planar_dist / (2 * np.pi) * new_phase, voxel_size)
# q is along y after rotating the crystal
plt.figure()
plt.imshow(
new_phase[numz // 2, :, :], cmap=my_cmap, vmin=-phase_range, vmax=phase_range
)
plt.title("Phase after subpixel shift")
plt.pause(0.1)
del new_phase
plt.figure()
plt.imshow(
new_strain[numz // 2, :, :], cmap=my_cmap, vmin=-strain_range, vmax=strain_range
)
plt.title("Strain after subpixel shift")
plt.pause(0.1)
new_amp = (
new_amp / new_amp.max()
) # need to renormalize after subpixel shift interpolation
new_amp[new_amp < min_amp] = 0
new_strain[new_amp == 0] = 0
##############################
# plot simulated phase
##############################
masked_array = np.ma.array(phase_simu, mask=np.isnan(phase_simu))
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
vmin=-phase_range,
vmax=phase_range,
cmap=my_cmap,
)
ax0.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_YZ == 1:
plt.savefig(savedir + "simu_phase_YZ.png", bbox_inches="tight")
fig, ax1 = plt.subplots(1, 1)
plt1 = ax1.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
vmin=-phase_range,
vmax=phase_range,
cmap=my_cmap,
)
ax1.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax1.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax1.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_XZ == 1:
plt.savefig(savedir + "simu_phase_XZ.png", bbox_inches="tight")
fig, ax2 = plt.subplots(1, 1)
plt2 = ax2.imshow(
masked_array[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
vmin=-phase_range,
vmax=phase_range,
cmap=my_cmap,
)
ax2.invert_yaxis()
ax2.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax2.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax2.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_XY == 1:
plt.savefig(savedir + "simu_phase_XY.png", bbox_inches="tight")
plt.colorbar(plt2, ax=ax2)
plt.savefig(savedir + "simu_phase_XY_colorbar.png", bbox_inches="tight")
##############################
# plot amplitudes
##############################
hist, bin_edges = np.histogram(new_amp[new_amp > min_amp].flatten(), bins=250)
bin_step = (bin_edges[1] - bin_edges[0]) / 2
bin_axis = bin_edges + bin_step
bin_axis = bin_axis[0 : len(hist)]
# hist = medfilt(hist, kernel_size=3)
interpolation = interp1d(bin_axis, hist, kind="cubic")
interp_points = 1 * len(hist)
interp_axis = np.linspace(bin_axis.min(), bin_axis.max(), interp_points)
inter_step = interp_axis[1] - interp_axis[0]
interp_curve = interpolation(interp_axis)
fig, ax = plt.subplots(1, 1)
plt.hist(new_amp[new_amp > min_amp].flatten(), bins=250)
plt.xlim(left=min_amp)
plt.ylim(bottom=1)
ax.set_yscale("log")
ax.tick_params(
labelbottom=False,
labelleft=False,
direction="out",
length=tick_length,
width=tick_width,
)
plt.savefig(savedir + "phased_histogram_amp.png", bbox_inches="tight")
ax.tick_params(
labelbottom=True,
labelleft=True,
direction="out",
length=tick_length,
width=tick_width,
)
ax.spines["right"].set_linewidth(1.5)
ax.spines["left"].set_linewidth(1.5)
ax.spines["top"].set_linewidth(1.5)
ax.spines["bottom"].set_linewidth(1.5)
plt.savefig(savedir + "phased_histogram_amp_labels.png", bbox_inches="tight")
fig, ax0 = plt.subplots(1, 1)
plt.plot(amp_simu[numz // 2, 183, 128:136], "r")
plt.plot(new_amp[numz // 2, 183, 128:136], "k")
ax0.tick_params(
labelbottom=False,
labelleft=False,
direction="out",
top=True,
bottom=False,
length=tick_length,
width=tick_width,
)
ax0.spines["right"].set_linewidth(1.5)
ax0.spines["left"].set_linewidth(1.5)
ax0.spines["top"].set_linewidth(1.5)
ax0.spines["bottom"].set_linewidth(1.5)
plt.savefig(savedir + "linecut_amp.png", bbox_inches="tight")
if debug:
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2, 2)
plt0 = ax0.imshow(
new_amp[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
cmap=my_cmap,
vmin=0,
vmax=1,
)
plt1 = ax1.imshow(
new_amp[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=0,
vmax=1,
)
plt2 = ax2.imshow(
new_amp[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=0,
vmax=1,
)
ax2.invert_yaxis()
plt.title("new_amp")
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(
2,
2,
)
plt0 = ax0.imshow(
amp_simu[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
cmap=my_cmap,
vmin=0,
vmax=1,
)
plt1 = ax1.imshow(
amp_simu[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=0,
vmax=1,
)
plt2 = ax2.imshow(
amp_simu[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=0,
vmax=1,
)
ax2.invert_yaxis()
plt.title("amp_simu")
diff_amp = (amp_simu - new_amp) * 100
diff_amp_copy = np.copy(diff_amp)
support = np.zeros(amp_simu.shape)
support[np.nonzero(amp_simu)] = 1
support[np.nonzero(new_amp)] = 1
# the support will have the size of the largest object
# between the simulation and the reconstruction
diff_amp_copy[support == 0] = np.nan
masked_array = np.ma.array(diff_amp_copy, mask=np.isnan(diff_amp_copy))
if debug:
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2, 2)
plt0 = ax0.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
cmap=my_cmap,
vmin=-100,
vmax=100,
)
plt.colorbar(plt0, ax=ax0)
plt1 = ax1.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=-100,
vmax=100,
)
plt.colorbar(plt1, ax=ax1)
plt2 = ax2.imshow(
masked_array[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=-100,
vmax=100,
)
ax2.invert_yaxis()
plt.colorbar(plt2, ax=ax2)
plt.title("(amp_simu - new_amp)*100")
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
vmin=-100,
vmax=100,
cmap=my_cmap,
)
ax0.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_YZ == 1:
plt.savefig(savedir + "diff_amp_YZ.png", bbox_inches="tight")
fig, ax1 = plt.subplots(1, 1)
plt1 = ax1.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
vmin=-100,
vmax=100,
cmap=my_cmap,
)
ax1.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax1.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax1.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_XZ == 1:
plt.savefig(savedir + "diff_amp_XZ.png", bbox_inches="tight")
fig, ax2 = plt.subplots(1, 1)
plt2 = ax2.imshow(
masked_array[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
vmin=-100,
vmax=100,
cmap=my_cmap,
)
ax2.invert_yaxis()
ax2.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax2.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax2.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_XY == 1:
plt.savefig(savedir + "diff_amp_XY.png", bbox_inches="tight")
plt.colorbar(plt2, ax=ax2)
plt.savefig(savedir + "diff_amp_XY_colorbar.png", bbox_inches="tight")
del diff_amp_copy
support[amp_simu == 0] = 0 # redefine the support as the simulated object
##############################
# plot individual strain maps
##############################
new_strain_copy = np.copy(new_strain)
new_strain_copy[new_amp == 0] = np.nan
masked_array = np.ma.array(new_strain_copy, mask=np.isnan(new_strain_copy))
if debug:
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2, 2)
plt0 = ax0.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
cmap=my_cmap,
vmin=-strain_range,
vmax=strain_range,
)
plt.colorbar(plt0, ax=ax0)
plt1 = ax1.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=-strain_range,
vmax=strain_range,
)
plt.colorbar(plt1, ax=ax1)
plt2 = ax2.imshow(
masked_array[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=-strain_range,
vmax=strain_range,
)
ax2.invert_yaxis()
plt.colorbar(plt2, ax=ax2)
plt.title("new_strain")
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
vmin=-strain_range,
vmax=strain_range,
cmap=my_cmap,
)
ax0.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_YZ == 1:
plt.savefig(savedir + "phased_strain_YZ" + comment + ".png", bbox_inches="tight")
fig, ax1 = plt.subplots(1, 1)
plt1 = ax1.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
vmin=-strain_range,
vmax=strain_range,
cmap=my_cmap,
)
ax1.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax1.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax1.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_XZ == 1:
plt.savefig(savedir + "phased_strain_XZ" + comment + ".png", bbox_inches="tight")
fig, ax2 = plt.subplots(1, 1)
plt2 = ax2.imshow(
masked_array[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
vmin=-strain_range,
vmax=strain_range,
cmap=my_cmap,
)
ax2.invert_yaxis()
ax2.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax2.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax2.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_XY == 1:
plt.savefig(savedir + "phased_strain_XY" + comment + ".png", bbox_inches="tight")
plt.colorbar(plt2, ax=ax2)
plt.savefig(
savedir + "phased_strain_XY" + comment + "_colorbar.png", bbox_inches="tight"
)
del new_strain_copy
strain_simu[
bulk_simu == 0
] = np.nan # remove the non-physical outer layer for simulated strain
masked_array = np.ma.array(strain_simu, mask=np.isnan(strain_simu))
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2, 2)
plt0 = ax0.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
cmap=my_cmap,
vmin=-strain_range,
vmax=strain_range,
)
plt.colorbar(plt0, ax=ax0)
plt1 = ax1.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=-strain_range,
vmax=strain_range,
)
plt.colorbar(plt1, ax=ax1)
plt2 = ax2.imshow(
masked_array[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=-strain_range,
vmax=strain_range,
)
plt.colorbar(plt2, ax=ax2)
ax2.invert_yaxis()
plt.title("strain_simu")
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
vmin=-strain_range,
vmax=strain_range,
cmap=my_cmap,
)
ax0.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_YZ == 1:
plt.savefig(savedir + "simu_strain_YZ" + comment + ".png", bbox_inches="tight")
fig, ax1 = plt.subplots(1, 1)
plt1 = ax1.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
vmin=-strain_range,
vmax=strain_range,
cmap=my_cmap,
)
ax1.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax1.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax1.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_XZ == 1:
plt.savefig(savedir + "simu_strain_XZ" + comment + ".png", bbox_inches="tight")
fig, ax2 = plt.subplots(1, 1)
plt2 = ax2.imshow(
masked_array[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
vmin=-strain_range,
vmax=strain_range,
cmap=my_cmap,
)
ax2.invert_yaxis()
ax2.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax2.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax2.tick_params(
labelbottom=False,
labelleft=False,
top=True,
right=True,
direction=tick_direction,
length=tick_length,
width=tick_width,
)
if save_XY == 1:
plt.savefig(savedir + "simu_strain_XY" + comment + ".png", bbox_inches="tight")
plt.colorbar(plt2, ax=ax2)
plt.savefig(savedir + "simu_strain_XY" + comment + "_colorbar.png", bbox_inches="tight")
##############################
# plot difference strain maps
##############################
diff_strain = strain_simu - new_strain
# the support is 0 outside of the simulated object, strain is not defined there
diff_strain[support == 0] = np.nan
masked_array = np.ma.array(diff_strain, mask=np.isnan(diff_strain))
if debug:
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2, 2)
plt0 = ax0.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
],
cmap=my_cmap,
vmin=-strain_range,
vmax=strain_range,
)
plt.colorbar(plt0, ax=ax0)
plt1 = ax1.imshow(
masked_array[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=-strain_range,
vmax=strain_range,
)
plt.colorbar(plt1, ax=ax1)
plt2 = ax2.imshow(
masked_array[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
],
cmap=my_cmap,
vmin=-strain_range,
vmax=strain_range,
)
ax2.invert_yaxis()
plt.colorbar(plt2, ax=ax2)
plt.title("(strain_simu - new_strain) on full data")
phased_support = np.ones(amp_simu.shape)
phased_support[new_amp < support_threshold] = 0
if debug:
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2, 2)
plt0 = ax0.imshow(
phased_support[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2,
]
)
plt.colorbar(plt0, ax=ax0)
plt1 = ax1.imshow(
phased_support[
numz // 2 - pixel_FOV : numz // 2 + pixel_FOV,
numy // 2,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
]
)
plt.colorbar(plt1, ax=ax1)
plt2 = ax2.imshow(
phased_support[
numz // 2,
numy // 2 - pixel_FOV : numy // 2 + pixel_FOV,
numx // 2 - pixel_FOV : numx // 2 + pixel_FOV,
]
)
ax2.invert_yaxis()
plt.colorbar(plt2, ax=ax2)
plt.title("Phased support")
diff_strain[phased_support == 0] = np.nan  # also exclude layers outside of the isosurface for the reconstruction
masked_array = np.ma.array(diff_strain, mask=
|
np.isnan(diff_strain)
|
numpy.isnan
|
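# Editorial illustration (separate from the dataset rows around it): the pattern used
# above wraps an array in np.ma.array with an isnan mask so plotting and statistics
# ignore NaN voxels; the small array below is made up.
import numpy as np
demo = np.array([[0.1, np.nan], [0.3, 0.5]])
demo_masked = np.ma.array(demo, mask=np.isnan(demo))
print(demo_masked.mean())  # 0.3, computed over the three unmasked cells only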
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow import keras
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.optimizers import RMSprop
from sklearn.impute import SimpleImputer
from sklearn import preprocessing
from sklearn.decomposition import PCA
import os.path
import sys
import scipy.io as sio
import functions
import matplotlib
matplotlib.rcParams["backend"] = "TkAgg"
import matplotlib.pyplot as plt
stacked_train1 = np.load("stacked_train1.npy")
stacked_train2 = np.load("stacked_train2.npy")
stacked_train3 = np.load("stacked_train3.npy")
stacked_train4 = np.load("stacked_train4.npy")
stacked_train = np.vstack((stacked_train1, stacked_train2))
stacked_train = np.vstack((stacked_train, stacked_train3))
stacked_train = np.vstack((stacked_train, stacked_train3))
stacked_train = np.vstack((stacked_train, stacked_train4))
pca = PCA(n_components=40)
pca.fit(stacked_train)
evr =
|
np.zeros(40)
|
numpy.zeros
|
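# Editorial illustration (not part of the row above): after PCA.fit, scikit-learn exposes
# the per-component explained variance fraction directly; the data below are synthetic.
import numpy as np
from sklearn.decomposition import PCA
X_demo = np.random.rand(200, 60)
pca_demo = PCA(n_components=40).fit(X_demo)
evr_demo = pca_demo.explained_variance_ratio_   # shape (40,), entries sum to at most 1
print(evr_demo.cumsum()[-1])                    # cumulative variance captured by 40 components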
# -*- coding: utf-8 -*-
"""
@author: <NAME>
The work is licensed under the MIT licence, for more details:
https://github.com/LiorReznik/Mde-release-1/blob/master/LICENSE
"""
import numpy as np
import os, pickle
from collections import OrderedDict
import urllib,zipfile
from stanza.server import CoreNLPClient
from singleton import Singleton
class DataPreparation(metaclass=Singleton):
def __init__(self,logger):
self.logger = logger
        self.download_manager
self.name = "wolfram"
def __call__(self,data:list,depth:str):
"""
method to manage all the preprocessing pipe
Parameters
----------
data : list
raw data to be preprocessed.
depth : str
the depth of preprocessing.
Returns
-------
numpy
preprocessed data.
"""
print(depth)
self.__instances = data
# init the preprocessed vocab
self.preprocess = {"X": [], "M": [], "ML": [], "MLD": [], "NONE": [],
"depth": depth.lower(), "maxlen": 0,'deps2ids': OrderedDict()}
self.preprocessing_data
return self.preprocess['X']
@property
    def download_manager(self):
"""
method to manage the download and extraction of
stanfordcoreNLP server and fastText vectors
Returns
-------
None.
"""
if not os.path.exists("./data"):
os.makedirs("./data")
for path in (( "http://nlp.stanford.edu/software/stanford-corenlp-4.0.0.zip",
"./data/corenlp.zip","./data","stanford-corenlp-4.0.0",True),
( "http://nlp.stanford.edu/software/stanford-corenlp-4.0.0-models-english.jar",
"./data/stanford-corenlp-4.0.0/stanford-corenlp-4.0.0-models-english.jar","./data/stanford-corenlp-4.0.0","stanford-corenlp-4.0.0-models-english.jar",False),
( "http://nlp.stanford.edu/software/stanford-corenlp-4.0.0-models-english-kbp.jar",
"./data/stanford-corenlp-4.0.0/stanford-corenlp-4.0.0-models-english-kbp.jar","./data/stanford-corenlp-4.0.0","stanford-corenlp-4.0.0-models-english-kbp.jar",False),
( "https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip",
"./data/fastText.zip","./data","wiki-news-300d-1M.vec",True),
):
self.download_resorces(download_path=path[0],
extract_archive=path[1],
extract_path=path[2],
extraction_name=path[-2],
extract=path[-1]
)
os.environ["CORENLP_HOME"] = "./data/stanford-corenlp-4.0.0"
def download_resorces(self,**kwargs):
"""
        download and extract resources from the net
Returns
-------
None.
"""
def download():
try:
self.logger.info("Downloading: {}".format(kwargs.get("download_path")))
_, _ = urllib.request.urlretrieve(kwargs.get("download_path"), kwargs.get("extract_archive"))
self.logger.info("Download has completed")
            except urllib.error.URLError as e:
                self.logger.info(str(e))
def extract():
self.logger.info("starting the extraction of {}".format(kwargs.get("extract_archive")))
with zipfile.ZipFile(kwargs.get("extract_archive"), 'r') as zip_ref:
zip_ref.extractall(kwargs.get("extract_path","./data"))
self.logger.info("done extracting")
if not os.path.exists(kwargs.get("extract_archive")):
download()
else:
self.logger.info("Skipping Download,The Archive already in the HD")
if kwargs.get("extract",True) and not os.path.exists("{}/{}".format(kwargs.get("extract_path"),kwargs.get("extraction_name"))):
extract()
else:
self.logger.info("Skipping extraction,The folder already in the HD")
@property
def load_embeddings(self) -> tuple:
"""
Method to load fastText vectors
Returns
-------
tuple
vocab,word vectors and dims of the vectors.
"""
self.logger.info("starting to load embeddings")
with open(os.path.join(os.getcwd(),"./data/wiki-news-300d-1M.vec"), 'r', encoding='utf-8', newline='\n', errors='ignore') as f:
_, dims = map(int, f.readline().split())
model = {}
for line in f:
tokens = line.rstrip().split(' ')
model[tokens[0]] = np.asarray(tokens[1:], "float32")
vocab =set(model.keys())
self.logger.info("Done Loading Emmbedings")
return vocab, model, dims
@property
def preprocessing_data(self):
"""
Method that takes all the instances in the data and preprocess them.
"""
def pad_words(tokens:list, append_tuple:bool=False)->list:
"""""
Function to pad/shrink the sentence to the maxlen length
"""""
# shrinking
if len(tokens) > self.preprocess['maxlen']:
return tokens[:self.preprocess['maxlen']]
else:
# padding
for _ in range(self.preprocess['maxlen'] - len(tokens)):
tokens.append(('UNK', 'UNK') if append_tuple else 'UNK')
return tokens
def prep_func():
def final_tuning():
"""""
function to add all of the matrices together
"""""
del self.__instances
self.logger.info("in final tuning")
if self.preprocess['depth'] == 'm':
del self.preprocess['ML']
del self.preprocess['MLD']
self.preprocess['X'] = np.concatenate([self.preprocess["NONE"],
self.preprocess['M']], axis=1)
print(self.preprocess['X'])
elif self.preprocess['depth'] == 'ml':
del self.preprocess['M']
del self.preprocess['MLD']
self.preprocess['X'] = np.concatenate([self.preprocess["NONE"],
self.preprocess["ML"]], axis=1)
else:
del self.preprocess['M']
del self.preprocess['ML']
self.preprocess['X'] = np.array(self.preprocess["MLD"])
def get_depth():
"""""
function to find the depth of all the words in the dependency tree
"""""
nonlocal tree
number = 0
while True:
keys = [key for key, value in tree.items() if value == number]
if not keys:
break
number += 1
tree = {key: number if value in keys else value for key, value in tree.items()}
def get_pairs_and_dep():
"""""
function that parses a json_obj that is a result of dependency parsing from stanford core nlp server
:return dict of dicts that contains pair and dep ->word pairs(parent,child) and dependencies tree
"""""
nonlocal pairs_dep, tree
stanford_obj = self.__nlp.annotate(sent, properties={'annotators': "depparse",
'outputFormat': 'json', 'timeout':'5000000'})
pairs_dep, tree = OrderedDict(), {}
tree[(stanford_obj['sentences'][0]['basicDependencies'][0]['dependentGloss'],
stanford_obj['sentences'][0]['basicDependencies'][0]['dependent'])] = 0
for index, dict in enumerate(stanford_obj['sentences'][0]['basicDependencies'][1:]):
tree[(dict['dependentGloss'], dict['dependent'])] = (dict['governorGloss'], dict['governor'])
pairs_dep[index] = {
'word_pair': (dict['dependentGloss'], dict['governorGloss'], dict['dependent']),
'dependency': dict['dep'],
}
def build_lists_of_pairs():
"""""
function that builds and returns tuple of lists : (1ist of word pairs, list of dependencies)
"""""
nonlocal word_pairs, dependencies
words_pairs, dependencies = [], []
for token in pairs_dep.values():
words_pairs.append((token['word_pair']))
dependencies.append(token['dependency'])
word_pairs, dependencies = pad_words(words_pairs, append_tuple=True), pad_words(dependencies)
def build_head_modifier_vectors():
"""""
function that builds a vector out of the (head,modifier) pair in case of m/ml avg vector of 300 dims
in case of mld vector [head,modifier] of 600 dims
"""""
nonlocal head_modifer_vec
head, modifier = word_pair[0], word_pair[1]
head_vec = model[head] if head != 'UNK' and head in vocab else
|
np.zeros(dims)
|
numpy.zeros
|
from vmad import autooperator, operator
from vmad.core import stdlib
from vmad.lib import fastpm
from vmad.lib import linalg
from vmad.lib.fastpm import FastPMSimulation, ParticleMesh
import numpy
from vmad.lib.linalg import sum, mul
import scipy
from mpi4py import MPI
import numpy as np
import MADLens.PGD as PGD
from MADLens.util import save_snapshot, save3Dpower
from nbodykit.lab import FFTPower, ArrayCatalog
import pickle
import os
import errno
import resource
import logging
import sys
def BinarySearch_Left(mylist, items):
    "finds where to insert elements into a sorted array, this is the equivalent of numpy.searchsorted"
    print(mylist, items)
results =[]
for item in items:
if item>=max(mylist):
results.append(len(mylist))
elif item<=min(mylist):
results.append(0)
else:
results.append(binarysearch_left(mylist,item, low=0, high=len(mylist)-1))
return np.asarray(results, dtype=int)
def binarysearch_left(A, value, low, high):
"left binary search"
if (high < low):
return low
mid = (low + high) //2
if (A[mid] >= value):
return binarysearch_left(A, value, low, mid-1)
else:
return binarysearch_left(A, value, mid+1, high)
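# Editorial self-check (not in the original file): BinarySearch_Left is intended to match
# numpy.searchsorted(..., side='left') away from the list endpoints; the grid is made up.
demo_grid = [0.0, 1.0, 2.0, 5.0]
assert np.array_equal(BinarySearch_Left(demo_grid, [1.5, 2.0]),
                      np.searchsorted(demo_grid, [1.5, 2.0], side='left'))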
class mod_list(list):
def __add__(self,other):
assert(len(other)==len(self))
return [self[ii]+other[ii] for ii in range(len(self))]
@operator
class list_elem:
"""
take an item from a list
"""
ain = {'x' : '*',}
aout = {'elem' : '*'}
def apl(node, x, i):
elem = x[i]
return dict(elem=elem, x_shape=[numpy.shape(xx) for xx in x])
def vjp(node, _elem, x_shape, i):
_x = []
for ii in range(len(x_shape)):
_x.append(numpy.zeros(x_shape[ii],dtype='f8'))
_x[i][:] = _elem
return dict(_x=_x)
def jvp(node,x_, x, i):
elem_ = x_[i]
return dict(elem_=elem_)
@operator
class list_put:
"""
put an item into a list
"""
ain = {'x': 'ndarray', 'elem': 'ndarray'}
aout = {'y': 'ndarray'}
def apl(node, x, elem, i):
y = x
y[i] = elem
return dict(y=y, len_x = len(x))
def vjp(node, _y, len_x, i):
_elem = _y[i]
_x = mod_list([_y[ii] for ii in range(len_x)])
_x[i] = np.zeros_like(_elem)
return dict(_x=_x, _elem=_elem)
def jvp(node, x_, elem_, len_x, i):
deriv = numpy.ones(len_x)
deriv[i] = 0
deriv_ = np.zeros(len_x)
deriv_[i]= 1
elem_ = np.asarray(elem_,dtype=object)
e = np.asarray([elem_ for ii in range(len_x)],dtype=object)
y_ = numpy.einsum('i,i...->i...',deriv,x_)+numpy.einsum('i,i...->i...',deriv_,e)
y_ = mod_list(y_)
return dict(y_=y_)
@operator
class chi_z:
"""
go from comsoving distance to redshift
"""
ain = {'z' : 'ndarray'}
aout = {'chi': 'ndarray'}
def apl(node, z, cosmo):
return dict(chi = cosmo.comoving_distance(z))
def vjp(node, _chi, z, cosmo):
res = 1./cosmo.efunc(z)/cosmo.H0*cosmo.C
return dict(_z = numpy.multiply(res,_chi))
def jvp(node, z_, z, cosmo):
res = 1./cosmo.efunc(z)/cosmo.H0*cosmo.C
return dict(chi_ = numpy.multiply(res,z_))
@operator
class z_chi:
"""
go from redshift to comoving distance
"""
ain = {'chi' : 'ndarray'}
aout = {'z': 'ndarray'}
def apl(node, chi, cosmo, z_chi_int):
return dict(z = z_chi_int(chi))
def vjp(node, _z, z, cosmo, z_chi_int):
res = cosmo.efunc(z)*cosmo.H0/cosmo.C
return dict(_chi = res*_z)
def jvp(node, chi_, z, cosmo, z_chi_int):
res = cosmo.efunc(z)*cosmo.H0/cosmo.C
return dict(z_ = res*chi_)
def get_PGD_params(B,res,n_steps,pgd_dir):
"""
loads PGD params from file
B: force resolution parameter
res: resolution: Boxsize/Nmesh
nsteps: number of fastpm steps
pgd_dir: directory in which PGD parameter files are stored
"""
pgd_file= os.path.join(pgd_dir,'pgd_params_%d_%d_%d.pkl'%(B,res,n_steps))
if not os.path.isfile(pgd_file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), pgd_file)
else:
pgd_params = pickle.load(open(pgd_file,'rb'))
alpha0 = pgd_params['alpha0']
mu = pgd_params['mu']
kl = pgd_params['kl']
ks = pgd_params['ks']
return kl, ks, alpha0, mu
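# Hypothetical usage sketch for get_PGD_params (the file written below and all numbers
# are illustrative placeholders, not calibrated PGD parameters).
_demo_pgd = {'alpha0': 0.1, 'mu': 0.2, 'kl': 0.3, 'ks': 0.4}
pickle.dump(_demo_pgd, open(os.path.join('.', 'pgd_params_2_4_10.pkl'), 'wb'))
kl_demo, ks_demo, alpha0_demo, mu_demo = get_PGD_params(B=2, res=4, n_steps=10, pgd_dir='.')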
class ImageGenerator:
"""
rotates, shifts and stacks simulation boxes to fill the observed volume
so far only 90 degree rotations and shifts are supported but more transformation can be added
"""
def __init__(self, pm, ds, vert_num):
"""
defines rotations and shifts
pm : 3D pmesh object
ds : maximal distance to source plane
vert_num: how many times to repeat the box in vertical direction
"""
self.BoxSize = pm.BoxSize
self.chi_source = ds
self.vert_num = np.ceil(vert_num)
# basis vectors
x = np.asarray([1,0,0],dtype=int)
y =
|
np.asarray([0,1,0],dtype=int)
|
numpy.asarray
|
from numpy import array, matrix, zeros, linspace, arange
# from matplotlib.pyplot import *
import scipy.linalg
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, figure, xlim, ylim, title, xlabel, ylabel, show
import numpy as np
plt.rcParams['figure.figsize'] = (10, 8) # (width, height)
def cst_fem(structure='9node'):
'''
    Gusset plate problem using 8 CST elements. Uniform load across top edge
is modeled with 2 concentrated forces
structure = ['truss','4node', '9node']
'''
## define variables
E = 10e6 # modulus of elasticity
L = 20 # length of sketch (see drawing)
Q = 1000 # pounds/inch load
plotfactor = 1e2 # displacement factor for plot
poisson = 0.3 # poisson ratio
## Set nodal coordinates and element destination entries.
# u1 u2 u3 u4 u5 u6 u7 u8
# ==============================================================================
if structure == '4node':
        nodexy = array([0, 0, 10, 10, 20, 20, 0, 20]) # global node coordinates (arbitrary)
nodeBC = array([1, 1, 0, 0, 0, 0, 1, 1]) # boundary conditions, 1 if u=0
nodex = list(nodexy[0::2])
nodey = list(nodexy[1::2])
####nodexyplot = [nodex, nodey]
nodexyT = list(zip(nodex, nodey))
### list(zip(nodexplot, nodeyplot))
#### node 0 1 2 3
adj = array([[0, 1, 0, 1],
[0, 0, 1, 1],
[0, 0, 0, 1],
[0, 0, 0, 0]])
#### x y x y x y
#### u1 u2 u# u# u# u#
elnodes = array([[0, 1, 2, 3, 6, 7],
[6, 7, 2, 3, 4, 5]]) # 1 element per row, dofs labled CCW (arbitrary)
# ==============================================================================
elif structure == '9node':
# 9 nodes
nodexy = array([0, 0, L / 4, L / 4, L / 2, L / 2, 3 * L / 4, 3 * L / 4, L,
L, L / 2, L, L / 4, 3 * L / 4, 0, L, 0, L / 2]) # global node coordinets (arbitrary)
# u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11 u12
nodeBC = array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]) # is dof fixed?
# x y x y x y
# u1 u2 u# u# u# u#
elnodes = array([[0, 1, 2, 3, 16, 17],
[2, 3, 4, 5, 16, 17],
[4, 5, 12, 13, 16, 17],
[4, 5, 10, 11, 12, 13],
[4, 5, 6, 7, 10, 11],
[6, 7, 8, 9, 10, 11],
[12, 13, 10, 11, 14, 15],
[16, 17, 12, 13, 14, 15]]) # 1 element per row, dofs labled CCW (arbitrary)
adj = array([[0, 1, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 1, 1, 0, 1],
[0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
# ==============================================================================
elif structure == 'truss':
nodexy = array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 5, 1, 4, 1, 3, 1, 2, 1, 1, 1, 0, 1])
nodeBC = array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
elnodes = array([[0, 1, 2, 3, 22, 23],
[2, 3, 4, 5, 20, 21],
[4, 5, 6, 7, 18, 19],
[6, 7, 8, 9, 16, 17],
[8, 9, 10, 11, 14, 15],
[10, 11, 12, 13, 14, 15],
[8, 9, 14, 15, 16, 17],
[6, 7, 16, 17, 18, 19],
[4, 5, 18, 19, 20, 21],
[2, 3, 20, 21, 22, 23]])
nodes = int(len(nodexy) // 2)
adj = np.zeros((nodes, nodes))
conmat = array([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 7, 8, 9, 10],
[1, 11, 2, 10, 11, 3, 9, 10, 4, 8, 9, 5, 7, 8, 6, 7, 7, 8, 9, 10, 11]])
conmat = np.transpose(conmat)
for i in range(len(conmat)):
adj[conmat[i, 0], conmat[i, 1]] = 1
#### Begin calculations
nodex = list(nodexy[0::2])
nodey = list(nodexy[1::2])
####nodexyplot = [nodex, nodey]
nodexyT = list(zip(nodex, nodey))
### list(zip(nodexplot, nodeyplot))
elements = int(len(elnodes)) # Number of elements
nodes = int(len(nodexy) // 2) # number of nodes
doftotal = int(nodes * 2) # number of total degrees of freedom
nodexyplot = zeros((nodes, 2)) # global coordinates of nodes for plotting
    nodeplotload = zeros((nodes, 2)) # global coordinates for deflected nodes for plotting
P = zeros((doftotal, 1)) # total load vector
U = zeros((doftotal, 1)) # displacements
Ue =
|
zeros((6, 1))
|
numpy.zeros
|
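# Editorial illustration (separate from the rows around it): how a row of elnodes
# (global DOF indices) pulls element coordinates and scatters a local matrix into the
# global stiffness matrix; the sizes and the identity "element matrix" are made up.
import numpy as np
nodexy_demo = np.array([0.0, 0.0, 10.0, 10.0, 20.0, 20.0, 0.0, 20.0])  # x,y interleaved
elnodes_demo = np.array([[0, 1, 2, 3, 6, 7]])                          # one CST element, 6 DOFs
K_demo = np.zeros((nodexy_demo.size, nodexy_demo.size))
dofs = elnodes_demo[0]
K_demo[np.ix_(dofs, dofs)] += np.eye(6)                                # scatter-add local 6x6 block
corner_xy = nodexy_demo[dofs].reshape(3, 2)                            # (x, y) of the 3 corner nodes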
import numpy as np
import pandas as pd
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
from torch.autograd import Variable
import torch.utils.data as data
import os
class synth_data2(data.Dataset):
def __init__(self, args,k_dist=None,d_dist=None):
self.args=args
self.orig_N=args['N']
self.new_N=args['N']
self.k_dist=k_dist
self.d_dist=d_dist
if args['T']<=args['l']: print('Uhoh: T<=k')
"""Gen X"""
x_size = args['N']*args['T']*args['d']
self.x=np.zeros(x_size)
self.x[np.random.choice(x_size, size=int(x_size/10), replace=False)]=np.random.uniform(size=int(x_size/10))*100
self.x=np.resize(self.x, (args['N'],args['T'],args['d']))
"""Gen y"""
if (self.k_dist is None) or (self.d_dist is None):
self.k_dist = []
self.d_dist = []
for i in range(args['T']):
# If i<k, we won't evaluate using that timestep therefore it doesn't matter
if i<args['l']:
self.k_dist.append(np.ones(args['l']))
self.d_dist.append(np.ones(args['d']))
elif i==args['l']:
self.k_dist.append(self.convert_distb(np.random.uniform(size=(args['l']))))
self.d_dist.append(self.convert_distb(np.random.uniform(size=(args['d']))))
else:
delta_t = np.random.uniform(-args['delta'],args['delta'],size=(args['l']))
delta_d =
|
np.random.uniform(-args['delta'],args['delta'],size=(args['d']))
|
numpy.random.uniform
|
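# Editorial illustration of the sparse-fill pattern used for self.x above (sizes are
# arbitrary): roughly 10% of entries receive uniform values scaled by 100.
import numpy as np
x_size_demo = 1000
x_demo = np.zeros(x_size_demo)
idx_demo = np.random.choice(x_size_demo, size=x_size_demo // 10, replace=False)
x_demo[idx_demo] = np.random.uniform(size=x_size_demo // 10) * 100
x_demo = np.resize(x_demo, (10, 10, 10))   # reshape to (N, T, d)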
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test():
layout = ak.layout.ListOffsetArray64(
ak.layout.Index64(np.array([0, 1], dtype=np.int64)),
ak.layout.IndexedArray64(
ak.layout.Index64(
|
np.array([0, 1, 2, 3], dtype=np.int64)
|
numpy.array
|
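# Editorial illustration (not part of the test above): a minimal ListOffsetArray built with
# the same awkward-1.x layout classes, then wrapped into an ak.Array; values are made up.
import numpy as np
import awkward as ak
offsets_demo = ak.layout.Index64(np.array([0, 2, 4], dtype=np.int64))
content_demo = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4]))
array_demo = ak.Array(ak.layout.ListOffsetArray64(offsets_demo, content_demo))
assert ak.to_list(array_demo) == [[1.1, 2.2], [3.3, 4.4]]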
# -*- coding: utf-8 -*-
import os
import re
import yaml
import numpy as np
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense
from tensorflow.keras.models import Model
from tensorflow.keras import preprocessing, utils
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def home():
return render_template("index.html")
def tokenize(sentences):
tokens_list = []
vocabulary = []
for sentence in sentences:
sentence = sentence.lower()
sentence = re.sub('[^a-zA-Z]', ' ', sentence)
tokens = sentence.split()
vocabulary += tokens
tokens_list.append(tokens)
return tokens_list, vocabulary
""" Defining inference models """
def make_inference_models():
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(200 ,))
decoder_state_input_c = Input(shape=(200 ,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(decoder_embedding, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
return encoder_model, decoder_model
""" Talking with Chatbot """
def str_to_tokens(sentence : str):
signal = 0
sentence = sentence.lower()
sentence = re.sub('[^a-zA-Z]', ' ', sentence)
words = sentence.split()
tokens_list = list()
for word in words:
if word not in tokenizer.word_index:
print("Unintelligible sentence")
""" send the signal to indicate the unclear sentence """
signal = -1
else:
tokens_list.append(tokenizer.word_index[word])
return preprocessing.sequence.pad_sequences([tokens_list], maxlen=maxlen_questions, padding='post'), signal
""" Load QA data """
dir_path = 'data'
files_list = os.listdir(dir_path + os.sep)
questions = list()
answers = list()
for filepath in files_list:
stream = open(dir_path + os.sep + filepath, 'rb')
docs = yaml.safe_load(stream)
conversations = docs['conversations']
for con in conversations:
if len(con) > 2 :
questions.append(con[0])
replies = con[1: ]
ans = ''
for rep in replies:
ans += ' ' + rep
answers.append( ans )
elif len(con) > 1:
questions.append(con[0])
answers.append(con[1])
answers_with_tags = list()
filtered_questions = list()
for i in range(len(answers)):
    if type(answers[i]) == str:
        answers_with_tags.append(answers[i])
        filtered_questions.append(questions[i])
questions = filtered_questions  # keep questions aligned with string answers (popping by index would shift positions)
answers = list()
for i in range(len(answers_with_tags)) :
answers.append('<START> ' + answers_with_tags[i] + ' <END>')
tokenizer = preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(questions + answers)
VOCAB_SIZE = len(tokenizer.word_index) + 1
#print('VOCAB SIZE : {}'.format(VOCAB_SIZE))
""" Preparing data for Seq2Seq model """
vocab = []
for word in tokenizer.word_index:
vocab.append(word)
""" encoder_input_data """
tokenized_questions = tokenizer.texts_to_sequences(questions)
maxlen_questions = max([len(x) for x in tokenized_questions])
padded_questions = preprocessing.sequence.pad_sequences(tokenized_questions, maxlen=maxlen_questions ,padding='post')
encoder_input_data = np.array(padded_questions)
print(encoder_input_data.shape, maxlen_questions)
""" decoder_input_data """
tokenized_answers = tokenizer.texts_to_sequences(answers)
maxlen_answers = max([len(x) for x in tokenized_answers])
padded_answers = preprocessing.sequence.pad_sequences(tokenized_answers, maxlen=maxlen_answers, padding='post' )
decoder_input_data = np.array(padded_answers)
print(decoder_input_data.shape, maxlen_answers)
""" decoder_output_data """
tokenized_answers = tokenizer.texts_to_sequences(answers)
for i in range(len(tokenized_answers)) :
tokenized_answers[i] = tokenized_answers[i][1:]
padded_answers = preprocessing.sequence.pad_sequences(tokenized_answers, maxlen=maxlen_answers, padding='post')
onehot_answers = utils.to_categorical(padded_answers, VOCAB_SIZE)
decoder_output_data = np.array(onehot_answers)
print(decoder_output_data.shape)
""" Defining the Encoder-Decoder model """
encoder_inputs = Input(shape=(maxlen_questions, ))
encoder_embedding = Embedding(VOCAB_SIZE, 200, mask_zero=True)(encoder_inputs)
encoder_outputs, state_h, state_c = LSTM(200, return_state=True)(encoder_embedding)
encoder_states = [state_h, state_c]
decoder_inputs = Input(shape=(maxlen_answers, ))
decoder_embedding = Embedding(VOCAB_SIZE, 200, mask_zero=True)(decoder_inputs)
decoder_lstm = LSTM(200, return_state=True, return_sequences=True)
decoder_outputs , _ , _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
decoder_dense = Dense(VOCAB_SIZE, activation='softmax')
output = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], output)
""" load the pre-trained model (from chatbot_seq2seq_lstm.py)"""
model.load_weights('model.h5')
encode_model, decode_model = make_inference_models()
@app.route("/get", methods=["POST"])
def chatbot_response():
msg = request.form["msg"]
text, signal = str_to_tokens(msg)
states_values = encode_model.predict(text)
if signal == -1:
return "Sorry...I cannot understand what you said."
empty_target_seq = np.zeros((1, 1))
empty_target_seq[0, 0] = tokenizer.word_index['start']
stop_condition = False
decoded_translation = ''
while not stop_condition :
dec_outputs, h, c = decode_model.predict([empty_target_seq] + states_values)
sampled_word_index =
|
np.argmax(dec_outputs[0, -1, :])
|
numpy.argmax
|
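# Editorial illustration of the greedy decoding step above, with a synthetic stand-in for
# the decoder's softmax output (shape (1, t, vocab_size)); no trained model is involved.
import numpy as np
vocab_size_demo = 8
dec_outputs_demo = np.random.rand(1, 1, vocab_size_demo)
sampled_demo = int(np.argmax(dec_outputs_demo[0, -1, :]))   # most probable next token id
next_target_demo = np.zeros((1, 1))
next_target_demo[0, 0] = sampled_demo                       # fed back into the decoder next step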
import numpy as np
from utils import system
import pdb
import matplotlib.pyplot as plt
from ftocp import FTOCP
from nlp import NLP
from matplotlib import rc
from numpy import linalg as la
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# rc('text', usetex=True)
# =============================
# Initialize system parameters
x0 = np.zeros(4)
dt = 0.1 # Discretization time
sys = system(x0, dt) # initialize system object
maxTime = 14 # Simulation time
goal = np.array([10,10,0,np.pi/2])
# Initialize mpc parameters
N = 20; n = 4; d = 2;
Q = 1*np.eye(n)
R = 1*
|
np.eye(d)
|
numpy.eye
|
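# Editorial illustration (not from the script above): the quadratic stage cost these Q and R
# weights typically enter, evaluated on made-up state and input vectors.
import numpy as np
n_demo, d_demo = 4, 2
Q_demo, R_demo = np.eye(n_demo), np.eye(d_demo)
x_demo, u_demo = np.ones(n_demo), 0.5 * np.ones(d_demo)
stage_cost_demo = x_demo @ Q_demo @ x_demo + u_demo @ R_demo @ u_demo   # = 4.0 + 0.5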
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
import time
def buildFeatureFrame(filename,timepoint):
temp = np.asarray(np.load(filename,allow_pickle=True)).item()
imfilename = temp['filename']
img = io.imread(imfilename);
masks = clear_border(temp['masks'])
image_props = measure.regionprops_table(masks,
intensity_image=img,
properties=('label','area','filled_area', 'centroid',
'eccentricity','mean_intensity'))
im_df = pd.DataFrame(image_props)
im_df['time'] = timepoint
return(im_df)
def generateCandidates(image1, image2, im1_select, dist_multiplier=2):
delX = np.sqrt((image1['centroid-0'][im1_select]-image2['centroid-0'])**2+
(image1['centroid-1'][im1_select]-image2['centroid-1'])**2)
max_dist = dist_multiplier*min(delX)
candidates = np.array(delX[delX < max_dist].index)
return(candidates)
def generateLinks(filename_t0, filename_t1,timepoint, nnDist = 10,costMax=35, mN_Int = 10, mN_Ecc=4, mN_Area=25, mN_Disp=1):
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
arr = pd.DataFrame()
for i in np.array(ip0.index):
candidates = generateCandidates(ip0, ip1, i, dist_multiplier=nnDist)
canFRAME = pd.DataFrame(candidates)
canFRAME["1"] = i
arr = arr.append(canFRAME)
arr = arr.rename(columns={0: "t1", "1": "t0"})
arr = arr.reset_index(drop=True)
properties = pd.DataFrame()
mInt_0 = float(np.median(ip0.loc[:,['mean_intensity']]))
mInt_1 = float(np.median(ip1.loc[:,['mean_intensity']]))
for link in np.array(arr.index):
tmp_props_0 = (ip0.loc[arr.loc[link,["t0"]],:])
tmp_props_1 = (ip1.loc[arr.loc[link,["t1"]],:])
deltaInt = (np.abs((int(tmp_props_0["mean_intensity"])/mInt_0)-(int(tmp_props_1["mean_intensity"])/mInt_1))/
np.mean([(int(tmp_props_0["mean_intensity"])/mInt_0),(int(tmp_props_1["mean_intensity"])/mInt_1)]))
deltaArea = (np.abs(int(tmp_props_0['area']) - int(tmp_props_1['area']))/
np.mean([int(tmp_props_0["area"]),int(tmp_props_1["area"])]))
deltaEcc = np.absolute(float(tmp_props_0['eccentricity']) - float(tmp_props_1['eccentricity']))
deltaX = np.sqrt((int(tmp_props_0['centroid-0'])-int(tmp_props_1['centroid-0']))**2+
(int(tmp_props_0['centroid-1'])-int(tmp_props_1['centroid-1']))**2)
properties = properties.append(pd.DataFrame([int(tmp_props_0['label']),int(tmp_props_1['label']),
deltaInt ,deltaArea,deltaEcc,deltaX]).T)
properties = properties.rename(columns={0: "label_t0", 1: "label_t1", 2: "deltaInt",
3: "deltaArea", 4: "deltaEcc", 5: "deltaX"})
properties = properties.reset_index(drop=True)
properties["Cost"]=(properties.loc[:,"deltaInt"]*mN_Int)+(properties.loc[:,"deltaEcc"]*mN_Ecc)+(properties.loc[:,"deltaArea"]*mN_Area)+(properties.loc[:,"deltaX"]*mN_Disp)
properties["TransitionCapacity"]=1
properties = properties.loc[properties["Cost"]<costMax]
properties = properties.reset_index(drop=True)
return(properties)
def DivSimScore(daughterCell_1, daughterCell_2, FrameNext):
daughterStats_1 = FrameNext[(FrameNext['label'] == daughterCell_1)]
daughterStats_2 = FrameNext[(FrameNext['label'] == daughterCell_2)]
deltaInt = (np.abs((int(daughterStats_1["mean_intensity"]))-(int(daughterStats_2["mean_intensity"])))/
np.mean([(int(daughterStats_1["mean_intensity"])),(int(daughterStats_2["mean_intensity"]))]))
deltaArea = (np.abs(int(daughterStats_1['area']) - int(daughterStats_2['area']))/
np.mean([int(daughterStats_1["area"]),int(daughterStats_2["area"])]))
deltaEcc = np.absolute(float(daughterStats_1['eccentricity']) - float(daughterStats_2['eccentricity']))
deltaX = np.sqrt((int(daughterStats_1['centroid-0'])-int(daughterStats_2['centroid-0']))**2+
(int(daughterStats_1['centroid-1'])-int(daughterStats_2['centroid-1']))**2)
sims = pd.DataFrame([int(daughterCell_1),int(daughterCell_2),
deltaInt ,deltaArea,deltaEcc,deltaX]).T
sims = sims.rename(columns={0: "label_D1", 1: "label_D2", 2: "D2deltaInt",
3: "D2deltaArea", 4: "D2deltaEcc", 5: "D2deltaX"})
return(sims)
def DivSetupScore(motherCell, daughterCell_1, daughterCell_2, FrameCurr, FrameNext):
#determine similarities between mother and daughters
simDF = DivSimScore(daughterCell_1, daughterCell_2, FrameNext)
#determine relative area of mother compared to daughters
MotherArea = int(FrameCurr[(FrameCurr['label'] == motherCell)]['area'])
daughterArea_1 = int(FrameNext[(FrameNext['label'] == daughterCell_1)]['area'])
daughterArea_2 = int(FrameNext[(FrameNext['label'] == daughterCell_2)]['area'])
areaChange = MotherArea/(daughterArea_1 + daughterArea_2)
simDF["MDDeltaArea"] = areaChange
return(simDF)
def DivisionCanditates(propMtx, filename_t0,filename_t1,timepoint,mS_Area = 10, mS_Ecc = 2, mS_Int = 2, mS_Disp = 1, MDAR_thresh = 0.75, SDis_thresh = 20.0):
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
Mothers = np.unique(propMtx.loc[:,['label_t0']])
DivCandidacy = pd.DataFrame()
for cell in Mothers:
DaughtersPossible = (propMtx[(propMtx['label_t0'] == cell)].loc[:,'label_t1'])
DaughtersPairs = np.array(np.meshgrid(DaughtersPossible, DaughtersPossible)).T.reshape(-1,2)
Sisters = np.unique(np.sort(DaughtersPairs),axis=0)
for pair in range(Sisters.shape[0]):
if (Sisters[pair,0] != Sisters[pair,1]):
tmpScoreSetup = (DivSetupScore(cell,Sisters[pair,0], Sisters[pair,1], ip0,ip1))
LogicMDAR = (tmpScoreSetup["MDDeltaArea"]>MDAR_thresh)
ScoreSDis = (mS_Int*tmpScoreSetup["D2deltaInt"]) + (mS_Area*tmpScoreSetup["D2deltaArea"]) + (mS_Ecc*tmpScoreSetup["D2deltaEcc"]) + (mS_Disp*tmpScoreSetup["D2deltaX"])
LogicSDis = (ScoreSDis<SDis_thresh)
tmpCandidacy = pd.DataFrame([cell,Sisters[pair,0],Sisters[pair,1],(LogicSDis&LogicMDAR).bool()]).T
DivCandidacy = DivCandidacy.append(tmpCandidacy)
DivCandidacy = DivCandidacy.rename(columns={0: "Mother", 1: "Daughter1", 2: "Daughter2",3: "Div"})
DivCandidacy = DivCandidacy.reset_index(drop=True)
# select true values
DivSelect = DivCandidacy[(DivCandidacy['Div'] == True)]
DivConnects_1 = DivSelect[['Mother','Daughter1','Div']]
DivConnects_2 = DivSelect[['Mother','Daughter2','Div']]
DivConnects_1 = DivConnects_1.rename(columns={'Mother': "label_t0", 'Daughter1': "label_t1"})
DivConnects_2 = DivConnects_2.rename(columns={'Mother': "label_t0", 'Daughter2': "label_t1"})
DivConnects = pd.concat([DivConnects_1,DivConnects_2])
DivConnects = DivConnects.reset_index(drop=True)
return(DivConnects)
def UpdateConnectionsDiv(propMtx,DivCandidatesMtx):
propMtx.loc[propMtx['label_t0'].isin(np.unique(DivCandidatesMtx['label_t0'])),['TransitionCapacity']] = 2
for div in range(DivCandidatesMtx.shape[0]):
tmp_prop = propMtx.loc[(DivCandidatesMtx.loc[div,'label_t0'] ==propMtx['label_t0'])&(DivCandidatesMtx.loc[div,'label_t1'] ==propMtx['label_t1']),]
old_score = float(tmp_prop.loc[:,'Cost'])
new_score = (old_score/2)
propMtx.loc[(DivCandidatesMtx.loc[div,'label_t0'] ==propMtx['label_t0'])&(DivCandidatesMtx.loc[div,'label_t1'] ==propMtx['label_t1']),'Cost'] = new_score
return(propMtx)
def SolveMinCostTable(filename_t0, filename_t1, DivisionTable,timepoint, OpeningCost = 30, ClosingCost = 30):
#rename
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip0 = ip0.rename(columns={"label" : "label_t0"})
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
ip1 = ip1.rename(columns={"label" : "label_t1"})
ip0["slabel_t0"] = np.array(range(ip0.label_t0.shape[0]))+1
i0max = np.max(np.asarray(ip0["slabel_t0"]))
ip1["slabel_t1"] = np.array(range(i0max,i0max+ip1.label_t1.shape[0]))+1
i1max = np.max(np.asarray(ip1["slabel_t1"]))
i0_translation = ip0[["label_t0","slabel_t0"]]
i1_translation = ip1[["label_t1","slabel_t1"]]
result_tmp = pd.merge(DivisionTable, i0_translation, on=['label_t0'])
result = pd.merge(result_tmp, i1_translation, on=['label_t1'])
result_shorthand = result[['slabel_t0','slabel_t1','Cost','TransitionCapacity']]
transNodes0 = np.array(result_shorthand['slabel_t0']) ;
transNodes1 = np.array(result_shorthand['slabel_t1']) ;
transCosts = np.array(result_shorthand['Cost']) ;
transCaps = np.repeat(1,transNodes0.size) ;
sourceNodes0 = np.repeat([0],i1max)
sourceNodes1 = np.array(range(i1max))+1
sourceCosts = np.concatenate((np.repeat(1,ip0.shape[0]),np.repeat(OpeningCost,ip1.shape[0])), axis=None)
#Source capacities are dictates by which node could be splitting. Source capacity = 2 if there was a division candidate
tmpUnique0 = result_shorthand[["slabel_t0","TransitionCapacity"]].drop_duplicates()
HighCaps = tmpUnique0.loc[tmpUnique0["TransitionCapacity"]==2,]
LowCaps = pd.DataFrame(i0_translation).copy(deep=True)
LowCaps['Cap'] = 1
LowCaps.loc[LowCaps['slabel_t0'].isin(np.array(HighCaps['slabel_t0'])),'Cap'] = 2
sourceCaps = np.concatenate((
|
np.array(LowCaps['Cap'])
|
numpy.array
|
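# Editorial illustration of the regionprops_table -> DataFrame pattern used in
# buildFeatureFrame above, run on a tiny synthetic labeled image.
import numpy as np
import pandas as pd
from skimage import measure
labels_demo = np.zeros((8, 8), dtype=int)
labels_demo[1:3, 1:3] = 1
labels_demo[5:8, 5:8] = 2
intensity_demo = np.random.rand(8, 8)
props_demo = measure.regionprops_table(labels_demo, intensity_image=intensity_demo,
                                       properties=('label', 'area', 'centroid', 'mean_intensity'))
df_demo = pd.DataFrame(props_demo)   # one row per labeled object, one column per property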
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.cm as cm
import gym
import cv2
import sys
class AtariGame(object):
"""Environment wrapper for Atari Pong.
This class simplifies the interaction of the agent with the Pong
environment. The API follows the OpenAI gym API.
Each frame (RGB image) of shape (210, 160, 3) will be rescaled to
grayscale (84,84,1).
The observation state contains 4 stacked frames and is of shape
(84,84,4)
The last frame results from the current action
while the previous 3 frames from the previous 3 actions.
Actions for Atari Pong
0 (no operation)
1 (fire)
2 (right)
3 (left)
"""
def __init__(self, env_name, seed):
self.env_name = env_name
self.seed = seed
self.env = gym.make(self.env_name)
self.env.seed(self.seed)
self.lives = 0
self.n_plays = 0
self.done_game = True
# 4 frame stack
self.obs = np.zeros((84, 84, 4))
self.d_observation = (84,84,4)
# number of actions
# we fix the number of allowed actions to 2 (left or right)
self.actions = [2,3]
self.n_actions = len(self.actions)
# number of frames per step i.e. the same action
# is applied n_frames times
self.n_frames = 4
# maximal number of plays per game
# Atari Pong does not limit this number
self.max_n_plays = 18
def reset(self):
""" Reset environment """
if self.done_game:
# reset game
obs = self.env.reset()
# fire = start the game
obs, reward, done, info = self.env.step(1)
obs = self.encode_obs(obs) # (84, 84, 1)
# one game (episode) consists of several plays
self.done_game = False
self.n_plays = 0
self.lives = self.env.unwrapped.ale.lives()
# fill whole stack with current frame
self.obs[..., 0:]= np.copy(obs)
self.obs[..., 1:] = np.copy(obs)
self.obs[..., 2:] =
|
np.copy(obs)
|
numpy.copy
|
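# Editorial illustration of the 4-frame stacking described in the AtariGame docstring
# (not the class's exact update): roll the stack one slot and write the newest
# synthetic frame into the last channel.
import numpy as np
stack_demo = np.zeros((84, 84, 4))
frame_demo = np.random.rand(84, 84, 1)
stack_demo = np.roll(stack_demo, shift=-1, axis=-1)
stack_demo[..., -1:] = frame_demo   # most recent observation lives in the final channel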
from unittest.mock import Mock
import numpy as np
from shfl.data_base.data_base import DataBase
from shfl.data_distribution.data_distribution_iid import IidDataDistribution
from shfl.federated_aggregator.iowa_federated_aggregator import IowaFederatedAggregator
from shfl.federated_government.iowa_federated_government import IowaFederatedGovernment
class TestDataBase(DataBase):
def __init__(self):
super(TestDataBase, self).__init__()
def load_data(self):
self._train_data = np.random.rand(50).reshape([10, 5])
self._test_data = np.random.rand(50).reshape([10, 5])
self._train_labels = np.random.randint(0, 2, 10)
self._test_labels = np.random.randint(0, 2, 10)
def test_IowaFederatedGovernment():
model_builder = Mock
database = TestDataBase()
database.load_data()
db = IidDataDistribution(database)
num_nodes = 3
federated_data, test_data, test_labels = db.get_federated_data(num_nodes)
a = 0
b = 1
c = 2
y_b = 3
k = 4
dynamic = True
iowa_fg = IowaFederatedGovernment(model_builder, federated_data, model_params_access=None,
dynamic=dynamic, a=a, b=b, c=c, y_b=y_b, k=k)
assert isinstance(iowa_fg._aggregator, IowaFederatedAggregator)
assert isinstance(iowa_fg._model, model_builder)
assert np.array_equal(iowa_fg._federated_data, federated_data)
assert iowa_fg._a == a
assert iowa_fg._b == b
assert iowa_fg._c == c
assert iowa_fg._y_b == y_b
assert iowa_fg._k == k
assert iowa_fg._dynamic == dynamic
def test_performance_clients():
model_builder = Mock
database = TestDataBase()
database.load_data()
db = IidDataDistribution(database)
num_nodes = 3
federated_data, test_data, test_labels = db.get_federated_data(num_nodes)
iowa_fg = IowaFederatedGovernment(model_builder, federated_data)
for i, data_node in enumerate(iowa_fg._federated_data):
data_node.performance = Mock()
data_node.performance.return_value = i
res = np.arange(iowa_fg._federated_data.num_nodes())
data_val = np.random.rand(25).reshape((5, 5))
labels_val = np.random.randint(0, 2, 5)
performance = iowa_fg.performance_clients(data_val, labels_val)
assert np.array_equal(performance, res)
for data_node in iowa_fg._federated_data:
data_node.performance.assert_called_once_with(data_val, labels_val)
def test_run_rounds():
np.random.seed(123)
model_builder = Mock
database = TestDataBase()
database.load_data()
db = IidDataDistribution(database)
num_nodes = 3
federated_data, test_data, test_label = db.get_federated_data(num_nodes)
iowa_fg = IowaFederatedGovernment(model_builder, federated_data)
n = 1
iowa_fg.deploy_central_model = Mock()
iowa_fg.train_all_clients = Mock()
iowa_fg.evaluate_clients = Mock()
iowa_fg.performance_clients = Mock()
iowa_fg.performance_clients.return_value = 0
iowa_fg._aggregator.set_ponderation = Mock()
iowa_fg.aggregate_weights = Mock()
iowa_fg.evaluate_global_model = Mock()
iowa_fg.run_rounds(n, test_data, test_label)
# Replicate test an validate data
randomize = [0, 9, 3, 4, 6, 8, 2, 1, 5, 7]
test_data = test_data[randomize,]
test_label = test_label[randomize]
validation_data = test_data[:int(0.15 * len(test_label)), ]
validation_label = test_label[:int(0.15 * len(test_label))]
test_data = test_data[int(0.15 * len(test_label)):, ]
test_label = test_label[int(0.15 * len(test_label)):]
iowa_fg.deploy_central_model.assert_called_once()
iowa_fg.deploy_central_model.assert_called_once()
iowa_fg.train_all_clients.assert_called_once()
iowa_fg.evaluate_clients.assert_called_once()
assert len(iowa_fg.evaluate_clients.call_args[0]) == 2
np.testing.assert_array_equal(iowa_fg.evaluate_clients.call_args[0][0], test_data)
np.testing.assert_array_equal(iowa_fg.evaluate_clients.call_args[0][1], test_label)
iowa_fg.performance_clients.assert_called_once()
assert len(iowa_fg.performance_clients.call_args[0]) == 2
np.testing.assert_array_equal(iowa_fg.performance_clients.call_args[0][0], validation_data)
|
np.testing.assert_array_equal(iowa_fg.performance_clients.call_args[0][1], validation_label)
|
numpy.testing.assert_array_equal
|
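# Editorial illustration (independent of the tests above) of the Mock call_args pattern
# they rely on: reading back the positional arguments recorded for a call.
from unittest.mock import Mock
import numpy as np
mock_demo = Mock()
mock_demo(np.arange(3), np.arange(3) + 1)
mock_demo.assert_called_once()
first_arg, second_arg = mock_demo.call_args[0]          # positional args of the recorded call
np.testing.assert_array_equal(first_arg, np.arange(3))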
"""SMEFT beta functions"""
import numpy as np
from collections import OrderedDict
from wilson.util.smeftutil import C_keys, C_keys_shape, C_array2dict, C_dict2array
from functools import lru_cache
I3 = np.identity(3)
class HashableArray(np.ndarray):
def __new__(cls, data, dtype=None):
return np.array(data, dtype).view(cls)
def __hash__(self):
return hash(self.data.tobytes())
# return int(sha1(self).hexdigest(), 16)
def __eq__(self, other):
return np.all(np.ndarray.__eq__(self, other))
def __setitem__(self, key, value):
raise Exception('HashableArray is read-only')
def my_einsum(indices, *args):
hashargs = [HashableArray(arg) for arg in args]
return _cached_einsum(indices, *hashargs)
@lru_cache(2048)
def _cached_einsum(indices, *args):
return np.einsum(indices, *args)
def beta(C, HIGHSCALE=1, newphys=True):
"""Return the beta functions of all SM parameters and SMEFT Wilson
coefficients."""
g = C["g"]
gp = C["gp"]
gs = C["gs"]
m2 = C["m2"]
Lambda = C["Lambda"]
Gu = C["Gu"]
Gd = C["Gd"]
Ge = C["Ge"]
Eta1 = (3*np.trace(C["uphi"] @ Gu.conj().T) \
+ 3*np.trace(C["dphi"] @ Gd.conj().T) \
+ np.trace(C["ephi"] @ Ge.conj().T) \
+ 3*np.conj(np.trace(C["uphi"] @ Gu.conj().T)) \
+ 3*np.conj(np.trace(C["dphi"] @ Gd.conj().T)) \
+ np.conj(np.trace(C["ephi"] @ Ge.conj().T)))/2
Eta2 = -6*np.trace(C["phiq3"] @ Gu @ Gu.conj().T) \
- 6*np.trace(C["phiq3"] @ Gd @ Gd.conj().T) \
- 2*np.trace(C["phil3"] @ Ge @ Ge.conj().T) \
+ 3*(np.trace(C["phiud"] @ Gd.conj().T @ Gu) \
+ np.conj(np.trace(C["phiud"] @ Gd.conj().T @ Gu)))
Eta3 = 3*np.trace(C["phiq1"] @ Gd @ Gd.conj().T) \
- 3*np.trace(C["phiq1"] @ Gu @ Gu.conj().T) \
+ 9*np.trace(C["phiq3"] @ Gd @ Gd.conj().T) \
+ 9*np.trace(C["phiq3"] @ Gu @ Gu.conj().T) \
+ 3*np.trace(C["phiu"] @ Gu.conj().T @ Gu) \
- 3*np.trace(C["phid"] @ Gd.conj().T @ Gd) \
- 3*(np.trace(C["phiud"] @ Gd.conj().T @ Gu) \
+ np.conj(np.trace(C["phiud"] @ Gd.conj().T @ Gu))) \
+ np.trace(C["phil1"] @ Ge @ Ge.conj().T) \
+ 3*np.trace(C["phil3"] @ Ge @ Ge.conj().T) \
- np.trace(C["phie"] @ Ge.conj().T @ Ge)
Eta4 = 12*np.trace(C["phiq1"] @ Gd @ Gd.conj().T) \
- 12*np.trace(C["phiq1"] @ Gu @ Gu.conj().T) \
+ 12*np.trace(C["phiu"] @ Gu.conj().T @ Gu) \
- 12*np.trace(C["phid"] @ Gd.conj().T @ Gd) \
+ 6*(np.trace(C["phiud"] @ Gd.conj().T @ Gu) \
+ np.conj(np.trace(C["phiud"] @ Gd.conj().T @ Gu))) \
+ 4*np.trace(C["phil1"] @ Ge @ Ge.conj().T) \
- 4*np.trace(C["phie"] @ Ge.conj().T @ Ge)
Eta5 = 1j*3/2*(np.trace(Gd @ C["dphi"].conj().T) \
- np.conj(np.trace(Gd @ C["dphi"].conj().T))) \
- 1j*3/2*(np.trace(Gu @ C["uphi"].conj().T) \
- np.conj(np.trace(Gu @ C["uphi"].conj().T))) \
+ 1j*1/2*(np.trace(Ge @ C["ephi"].conj().T) \
- np.conj(np.trace(Ge @ C["ephi"].conj().T)))
GammaH = np.trace(3*Gu @ Gu.conj().T + 3*Gd @ Gd.conj().T + Ge @ Ge.conj().T)
Gammaq = 1/2*(Gu @ Gu.conj().T + Gd @ Gd.conj().T)
Gammau = Gu.conj().T @ Gu
Gammad = Gd.conj().T @ Gd
Gammal = 1/2*Ge @ Ge.conj().T
Gammae = Ge.conj().T @ Ge
Beta = OrderedDict()
Beta["g"] = -19/6*g**3 - 8*g*m2/HIGHSCALE**2*C["phiW"]
Beta["gp"] = 41/6*gp**3 - 8*gp*m2/HIGHSCALE**2*C["phiB"]
Beta["gs"] = -7*gs**3 - 8*gs*m2/HIGHSCALE**2*C["phiG"]
Beta["Lambda"] = 12*Lambda**2 \
+ 3/4*gp**4 + 3/2*g**2*gp**2 + 9/4*g**4 - 3*(gp**2 + 3*g**2)*Lambda \
+ 4*Lambda*GammaH \
- 4*(3*np.trace(Gd @ Gd.conj().T @ Gd @ Gd.conj().T) \
+ 3*np.trace(Gu @ Gu.conj().T @ Gu @ Gu.conj().T) \
+ np.trace(Ge @ Ge.conj().T @ Ge @ Ge.conj().T)) \
+ 4*m2/HIGHSCALE**2*(12*C["phi"] \
+ (-16*Lambda + 10/3*g**2)*C["phiBox"] \
+ (6*Lambda + 3/2*(gp**2 - g**2))*C["phiD"] \
+ 2*(Eta1 + Eta2) \
+ 9*g**2*C["phiW"] \
+ 3*gp**2*C["phiB"] \
+ 3*g*gp*C["phiWB"] \
+ 4/3*g**2*(np.trace(C["phil3"]) \
+ 3*np.trace(C["phiq3"])))
Beta["m2"] = m2*(6*Lambda - 9/2*g**2 - 3/2*gp**2 \
+ 2*GammaH + 4*m2/HIGHSCALE**2*(C["phiD"] \
- 2*C["phiBox"]))
Beta["Gu"] = 3/2*(Gu @ Gu.conj().T @ Gu - Gd @ Gd.conj().T @ Gu) \
+ (GammaH - 9/4*g**2 - 17/12*gp**2 - 8*gs**2)*Gu \
+ 2*m2/HIGHSCALE**2*(3*C["uphi"] \
+ 1/2*(C["phiD"] - 2*C["phiBox"])*Gu \
- C["phiq1"].conj().T @ Gu \
+ 3*C["phiq3"].conj().T @ Gu \
+ Gu @ C["phiu"].conj().T \
- Gd @ C["phiud"].conj().T \
- 2*(my_einsum("rpts,pt", C["qu1"], Gu) \
+ 4/3*my_einsum("rpts,pt", C["qu8"], Gu)) \
- my_einsum("ptrs,pt", C["lequ1"], np.conj(Ge)) \
+ 3*my_einsum("rspt,pt", C["quqd1"], np.conj(Gd)) \
+ 1/2*(my_einsum("psrt,pt", C["quqd1"], np.conj(Gd)) \
+ 4/3*my_einsum("psrt,pt", C["quqd8"], np.conj(Gd))))
Beta["Gd"] = 3/2*(Gd @ Gd.conj().T @ Gd - Gu @ Gu.conj().T @ Gd) \
+ (GammaH - 9/4*g**2 - 5/12*gp**2 - 8*gs**2)*Gd \
+ 2*m2/HIGHSCALE**2*(3*C["dphi"] + 1/2*(C["phiD"] \
- 2*C["phiBox"])*Gd \
+ C["phiq1"].conj().T @ Gd \
+ 3*C["phiq3"].conj().T @ Gd \
- Gd @ C["phid"].conj().T \
- Gu @ C["phiud"] \
- 2*(my_einsum("rpts,pt", C["qd1"], Gd) \
+ 4/3*my_einsum("rpts,pt", C["qd8"], Gd)) \
+ my_einsum("ptsr,pt", np.conj(C["ledq"]), Ge) \
+ 3*my_einsum("ptrs,pt", C["quqd1"],
|
np.conj(Gu)
|
numpy.conj
|
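# Editorial illustration (toy tensors, not SMEFT Wilson coefficients): the implicit-output
# einsum pattern "rpts,pt" used above contracts the repeated p, t indices and keeps r, s.
import numpy as np
C4_demo = np.random.rand(3, 3, 3, 3)
G_demo = np.random.rand(3, 3)
out_demo = np.einsum("rpts,pt", C4_demo, G_demo)        # same as np.einsum("rpts,pt->rs", ...)
assert out_demo.shape == (3, 3)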
import heatsim2
import copy
import cProfile
import numpy as np
numpy=np
import pylab as pl
import scipy as sp
import scipy.interpolate
# define materials:
composite_rho=1.75e3 # kg/m^3
composite_c=730.0 # J/kg/deg K
## Use these values for (roughly) quasi-isotropic layup specimens
#composite_k=3.7 # W/m/deg K
#max_x = 51.0e-3 # length of specimen
## Use these values for (roughly) 0/90 layup specimens
#composite_k=4.0 # W/m/deg K
#max_x = 51.0e-3 # length of specimen
## Use these values for (roughly) 0 uni layup specimens
#composite_k=5.1 # W/m/deg K
#max_x = 51.0e-3 # length of specimen
# Use these values for (roughly) 90 uni layup specimens
composite_k=0.71 # W/m/deg K
max_x = 25.0e-3 # length of specimen
# WARNING: X and Y GRID BOUNDARIES MUST LINE UP WITH
# composite_min_z and composite_max_z FOR AN ACCURATE CALCULATION
# Create x,y,z voxel center coords
nx=15
ny=40
nz=20
(dz,dy,dx,
z,y,x,
zgrid,ygrid,xgrid,
z_bnd,y_bnd,x_bnd,
z_bnd_z,z_bnd_y,z_bnd_x,
y_bnd_z,y_bnd_y,y_bnd_x,
x_bnd_z,x_bnd_y,x_bnd_x,
r3d,r2d) = heatsim2.build_grid(-20e-3,20e-3,nz,
-40e-3,40e-3,ny,
0.0,max_x,nx)
composite_min_z=-4e-3
composite_max_z=4e-3
composite_min_y=-10e-3;
composite_max_y=10e-3
## XPS foam, approximate but pessimistic values
foam_k=.04 # W/m/deg K
foam_rho=40.0 # kg/m^3
foam_c=1500.0 # J/kg/deg K
#spaceloft foam:
#foam_k=.016 # W/m/deg K
#foam_rho=150.0 # kg/m^3
#foam_c=1000.0 # J/kg/deg K
materials=(
# material 0: foam
(heatsim2.TEMPERATURE_COMPUTE,foam_k,foam_rho,foam_c),
# material 1: composite
(heatsim2.TEMPERATURE_COMPUTE,composite_k,composite_rho,composite_c),
)
materials_nofoam=(
# material 0: foam
(heatsim2.TEMPERATURE_COMPUTE,0.0,foam_rho,foam_c),
# material 1: composite
(heatsim2.TEMPERATURE_COMPUTE,composite_k,composite_rho,composite_c),
)
boundaries=(
# boundary 0: conducting
(heatsim2.boundary_conducting,),
(heatsim2.boundary_insulating,),
)
heat_energy=500e3 # Energy J/m^2
volumetric=( # on material grid
# 0: nothing
(heatsim2.NO_SOURCE,),
#1: impulse source @ t=0
(heatsim2.IMPULSE_SOURCE,0.0,heat_energy/dx), # t (sec), Energy J/m^2
)
# initialize all elements to zero
(material_elements,
boundary_z_elements,
boundary_y_elements,
boundary_x_elements,
volumetric_elements)=heatsim2.zero_elements(nz,ny,nx)
material_elements[ (zgrid >= composite_min_z) &
(zgrid <= composite_max_z) &
(ygrid >= composite_min_y) &
(ygrid <= composite_max_y)]=1 # set composite material
boundary_x_elements[:,:,0]=1 # insulating
boundary_x_elements[:,:,-1]=1 # insulating
boundary_y_elements[:,0,:]=1 # insulating
boundary_y_elements[:,-1,:]=1 # insulating
boundary_z_elements[0,:,:]=1 # insulating
boundary_z_elements[-1,:,:]=1 # insulating
# The no-foam version converges much better (and with many fewer
# time/spatial steps) if we make the insulation around the
# composite an explicit boundary condition
#
# (try commenting out the _nofoam boundary changes below to see what I mean)
boundary_z_elements_nofoam=copy.copy(boundary_z_elements)
boundary_y_elements_nofoam=copy.copy(boundary_y_elements)
boundary_x_elements_nofoam=copy.copy(boundary_x_elements)
boundary_y_elements_nofoam[((y_bnd_y==y_bnd[np.argmin(np.abs(y_bnd-composite_min_y))])
| (y_bnd_y==y_bnd[np.argmin(np.abs(y_bnd-composite_max_y))])) &
(y_bnd_z >= composite_min_z) &
(y_bnd_z <= composite_max_z)]=1 # insulating
boundary_z_elements_nofoam[((z_bnd_z==z_bnd[np.argmin(np.abs(z_bnd-composite_min_z))])
| (z_bnd_z==z_bnd[np.argmin(np.abs(z_bnd-composite_max_z))])) &
(z_bnd_y >= composite_min_y) &
(z_bnd_y <= composite_max_y)]=1 # insulating
volumetric_elements[(xgrid==x[0]) &
(ygrid >= composite_min_y) &
(ygrid <= composite_max_y) &
(zgrid >= composite_min_z) &
(zgrid <= composite_max_z)]=1 # impulse
#t0=-0.001
t0=0.0
dt=0.05 # must be no bigger than .5 if we don't have the special nofoam boundaries, above
nt=18000
tvec=t0+numpy.arange(nt,dtype='d')*dt
(ADI_params,ADI_steps)=heatsim2.setup(z[0],y[0],x[0],
dz,dy,dx,
nz,ny,nx,
dt,
materials,
boundaries,
volumetric,
material_elements,
boundary_z_elements,
boundary_y_elements,
boundary_x_elements,
volumetric_elements)
(ADI_params_nofoam,ADI_steps_nofoam)=heatsim2.setup(z[0],y[0],x[0],
dz,dy,dx,
nz,ny,nx,
dt,
materials_nofoam,
boundaries,
volumetric,
material_elements,
boundary_z_elements_nofoam,
boundary_y_elements_nofoam,
boundary_x_elements_nofoam,
volumetric_elements)
# t0,dt,nt)
T=np.zeros((nt+1,nz,ny,nx),dtype='d')
T_nofoam=np.zeros((nt+1,nz,ny,nx),dtype='d')
for tcnt in range(nt):
t=t0+dt*tcnt
print("t={}".format(t))
T[tcnt+1,::]=heatsim2.run_adi_steps(ADI_params,ADI_steps,t,dt,T[tcnt,::],volumetric_elements,volumetric)
T_nofoam[tcnt+1,::]=heatsim2.run_adi_steps(ADI_params_nofoam,ADI_steps_nofoam,t,dt,T_nofoam[tcnt,::],volumetric_elements,volumetric)
pass
pl.figure(1)
pl.clf()
pl.imshow(T[nt,nz//2,::]);
pl.colorbar()
pl.figure(2)
pl.clf()
#maxT=np.max(T[1:,nz//2,ny//2,nx-1])
maxTidx=np.argmax(T[1:,nz//2,ny//2,nx-1])
if 2*maxTidx >= T.shape[0]:
raise ValueError("Not enough timesteps to capture 2x peak")
maxTtime=(tvec+dt/2)[maxTidx]
#halfmaxTidx=np.argmin(abs(T[1:,nz//2,ny//2,nx-1]-maxT/2.0))
#halfmaxTtime=(tvec+dt/2)[halfmaxTidx]
#maxT_nofoam=np.max(T_nofoam[1:,nz//2,ny//2,nx-1])
#halfmaxTidx_nofoam=np.argmin(abs(T_nofoam[1:,nz//2,ny//2,nx-1]-maxT_nofoam/2.0))
#halfmaxTtime_nofoam=(tvec+dt/2)[halfmaxTidx_nofoam]
## Projection trick:
#MaxT_time=(tvec+dt/2)[np.argmax(T[1:,nz//2,ny//2,nx-1])]
#MaxT_doubletimeidx=np.argmin(abs(MaxT_time*2-(tvec+dt/2)))
#MaxT_onefivetimeidx=np.argmin(abs(MaxT_time*1.5-(tvec+dt/2)))
#MaxTslope=(T[MaxT_doubletimeidx+1,nz//2,ny//2,nx-1]-T[MaxT_onefivetimeidx+1,nz//2,ny//2,nx-1])/((tvec+dt/2)[MaxT_doubletimeidx]-(tvec+dt/2)[MaxT_onefivetimeidx])
#MaxT_projected=maxT+MaxTslope*(-MaxT_time)
#MaxT_projected=T[1+MaxT_doubletimeidx,nz//2,ny//2,nx-1]+MaxTslope*(-(tvec+dt/2)[MaxT_doubletimeidx])
#halfmaxT_projectedidx=np.argmin(abs(T[1:,nz//2,ny//2,nx-1]-MaxT_projected/2.0))
#halfmaxT_projectedtime=(tvec+dt/2)[halfmaxT_projectedidx]
#Tinterpolate=sp.interpolate.splrep((tvec+dt/2),T[1:,nz//2,ny//2,nx-1]-MaxT_projected/2.0,s=0)
#halfmaxT_projectedtime=sp.interpolate.sproot(Tinterpolate)
# Inflection point trick (Ringermacher)
# Use data up to twice time to reach peak, as in ktester v2 implementation
tsr_num_predefknots=22 # same as in tsr_analysis.h for thermalconductivitytester. This number includes one on either end that we don't provide to splrep()
knotinterval=dt*(maxTidx*2-1-1)/(tsr_num_predefknots-1)
Tinterpolate=sp.interpolate.splrep((tvec+dt/2)[:(maxTidx*2-1)],T[1:(maxTidx*2),nz//2,ny//2,nx-1],k=5,task=-1,t=
|
np.arange(tsr_num_predefknots-2,dtype='d')
|
numpy.arange
|
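# Editorial illustration (synthetic curve, separate from the heat-sim script above):
# splrep fits a cubic spline and sproot locates its zero crossings, the same scipy calls
# that script's timing analysis leans on.
import numpy as np
import scipy.interpolate as si
t_demo = np.linspace(0.0, 10.0, 200)
y_demo = np.exp(-t_demo) - 0.5                 # crosses zero near t = ln(2) ~ 0.693
tck_demo = si.splrep(t_demo, y_demo, k=3, s=0)
roots_demo = si.sproot(tck_demo)               # array with a single root close to 0.693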
#
# Beeler-Reuter model for mammalian ventricular action potential.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import print_function
import numpy as np
import pints
import scipy.integrate
from . import ToyModel
class ActionPotentialModel(pints.ForwardModel, ToyModel):
"""
The 1977 Beeler-Reuter model of the mammalian ventricular action potential
(AP) [1].
This model describes several ion currents, each with a maximum conductance
parameter, that together give rise to the cardiac AP and calcium transient.
In this (non-trivial) 'toy' model, we use the maximum conductances as the
parameters, and the AP and calcium transient as observable outputs. All
other model parameters are assumed to be known.
The parameters are _scaled_: instead of passing in the conductances
directly, users should provide the natural log of the maximum conductances.
This makes the parameters easier to find for optimisation algorithms.
References:
[1] Reconstruction of the action potential of ventricular myocardial
fibres. <NAME> (1977) Journal of Physiology
Arguments:
``y0``
(Optional) The initial condition of the observables ``v`` and ``cai``,
where ``cai >= 0``.
*Extends:* :class:`pints.ForwardModel`, :class:`pints.toy.ToyModel`.
"""
def __init__(self, y0=None):
if y0 is None:
self.set_initial_conditions([-84.622, 2e-7])
else:
self.set_initial_conditions(y0)
# Initial condition for non-observable states
self._m0 = 0.01
self._h0 = 0.99
self._j0 = 0.98
self._d0 = 0.003
self._f0 = 0.99
self._x10 = 0.0004
# membrane capacitance, in uF/cm^2
self._C_m = 1.0
# Nernst reversal potentials, in mV
self._E_Na = 50.0
# Stimulus current
self._I_Stim_amp = 25.0
self._I_Stim_period = 1000.0
self._I_Stim_length = 2.0
# Solver tolerances
self.set_solver_tolerances()
def initial_conditions(self):
"""
Returns the initial conditions of this model.
"""
return [self._v0, self._cai0]
def n_outputs(self):
""" See :meth:`pints.ForwardModel.n_outputs()`. """
# membrane voltage and calcium concentration
return 2
def n_parameters(self):
""" See :meth:`pints.ForwardModel.n_parameters()`. """
# 5 conductance values
return 5
def _rhs(self, states, time, parameters):
"""
Right-hand side equation of the ode to solve.
"""
# Set-up
V, Cai, m, h, j, d, f, x1 = states
gNaBar, gNaC, gCaBar, gK1Bar, gx1Bar = np.exp(parameters)
# Equations
# INa
INa = (gNaBar * m**3 * h * j + gNaC) * (V - self._E_Na)
alpha = (V + 47) / (1 - np.exp(-0.1 * (V + 47)))
beta = 40 * np.exp(-0.056 * (V + 72))
dmdt = alpha * (1 - m) - beta * m
alpha = 0.126 * np.exp(-0.25 * (V + 77))
beta = 1.7 / (1 + np.exp(-0.082 * (V + 22.5)))
dhdt = alpha * (1 - h) - beta * h
alpha = 0.055 *
|
np.exp(-0.25 * (V + 78))
|
numpy.exp
|
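# Editorial illustration of the parameter scaling described in the ActionPotentialModel
# docstring: conductances enter as natural logs and are exponentiated inside _rhs.
# The numbers below are arbitrary placeholders, not published Beeler-Reuter values.
import numpy as np
g_demo = np.array([4.0, 0.003, 0.09, 0.35, 0.008])   # hypothetical maximum conductances
log_params_demo = np.log(g_demo)                     # what the model expects as parameters
recovered_demo = np.exp(log_params_demo)             # what the RHS works with internally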
#!/usr/bin/env python
# coding: utf-8
'''
Author: <NAME>
License: Apache 2.0
'''
import os
import argparse
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GroupShuffleSplit
import umap
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics import pairwise_distances_argmin_min
from sklearn.metrics import silhouette_score
from scipy.spatial import ConvexHull
from datetime import datetime
from statsmodels.stats.multitest import multipletests
from scipy.stats import zscore
from scipy import stats
import itertools
# In[2]:
def str2boolean(s):
if s == 'False':
s_new = False
else:
s_new = True
return s_new
# In[47]:
# Config
toy=False
sample = 0
pre_or_post = 'pre'
plot=False
timestamp = datetime.today().strftime('%Y-%m-%d-%H-%M')
# data_folder = '/content/drive/My Drive/ML4HC_Final_Project/data/input/'
data_folder = './../../datum/reddit/input/reddit_mental_health_dataset/'
output_dir = f'./../../datum/reddit/output/supervised_umap/umap_cluster_{pre_or_post}_{timestamp}/'
print(output_dir)
# In[48]:
# # # Mount GDrive and attach it to the colab for data I/O
# from google.colab import drive
# drive.mount('/content/drive')
# In[49]:
os.makedirs(output_dir, exist_ok=True)
# In[50]:
# Or load from arguments
parser = argparse.ArgumentParser()
parser.add_argument('--job_array_task_id',
                    help='default: ${SLURM_ARRAY_TASK_ID} or 1. When using job arrays, this will be set by the bash script through ${SLURM_ARRAY_TASK_ID} or default to 1; it is subtracted by 1 below for zero indexing')
parser.add_argument('--plot', help='plot of each run')
parser.add_argument('--toy', help='run quickly with less labels, parameters and splits')
parser.add_argument('--pre_or_post', help='post, pre, 2019, 2018')
args = parser.parse_args()
if args.job_array_task_id is not None:
    sample = int(args.job_array_task_id) - 1
if args.plot is not None:
    plot = str2boolean(args.plot)
if args.toy is not None:
    toy = str2boolean(args.toy)
if args.pre_or_post is not None:
    pre_or_post = str(args.pre_or_post)
# In[4]:
seed_value = None
# silence NumbaPerformanceWarning
import warnings
import numba
from numba.errors import NumbaPerformanceWarning
warnings.filterwarnings("ignore", category=NumbaPerformanceWarning)
# In[12]:
import pickle
def save_obj(obj, path ):
with open(path , 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(path):
with open(path, 'rb') as f:
return pickle.load(f)
def unison_shuffled_copies(a, b, c):
assert len(a) == len(b) == len(c)
p = np.random.permutation(len(a))
a = np.array(a)
    b = np.array(b)  # target API: numpy.array
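# The prompt above truncates unison_shuffled_copies at the np.array call; a
# self-contained sketch of the same idea (one shared permutation applied to all
# three sequences) could look like this. This is an assumed completion, not the
# original author's code.
import numpy as np

def unison_shuffled_copies_sketch(a, b, c):
    assert len(a) == len(b) == len(c)
    p = np.random.permutation(len(a))
    a, b, c = np.array(a), np.array(b), np.array(c)
    return a[p], b[p], c[p]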
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from util.data_util import ModelNet40
from model.GDANet_cls import GDANET
import numpy as np
from torch.utils.data import DataLoader
from util.util import cal_loss, IOStream
import sklearn.metrics as metrics
from datetime import datetime
import provider
import rsmix_provider
from modelnetc_utils import eval_corrupt_wrapper, ModelNetC
# weight initialization:
def weight_init(m):
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_normal_(m.weight)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Conv2d):
torch.nn.init.xavier_normal_(m.weight)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Conv1d):
torch.nn.init.xavier_normal_(m.weight)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm1d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
def _init_():
if not os.path.exists('checkpoints'):
os.makedirs('checkpoints')
if not os.path.exists('checkpoints/' + args.exp_name):
os.makedirs('checkpoints/' + args.exp_name)
# backup the running files:
if not args.eval:
os.system('cp main_cls.py checkpoints' + '/' + args.exp_name + '/' + 'main.py.backup')
os.system('cp model/GDANet_cls.py checkpoints' + '/' + args.exp_name + '/' + 'GDANet_cls.py.backup')
os.system('cp util.GDANet_util.py checkpoints' + '/' + args.exp_name + '/' + 'GDANet_util.py.backup')
os.system('cp util.data_util.py checkpoints' + '/' + args.exp_name + '/' + 'data_util.py.backup')
def train(args, io):
train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points, args=args if args.pw else None),
num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=True)
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
batch_size=args.test_batch_size, shuffle=True, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
model = GDANET().to(device)
print(str(model))
model.apply(weight_init)
model = nn.DataParallel(model)
print("Let's use", torch.cuda.device_count(), "GPUs!")
if args.use_sgd:
print("Use SGD")
opt = optim.SGD(model.parameters(), lr=args.lr * 100, momentum=args.momentum, weight_decay=1e-4)
scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
else:
print("Use Adam")
opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr / 100)
criterion = cal_loss
best_test_acc = 0
for epoch in range(args.epochs):
scheduler.step()
####################
# Train
####################
train_loss = 0.0
count = 0.0
model.train()
train_pred = []
train_true = []
for data, label in train_loader:
'''
implement augmentation
'''
rsmix = False
# for new augmentation code, remove squeeze because it will be applied after augmentation.
# default from baseline model, scale, shift, shuffle was default augmentation
            if args.rot or args.rdscale or args.shift or args.jitter or args.shuffle or args.rddrop or (
                    args.beta != 0.0):
data = data.cpu().numpy()
if args.rot:
data = provider.rotate_point_cloud(data)
data = provider.rotate_perturbation_point_cloud(data)
if args.rdscale:
tmp_data = provider.random_scale_point_cloud(data[:, :, 0:3])
data[:, :, 0:3] = tmp_data
if args.shift:
tmp_data = provider.shift_point_cloud(data[:, :, 0:3])
data[:, :, 0:3] = tmp_data
if args.jitter:
tmp_data = provider.jitter_point_cloud(data[:, :, 0:3])
data[:, :, 0:3] = tmp_data
if args.rddrop:
data = provider.random_point_dropout(data)
if args.shuffle:
data = provider.shuffle_points(data)
r = np.random.rand(1)
if args.beta > 0 and r < args.rsmix_prob:
rsmix = True
data, lam, label, label_b = rsmix_provider.rsmix(data, label, beta=args.beta, n_sample=args.nsample,
KNN=args.knn)
            if args.rot or args.rdscale or args.shift or args.jitter or args.shuffle or args.rddrop or (
                    args.beta != 0.0):
data = torch.FloatTensor(data)
if rsmix:
lam = torch.FloatTensor(lam)
lam, label_b = lam.to(device), label_b.to(device).squeeze()
data, label = data.to(device), label.to(device).squeeze()
if rsmix:
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
opt.zero_grad()
logits = model(data)
loss = 0
for i in range(batch_size):
loss_tmp = criterion(logits[i].unsqueeze(0), label[i].unsqueeze(0).long()) * (1 - lam[i]) \
+ criterion(logits[i].unsqueeze(0), label_b[i].unsqueeze(0).long()) * lam[i]
loss += loss_tmp
loss = loss / batch_size
else:
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
opt.zero_grad()
logits = model(data)
loss = criterion(logits, label)
loss.backward()
opt.step()
preds = logits.max(dim=1)[1]
count += batch_size
train_loss += loss.item() * batch_size
train_true.append(label.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
train_loss * 1.0 / count,
metrics.accuracy_score(
train_true, train_pred),
metrics.balanced_accuracy_score(
train_true, train_pred))
io.cprint(outstr)
####################
# Test
####################
test_loss = 0.0
count = 0.0
model.eval()
test_pred = []
test_true = []
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
logits = model(data)
loss = criterion(logits, label)
preds = logits.max(dim=1)[1]
count += batch_size
test_loss += loss.item() * batch_size
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = metrics.accuracy_score(test_true, test_pred)
avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
test_loss * 1.0 / count,
test_acc,
avg_per_class_acc)
io.cprint(outstr)
if test_acc >= best_test_acc:
best_test_acc = test_acc
io.cprint('Max Acc:%.6f' % best_test_acc)
torch.save(model.state_dict(), 'checkpoints/%s/best_model.t7' % args.exp_name)
def test(args, io):
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
batch_size=args.test_batch_size, shuffle=True, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
model = GDANET().to(device)
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.model_path))
model = model.eval()
test_acc = 0.0
count = 0.0
test_true = []
test_pred = []
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
logits = model(data)
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = metrics.accuracy_score(test_true, test_pred)
avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc, avg_per_class_acc)
io.cprint(outstr)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(description='3D Object Classification')
parser.add_argument('--exp_name', type=str, default='GDANet', metavar='N',
help='Name of the experiment')
    parser.add_argument('--batch_size', type=int, default=64, metavar='batch_size',
                        help='Size of training batch')
    parser.add_argument('--test_batch_size', type=int, default=32, metavar='batch_size',
                        help='Size of test batch')
    parser.add_argument('--epochs', type=int, default=350, metavar='N',
                        help='number of epochs to train')
parser.add_argument('--use_sgd', type=bool, default=True,
help='Use SGD')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001, 0.1 if using sgd)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--no_cuda', type=bool, default=False,
help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--eval', type=bool, default=False,
help='evaluate the model')
parser.add_argument('--eval_corrupt', type=bool, default=False,
help='evaluate the model under corruption')
parser.add_argument('--num_points', type=int, default=1024,
help='num of points to use')
parser.add_argument('--model_path', type=str, default='', metavar='N',
help='Pretrained model path')
# added arguments
parser.add_argument('--rdscale', action='store_true', help='random scaling data augmentation')
parser.add_argument('--shift', action='store_true', help='random shift data augmentation')
parser.add_argument('--shuffle', action='store_true', help='random shuffle data augmentation')
parser.add_argument('--rot', action='store_true', help='random rotation augmentation')
parser.add_argument('--jitter', action='store_true', help='jitter augmentation')
parser.add_argument('--rddrop', action='store_true', help='random point drop data augmentation')
parser.add_argument('--rsmix_prob', type=float, default=0.5, help='rsmix probability')
parser.add_argument('--beta', type=float, default=0.0, help='scalar value for beta function')
parser.add_argument('--nsample', type=float, default=512,
help='default max sample number of the erased or added points in rsmix')
parser.add_argument('--modelnet10', action='store_true', help='use modelnet10')
parser.add_argument('--normal', action='store_true', help='use normal')
    parser.add_argument('--knn', action='store_true', help='use knn instead of the ball-query function')
parser.add_argument('--data_path', type=str, default='./data/modelnet40_normal_resampled', help='dataset path')
# pointwolf
parser.add_argument('--pw', action='store_true', help='use PointWOLF')
parser.add_argument('--w_num_anchor', type=int, default=4, help='Num of anchor point')
parser.add_argument('--w_sample_type', type=str, default='fps',
help='Sampling method for anchor point, option : (fps, random)')
parser.add_argument('--w_sigma', type=float, default=0.5, help='Kernel bandwidth')
parser.add_argument('--w_R_range', type=float, default=10, help='Maximum rotation range of local transformation')
    parser.add_argument('--w_S_range', type=float, default=3, help='Maximum scaling range of local transformation')
parser.add_argument('--w_T_range', type=float, default=0.25,
help='Maximum translation range of local transformation')
args = parser.parse_args()
_init_()
if not args.eval:
io = IOStream('checkpoints/' + args.exp_name + '/%s_train.log' % (args.exp_name))
else:
io = IOStream('checkpoints/' + args.exp_name + '/%s_test.log' % (args.exp_name))
io.cprint(str(args))
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
io.cprint(
'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
torch.cuda.manual_seed(args.seed)
else:
io.cprint('Using CPU')
if not args.eval and not args.eval_corrupt:
train(args, io)
elif args.eval:
test(args, io)
elif args.eval_corrupt:
device = torch.device("cuda" if args.cuda else "cpu")
model = GDANET().to(device)
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.model_path))
model = model.eval()
def test_corrupt(args, split, model):
test_loader = DataLoader(ModelNetC(split=split),
batch_size=args.test_batch_size, shuffle=True, drop_last=False)
test_true = []
test_pred = []
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
logits = model(data)
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
            test_true = np.concatenate(test_true)  # target API: numpy.concatenate
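# A small, self-contained sketch of the lambda-weighted loss mixing used in the
# rsmix branch of train() above: each sample's loss blends the criterion against
# label and label_b by its mixing ratio lam. The shapes, class count, and the plain
# cross-entropy stand-in for cal_loss are assumptions, not the original code.
import torch
import torch.nn.functional as F

logits = torch.randn(4, 40)              # batch of 4, 40 ModelNet classes
label = torch.randint(0, 40, (4,))
label_b = torch.randint(0, 40, (4,))
lam = torch.rand(4)                      # per-sample mixing ratios from rsmix
loss = ((1 - lam) * F.cross_entropy(logits, label, reduction='none')
        + lam * F.cross_entropy(logits, label_b, reduction='none')).mean()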
#!/usr/bin/env python
###############################################################################
#
# Primary code to perform RING-MaP analysis on ShapeMapper output files
#
# See README for further details
# Run with -h flag for arguments
#
# Lead developer: <NAME>
# Contributors: <NAME>, <NAME>
#
# This file is licensed under the terms of the MIT license
#
# Version 4.2
# December 2018
#
###############################################################################
import sys, argparse, itertools, math
import numpy as np
import readMutStrings # cython code containing I/O funcs
class RINGexperiment(object):
"""RINGexperiment objects contain matrices and methods for computing correlated mutations
from RINGexperiments parsed by ShapeMapper
"""
def __init__(self, fasta = None, exfile=None, bgfile=None, arraysize=1000,
corrtype = 'g', verbal=False, **kwargs):
"""
fasta = fasta file of the sequence being analyzed
exfile = datafile containing experiment data
bgfile = datafile containing bg data
arraysize = optional size of the arrays, if fasta not provided
corrtype = type of correlation
kwargs are passed to initDataMatrices
"""
if fasta is not None:
self.sequence = self.readFasta(fasta, verbal=verbal)
            # set arraysize to the sequence length
            self.arraysize = len(self.sequence)
else:
self.sequence = None
self.arraysize = arraysize
# initialize matrix values
# ex matrices hold raw experiment data
self.ex_readarr = None
self.ex_comutarr = None
self.ex_inotjarr = None
# bg matrices hold raw bg data
self.bg_readarr = None
self.bg_comutarr = None
self.bg_inotjarr = None
# correlation matrices hold correlations
self.ex_correlations = None
self.bg_correlations = None
self.ex_zscores = None
self.window = None
self.setCorrType(corrtype, verbal=verbal)
if exfile:
self.initDataMatrices('ex', exfile, verbal=verbal, **kwargs)
if bgfile:
self.initDataMatrices('bg', bgfile, verbal=verbal, **kwargs)
def setCorrType(self, corrtype, verbal=False):
"""Set correlation type
Valid options are 'phi' and 'mi'
"""
# convert to lower case
corrtype = corrtype.lower()
if corrtype == 'chi':
self.correlationfunc = self._phiyates
#self.significancefunc = self._phiyates
if verbal: print("Using Yates-corrected Chi2 based correlation metric")
elif corrtype == 'g':
self.correlationfunc = self._mistatistic
#self.significancefunc = self._mistatistic
if verbal: print("Using G-test correlation metric")
elif corrtype == 'apc':
self.correlationfunc = self._mistatistic
#self.significancefunc = self._mistatistic
if verbal: print("Using APC corrected G-test correlation metric")
elif corrtype == 'mi':
self.correlationfunc = self._mutualinformation
if verbal: print("Using MI correlation metric")
elif corrtype == 'nmi':
self.correlationfunc = self._norm_mutualinformation
if verbal: print("Using normalized MI correlation metric")
else:
raise ValueError("Unrecognized correlation metric : {0}".format(corrtype))
self.corrtype = corrtype
def readFasta(self, fasta, verbal=False):
"""Read the sequence in the provided sequence file"""
with open(fasta) as inp:
inp.readline()
seq = ''
for line in inp:
if line[0] == '>':
break
seq += line.strip()
if verbal:
print("Sequence length={0} read from {1}".format(len(seq), fasta))
return seq
def initDataMatrices(self, prefix, datafile, window=1, verbal=False, **kwargs):
"""initialize and fill the read, comut, inotj matrices
prefix = ex/bg, indicating the type of the data
datafile = new/old mutation string file
**kwargs are passed onto appropriate fillMatrices function
"""
if self.window is None:
self.window = window
if verbal: print("Computing correlations using window={0}".format(self.window))
elif window != self.window:
raise ValueError("Window already set to {0}; passed window={1}".format(self.window, window))
# initialize the matrices
read = np.zeros( (self.arraysize, self.arraysize), dtype=np.int32)
comut = np.zeros( (self.arraysize, self.arraysize), dtype=np.int32)
inotj = np.zeros( (self.arraysize, self.arraysize), dtype=np.int32)
# determine whether new or old mutstring format
filetype = self._filetype(datafile)
if filetype > 0:
if verbal: print("Filling {0} arrays from {1}".format(prefix, datafile))
self._fillMatrices(datafile, read, comut, inotj, filetype, verbal=verbal, **kwargs)
else:
if verbal: print("Filling {0} arrays from OLD format file {1}".format(prefix, datafile))
self._fillMatrices_Old(datafile, read, comut, inotj, verbal=verbal, **kwargs)
# assign the matrices
setattr(self, prefix+'_readarr', read)
setattr(self, prefix+'_comutarr', comut)
setattr(self, prefix+'_inotjarr', inotj)
def _filetype(self, datafile):
"""Determine format of datafile:
return 0 if old format
return 1 if ShapeMapper 2.1.1 format
return 2 if ShapeMapper 2.1.2 format
return 3 if ShapeMapper 2.1.4-rc or higher format
"""
try:
fileformat = 999
with open(datafile) as inp:
line = inp.readline()
spl = line.split()
if '|' in spl[3]:
fileformat = 0
elif spl[0][:4] in ('MERG', 'PAIR','UNPA', 'UNSP'):
if not spl[4].isdigit():
fileformat = 3
else:
fileformat = 2
else:
fileformat = 1
return fileformat
except:
raise IOError('{0} has unrecognized format'.format(datafile))
def _fillMatrices(self, datafile, read, comut, inotj, fileformat, mincoverage=0, undersample=-1, verbal=False, **kwargs):
"""Call the cython fillMatrices function for new classified mutation file format
datafile = New classified mutations file to read
read
comut
inotj = NxN matrices to fill
fileformat = parsed mutation file code from _filetype
mincoverage = Minimum number of valid 'read' positions required per read
undersample = Maximum number of reads to read; default of -1 means read all reads
"""
if 0<mincoverage<1:
validpos = sum([x.isupper() for x in self.sequence])
mincoverage *= validpos
if verbal and mincoverage>0:
print("Read length filtering ON\n\tMatch threshold = {0}".format(mincoverage))
fillstats = readMutStrings.fillMatrices(datafile, read, comut, inotj, self.window, mincoverage, fileformat, undersample)
if verbal:
print("Input summary:")
print("\t{0} reads in {1}".format(fillstats[0], datafile))
print("\t{0} reads passed filtering".format(fillstats[1]))
def _fillMatrices_Old(self, datafile, read, comut, inotj, phred_cut=30,
accepted_events = 'AGCT-', mutseparation=5, maxdel=1000, verbal=False, **kwargs):
"""Call the cython fillMatrices_Old function
datafile = Old mutation string file to read
read
comut
inotj = NxN matrices to fill
phred_cut = Minimum phred value required for valid mutations
accepted_events = Accepted valid mutations events
mutseparation = Separation distance required between valid mutations
maxdel = maximum deletion/no-data region allowed for valid reads
"""
if verbal:
print("Post-processing old ShapeMapper called mutations:")
print("\tPhred cutoff = {0}".format(phred_cut))
print("\tMut. event separation = {0}".format(mutseparation))
print("\tMaximum deletion cutoff = {0}".format(maxdel))
print("\tAccepted mut. events = {0}".format(accepted_events))
fillstats = readMutStrings.fillMatrices_Old(datafile, read, comut, inotj, self.window,
phred_cut, accepted_events, mutseparation, maxdel)
if verbal:
print("Input summary:")
print("\t{0} reads in {1}".format(fillstats[0], datafile))
print("\t{0} reads passed filtering".format(fillstats[1]))
def getMaxArrayIndex(self, prefix='ex'):
"""Return index of the last non-zero diagonal element. Equal to sequence length if set.
Otherweise, determine length of molecule after read matrices are filled"""
try:
return self.maxarrayindex
except AttributeError:
if self.sequence is not None:
self.maxarrayindex = len(self.sequence)
else:
arr = getattr(self, prefix+'_readarr')
last = 0
for i in xrange(arr.shape[0]):
if arr[i,i] != 0:
last = i
self.maxarrayindex = last+1
return self.maxarrayindex
def getReactiveNts(self, ratecut, prefix='ex'):
"""Return indices of nts with mutation rates above ratecut"""
readarr = getattr(self, prefix+'_readarr')
comutarr = getattr(self, prefix+'_comutarr')
ntlist = []
for i in xrange( self.getMaxArrayIndex() ):
if readarr[i,i] == 0:
continue
mutrate = float(comutarr[i,i])/readarr[i,i]
if mutrate > ratecut:
ntlist.append(i)
return ntlist
def getUnreactiveNts(self, ratecut, prefix='ex'):
"""Return indices of nts with mutation rates below ratecut"""
readarr = getattr(self, prefix+'_readarr')
comutarr = getattr(self, prefix+'_comutarr')
ntlist = []
for i in xrange( self.getMaxArrayIndex() ):
if readarr[i,i] == 0:
continue
mutrate = float(comutarr[i,i])/readarr[i,i]
if mutrate < ratecut:
ntlist.append(i)
return ntlist
def _phistatistic(self, phi, n):
"""convert phi coefficient to chi2 statistic
phi = phi coeff
n = total number observations
"""
if np.isnan(phi):
return 0
return n*phi**2
def _phiyates(self, n,b,c,d):
"""Compute yates chi2 from contigency table values"""
af = float(n-b-c-d)
bf = float(b)
# multiply floats, which should avoid overflow errs
bot = (af+bf)*(c+d)*(af+c)*(bf+d)
if bot < 1:
return 0
# multiply floats which should avoid overflow errs
top = n*(abs(af*d - bf*c) - 0.5*n)**2
return top/bot
def _phi(self, n,b,c,d):
""" Return Phi
n = a+b+c+d
- a, b, c, d correspond to values in the 2 x 2
contingency table tested for nucs i and j:
i
0 1
-----------
0| a b
|
1| c d
"""
# convert to float for multiplication to avoid overflow
af = float(n-b-c-d)
bf = float(b)
#return (af*d - bf*c) / min( (af+bf)*(bf+d), (af+c)*(c+d) )
bot = (af+bf)*(c+d)*(af+c)*(bf+d)
if bot < 1:
return 0
return (af*d - bf*c)/np.sqrt(bot)
def _mutualinformation(self, n, b,c,d):
"""Compute Mutual Information for a given nt pair
n = a+b+c+d
- a, b, c, d correspond to values in the 2 x 2
contingency table tested for nucs i and j:
i
0 1
-----------
0| a b
|
1| c d
"""
bf = float(b)
df = float(d)
a = n-bf-c-df
if min(a,b,c,d) < 1:
return 0
mi = a*np.log(a) + bf*np.log(bf) + c*np.log(c) + df*np.log(df)
mi += n*np.log(n)
mi -= (a+c)*np.log(a+c) + (a+bf)*np.log(a+bf) + (bf+df)*np.log(bf+df) + (c+df)*np.log(c+df)
mi /= n
return mi
def _mistatistic(self, n, b,c,d):
"""convert mutual information value to g statistic
n = total number observations
"""
return 2*n*self._mutualinformation(n,b,c,d)
def _norm_mutualinformation(self, n, b, c, d):
mi = self._mutualinformation(n,b,c,d)
bf = float(b)
df = float(d)
cf = float(c)
af = n-bf-c-df
hx = -1*( (af+bf)*np.log(af+bf) + (cf+df)*np.log(cf+df) - n*np.log(n) ) / n
        hy = -1*( (af+cf)*np.log(af+cf) + (bf+df)*np.log(bf+df) - n*np.log(n) ) / n  # target API: numpy.log; closing ') / n' restored to mirror the hx line above
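# A worked, stand-alone version of the G-test statistic computed by
# RINGexperiment._mistatistic above (G = 2 * n * MI for the 2x2 contingency table
# with a = n - b - c - d). The counts below are illustrative only.
import numpy as np

def g_statistic(n, b, c, d):
    a = float(n - b - c - d)
    b, c, d = float(b), float(c), float(d)
    if min(a, b, c, d) < 1:
        return 0.0
    mi = (a * np.log(a) + b * np.log(b) + c * np.log(c) + d * np.log(d)
          + n * np.log(n)
          - (a + c) * np.log(a + c) - (a + b) * np.log(a + b)
          - (b + d) * np.log(b + d) - (c + d) * np.log(c + d)) / n
    return 2 * n * mi

print(g_statistic(1000, 40, 60, 20))  # larger G => stronger evidence of correlated mutations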
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division
import numpy as np
from scipy.interpolate import interp1d
from . import model
__author__ = '<NAME>'
class AbrahamsonSilvaKamai2014(model.Model):
"""Abrahamson, Silva, and Kamai (2014, :cite:`abrahamson14`) model.
This model was developed for active tectonic regions as part of the
NGA-West2 effort.
"""
    NAME = 'Abrahamson, Silva, & Kamai (2014)'
ABBREV = 'ASK14'
# Reference velocity (m/sec)
V_REF = 1180.
# Load the coefficients for the model
COEFF = model.load_data_file('abrahamson_silva_kamai_2014.csv', 2)
PERIODS = COEFF['period']
INDICES_PSA = np.arange(22)
INDEX_PGA = -2
INDEX_PGV = -1
PARAMS = [
model.NumericParameter('dist_rup', True, None, 300),
model.NumericParameter('dist_jb', True),
model.NumericParameter('mag', True, 3, 8.5),
model.NumericParameter('v_s30', True, 180, 1000),
model.NumericParameter('depth_1_0', False),
model.NumericParameter('depth_tor', False),
model.NumericParameter('dip', True),
model.NumericParameter('dist_crjb', False, default=15),
model.NumericParameter('dist_x', False),
model.NumericParameter('dist_y0', False),
model.NumericParameter('width', False),
model.CategoricalParameter('mechanism', True, ['SS', 'NS', 'RS']),
model.CategoricalParameter(
'region', False,
['global', 'california', 'china', 'italy', 'japan', 'taiwan'],
'global'
),
model.CategoricalParameter(
'vs_source', False, ['measured', 'inferred'], 'measured'),
model.CategoricalParameter(
'is_aftershock', False, [True, False], False),
model.CategoricalParameter('on_hanging_wall', False,
[True, False], False),
]
def _check_inputs(self, **kwds):
super(AbrahamsonSilvaKamai2014, self)._check_inputs(**kwds)
p = self.params
if p['width'] is None:
p['width'] = self.calc_width(p['mag'], p['dip'])
if p['depth_tor'] is None:
p['depth_tor'] = self.calc_depth_tor(p['mag'])
def __init__(self, **kwds):
"""Initialize the model.
Keyword Args:
depth_1_0 (Optional[float]): depth to the 1.0 km∕s shear-wave
velocity horizon beneath the site, :math:`Z_{1.0}` in (km).
Used to estimate `depth_2_5`.
depth_2_5 (Optional[float]): depth to the 2.5 km∕s shear-wave
velocity horizon beneath the site, :math:`Z_{2.5}` in (km).
If *None*, then it is computed from `depth_1_0` or `v_s30`
and the `region` parameter.
depth_tor (Optional[float]): depth to the top of the rupture
plane (:math:`Z_{tor}`, km). If *None*, then the average
model is used.
depth_bor (Optional[float]): depth to the bottom of the rupture
plane (:math:`Z_{bor}`, km). If *None*, then the average
model is used.
dip (float): fault dip angle (:math:`\phi`, deg).
dist_jb (float): Joyner-Boore distance to the rupture plane
(:math:`R_\\text{JB}`, km)
dist_rup (float): closest distance to the rupture plane
(:math:`R_\\text{rup}`, km)
dist_x (float): site coordinate measured perpendicular to the
fault strike from the fault line with the down-dip direction
being positive (:math:`R_x`, km).
dist_y0 (Optional[float]): the horizontal distance off the end of
the rupture measured parallel to strike (:math:`R_{y0}`, km).
mag (float): moment magnitude of the event (:math:`M_w`)
mechanism (str): fault mechanism. Valid options: "SS", "NS", "RS".
on_hanging_wall (Optional[bool]): If the site is located on the
hanging wall of the fault. If *None*, then *False* is assumed.
region (Optional[str]): region. Valid options: "global",
"california", "china", "italy", "japan", "taiwan". If *None*,
then "global" is used as a default value.
v_s30 (float): time-averaged shear-wave velocity over the top 30 m
of the site (:math:`V_{s30}`, m/s).
vs_source (Optional[str]): source of the `v_s30` value. Valid
options: "measured", "inferred"
width (Optional[float]): Down-dip width of the fault. If *None*,
then the model average is used.
"""
super(AbrahamsonSilvaKamai2014, self).__init__(**kwds)
# Compute the response at the reference velocity
resp_ref = np.exp(self._calc_ln_resp(self.V_REF, np.nan))
self._ln_resp = self._calc_ln_resp(self.params['v_s30'], resp_ref)
self._ln_std = self._calc_ln_std(resp_ref)
def _calc_ln_resp(self, v_s30, resp_ref):
"""Calculate the natural logarithm of the response.
Args:
v_s30 (float): site condition. Set `v_s30` to the reference
velocity (e.g., 1180 m/s) for the reference response.
resp_ref (Optional[:class:`np.array`]): response at the reference
condition. Required if `v_s30` is not equal to reference
velocity.
Returns:
:class:`np.array`: Natural log of the response.
"""
c = self.COEFF
p = self.params
# Magnitude scaling
f1 = self._calc_f1()
if p['on_hanging_wall']:
# Hanging-wall term
f4 = self._calc_f4()
else:
f4 = 0
# Depth to top of rupture term
f6 = c.a15 * np.clip(p['depth_tor'] / 20, 0, 1)
# Style of faulting
if p['mechanism'] == 'RS':
f7 = c.a11 * np.clip(p['mag'] - 4, 0, 1)
f8 = 0
elif p['mechanism'] == 'NS':
f7 = 0
f8 = c.a12 * np.clip(p['mag'] - 4, 0, 1)
else:
f7, f8 = 0, 0
# Site term
###########
v_1 = np.exp(-0.35 * np.log(np.clip(c.period, 0.5, 3) / 0.5) +
np.log(1500))
vs_ratio = np.minimum(v_s30, v_1) / c.v_lin
# Linear site model
f5 = (c.a10 + c.b * c.n) * np.log(vs_ratio)
# Nonlinear model
mask = vs_ratio < 1
f5[mask] = (
c.a10 * np.log(vs_ratio) -
c.b * np.log(resp_ref + c.c) +
c.b * np.log(resp_ref + c.c * vs_ratio ** c.n)
)[mask]
# Basin term
if v_s30 == self.V_REF or p['depth_1_0'] is None:
# No basin response
f10 = 0
else:
# Ratio between site depth_1_0 and model center
ln_depth_ratio = np.log(
(p['depth_1_0'] + 0.01) /
(self.calc_depth_1_0(v_s30, p['region']) + 0.01)
)
slope = interp1d(
[150, 250, 400, 700],
np.c_[c.a43, c.a44, c.a45, c.a46],
copy=False,
bounds_error=False,
fill_value=(c.a43, c.a46),
)(v_s30)
f10 = slope * ln_depth_ratio
# Aftershock term
if p['is_aftershock']:
f11 = c.a14 * np.clip(1 - (p['dist_crjb'] - 5) / 10, 0, 1)
else:
f11 = 0
if p['region'] == 'taiwan':
freg = c.a31 * np.log(vs_ratio) + c.a25 * p['dist_rup']
elif p['region'] == 'china':
freg = c.a28 * p['dist_rup']
elif p['region'] == 'japan':
f13 = interp1d(
[150, 250, 350, 450, 600, 850, 1150],
np.c_[c.a36, c.a37, c.a38, c.a39, c.a40, c.a41, c.a42],
copy=False,
bounds_error=False,
fill_value=(c.a36, c.a42),
)(v_s30)
freg = f13 + c.a29 * p['dist_rup']
else:
freg = 0
return f1 + f4 + f5 + f6 + f7 + f8 + f10 + f11 + freg
def _calc_ln_std(self, psa_ref):
"""Calculate the logarithmic standard deviation.
Returns:
:class:`np.array`: Logarithmic standard deviation.
"""
p = self.params
c = self.COEFF
if p['region'] == 'japan':
phi_al = c.s5 + (c.s6 - c.s5) * np.clip((p['dist_rup'] - 30) / 50,
0, 1)
else:
transition = np.clip((p['mag'] - 4) / 2, 0, 1)
if p['vs_source'] == 'measured':
phi_al = c.s1m + (c.s2m - c.s1m) * transition
else:
phi_al = c.s1e + (c.s2e - c.s1e) * transition
tau_al = c.s3 + (c.s4 - c.s3) * np.clip((p['mag'] - 5) / 2, 0, 1)
tau_b = tau_al
# Remove period independent site amplification uncertainty of 0.4
phi_amp = 0.4
phi_b = np.sqrt(phi_al ** 2 - phi_amp ** 2)
# The partial derivative of the amplification with respect to
# the reference intensity
deriv = ((-c.b * psa_ref) / (psa_ref + c.c) +
(c.b * psa_ref) /
(psa_ref + c.c * (p['v_s30'] / c.v_lin) ** c.n))
deriv[p['v_s30'] >= c.v_lin] = 0
tau = tau_b * (1 + deriv)
phi = np.sqrt(phi_b ** 2 * (1 + deriv) ** 2 + phi_amp ** 2)
ln_std = np.sqrt(phi ** 2 + tau ** 2)
return ln_std
@staticmethod
def calc_width(mag, dip):
"""Compute the fault width based on equation in NGW2 spreadsheet.
This equation is not provided in the paper.
Args:
mag (float): moment magnitude of the event (:math:`M_w`)
dip (float): Fault dip angle (:math:`\phi`, deg)
Returns:
float: estimated fault width (:math:`W`, km)
"""
return min(
18 / np.sin(np.radians(dip)),
10 ** (-1.75 + 0.45 * mag)
)
@staticmethod
def calc_depth_tor(mag):
"""Calculate the depth to top of rupture (km).
Args:
mag (float): moment magnitude of the event (:math:`M_w`)
Returns:
float: estimated depth (km)
"""
return np.interp(mag, [5., 7.2], [7.8, 0])
@staticmethod
def calc_depth_1_0(v_s30, region='california'):
"""Estimate the depth to 1 km/sec horizon (:math:`Z_{1.0}`) based on
:math:`V_{s30}` and region.
This is based on equations 18 and 19 in the :cite:`abrahamson14`
and differs from the equations in the :cite:`chiou14`.
Args:
v_s30 (float): time-averaged shear-wave velocity over the top 30 m
of the site (:math:`V_{s30}`, m/s).
Keyword Args:
region (Optional[str]): region of basin model. Valid options:
"california", "japan". If *None*, then "california" is used as
the default value.
Returns:
float: depth to a shear-wave velocity of 1,000 m/sec
(:math:`Z_{1.0}`, km).
"""
if region in ['japan']:
# Japan
power = 2
v_ref = 412
slope = -5.23 / power
else:
# Global
power = 4
v_ref = 610
slope = -7.67 / power
return np.exp(slope * np.log((v_s30 ** power + v_ref ** power) /
(1360. ** power + v_ref ** power))) / 1000
def _calc_f1(self):
"""Calculate the magnitude scaling parameter f1.
Returns:
:class:`np.array`: Model parameter f1.
"""
c = self.COEFF
p = self.params
# Magnitude dependent taper
dist = np.sqrt(
p['dist_rup'] ** 2 +
(c.c4 - (c.c4 - 1) * np.clip(5 - p['mag'], 0, 1)) ** 2
)
# Magnitude scaling
# Need to copy c.a1 to that it isn't modified during the following
# operations.
f1 = np.array(c.a1)
ma1 = (p['mag'] <= c.m2)
f1[ma1] += (
c.a4 * (c.m2 - c.m1) + c.a8 * (8.5 - c.m2) ** 2 +
c.a6 * (p['mag'] - c.m2) +
c.a7 * (p['mag'] - c.m2) +
(c.a2 + c.a3 * (c.m2 - c.m1)) *
|
np.log(dist)
|
numpy.log
|
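# A usage sketch for the static helpers defined on AbrahamsonSilvaKamai2014 above,
# re-stated inline so it runs without the surrounding `model` package; the scenario
# values are placeholders, not validated results.
import numpy as np

mag, dip, v_s30 = 6.5, 90.0, 400.0
width = min(18 / np.sin(np.radians(dip)), 10 ** (-1.75 + 0.45 * mag))   # calc_width
depth_tor = np.interp(mag, [5.0, 7.2], [7.8, 0])                        # calc_depth_tor
power, v_ref, slope = 4, 610, -7.67 / 4                                 # 'global' branch of calc_depth_1_0
depth_1_0 = np.exp(slope * np.log((v_s30 ** power + v_ref ** power) /
                                  (1360. ** power + v_ref ** power))) / 1000
print(width, depth_tor, depth_1_0)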
# -*- coding: utf-8 -*-
import os
import pickle
import random
import argparse
import torch as t
import numpy as np
import resource
from tqdm import tqdm
from torch.optim import SGD
from torch.utils.data import Dataset, DataLoader
from model import SkipGramNeg
#from data_utils import build_dataset, DataPipeline
#os.environ["CUDA_VISIBLE_DEVICES"]="2"
print("Using {}".format(t.cuda.device_count()))
device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
def memory_limit():
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    resource.setrlimit(resource.RLIMIT_AS, (get_memory() * 1024 // 4, hard))  # setrlimit expects integer limits
def get_memory():
with open('/proc/meminfo', 'r') as mem:
free_memory = 0
for i in mem:
sline = i.split()
if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
free_memory += int(sline[1])
return free_memory
memory_limit()
#available, total = cuda.mem_get_info()
#print("Available: %.2f GB\nTotal: %.2f GB"%(available/1e9, total/1e9))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default='sgns', help="model name")
parser.add_argument('--data_dir', type=str, default='./data/', help="data directory path")
parser.add_argument('--save_dir', type=str, default='./pts/', help="model directory path")
parser.add_argument('--e_dim', type=int, default=200, help="embedding dimension")
parser.add_argument('--n_negs', type=int, default=10, help="number of negative samples")
parser.add_argument('--epoch', type=int, default=5, help="number of epochs")
parser.add_argument('--mb', type=int, default=4096, help="mini-batch size")
parser.add_argument('--ss_t', type=float, default=1e-5, help="subsample threshold")
parser.add_argument('--conti', action='store_true', help="continue learning")
parser.add_argument('--weights', action='store_true', help="use weights for negative sampling")
parser.add_argument('--cuda', action='store_true', help="use CUDA")
return parser.parse_args()
class PermutedSubsampledCorpus(Dataset):
def __init__(self, datapath, data_utils, ws=None):
data = pickle.load(open(datapath, 'rb'))
if ws is not None:
self.data = []
for iword, owords in data:
if random.random() > ws[iword]:
self.data.append((iword, owords))
else:
self.data = data
#self.window_size = window_size
#self.batch_neg = get_neg_data(args.mb, args.n_negs, self.data[1])
self.iword, self.owords = list(zip(*data))
self.owords = t.tensor([random.sample(oword, 1) for oword in self.owords], dtype=t.long).squeeze()
#self.vocab = vocab
self.data_utils = data_utils
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
#iword, owords = self.data[idx]
#sample = {'i':iword, 'o':np.array(owords)}
#print(self.iword[idx])
#print(self.owords[idx])
#boundary = np.random.randint(1, self.window_size)
return self.iword[idx], self.owords[idx], self.data_utils.getNegatives(self.owords[idx])
#return self.iword[idx], self.owords[idx] #sample
class DataUtils:
def __init__(self, wc, n_negs):
self.wc = wc
self.negative_table_size = 1e8
self.negatives = []
self.negpos = 0
self.n_negs = n_negs
def initTableNegatives(self):
pow_frequency = np.array(list(self.wc.values())) ** 0.5
words_pow = sum(pow_frequency)
ratio = pow_frequency / words_pow
count = np.round(ratio * self.negative_table_size)
for wid, c in enumerate(count):
self.negatives += [wid] * int(c)
self.negatives = np.array(self.negatives)
np.random.shuffle(self.negatives)
def getNegatives(self, target): # TODO check equality with target
size = self.n_negs
response = self.negatives[self.negpos:self.negpos + size]
self.negpos = (self.negpos + size) % len(self.negatives)
if len(response) != size:
response = np.concatenate((response, self.negatives[0:self.negpos]))
while target.tolist() in response:
#print("already seen")
self.negpos = (self.negpos + size) % len(self.negatives)
response = self.negatives[self.negpos:self.negpos + size]
if len(response) != size:
response = np.concatenate((response, self.negatives[0:self.negpos]))
return response
def get_neg_data(batch_size, num, target_inputs, vocab):
neg = np.zeros(num)
print(len(target_inputs))
for j in range(len(target_inputs)):
print(j)
for i in range(len(target_inputs[j])):
delta = random.sample(vocab, num)
#print(target_inputs[i])
#print(delta)
#print(target_inputs[j][i])
while target_inputs[j][i] in delta:
print("sample new")
delta = random.sample(target_inputs[j-1], num)
            neg = np.vstack([neg, delta])  # target API: numpy.vstack
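# A compact sketch of the negative-sampling table built by DataUtils above: word ids
# are replicated in proportion to count**0.5 and negatives are drawn by walking a
# shuffled table. The vocabulary counts and table size here are made up.
import numpy as np

wc = {0: 100, 1: 10, 2: 1}                       # word id -> frequency (hypothetical)
pow_freq = np.array(list(wc.values())) ** 0.5
ratio = pow_freq / pow_freq.sum()
table = np.repeat(np.arange(len(wc)), np.round(ratio * 1000).astype(int))
np.random.shuffle(table)
print(table[:10])                                # one draw of 10 negative samples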
# coding: utf-8
# In[14]:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# In[15]:
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
from skimage.transform import rescale, resize
from tqdm import tqdm
import cv2
import random
# In[16]:
from tqdm import tqdm_notebook, tnrange
from itertools import chain
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from sklearn.model_selection import train_test_split
import tensorflow as tf
import keras
from keras.layers import concatenate, add
from keras.models import Sequential, Model
from keras.layers import Input
from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import MaxPool2D, AvgPool2D
from keras.layers import UpSampling2D
# from tensorflow.keras.layers.advanced_activations import LeakyReLU
from keras.layers import LeakyReLU
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Lambda
from keras.layers import MaxPooling2D, GlobalMaxPool2D
from keras.layers import Flatten
from keras.layers import Reshape
from keras.utils import plot_model
from keras.layers import Add, Multiply
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.losses import mse, binary_crossentropy
from keras.preprocessing.image import ImageDataGenerator
import keras.backend as K
from keras.utils import multi_gpu_model
from keras.optimizers import Adam
from sklearn.metrics import confusion_matrix
from keras import initializers
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import numpy as np
from keras.models import Model
from keras import layers
from keras.layers import Input
from keras.layers import Activation
from keras.layers import Concatenate
from keras.layers import Add
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import DepthwiseConv2D
from keras.layers import ZeroPadding2D
from keras.layers import AveragePooling2D
from keras.engine import Layer
from keras.engine import InputSpec
from keras.engine.topology import get_source_inputs
from keras import backend as K
from keras.applications import imagenet_utils
from keras.utils import conv_utils
from keras.utils.data_utils import get_file
# from model import Deeplabv3
# In[17]:
# Set some parameters
im_width = 128
im_height = 128
n_channels = 3
border = 5
n_filters=16
dropout=0.05
batchnorm=True
b_size = 16
# In[18]:
path_train = '../../../Dataset/Compaq_orignal/Compaq_orignal/Compaq_orignal/train/'
path_valid = '../../../Dataset/Compaq_orignal/Compaq_orignal/Compaq_orignal/test/'
path_test = '../../../Dataset/NIR_Dataset_New/'
# In[19]:
import cv2
def get_data(train_data_path):
img_size = 128
# train_ids = next(os.walk(train_data_path))[1]
train_ids = next(os.walk(train_data_path + "image/1"))[2]
x_train = []
# x_train = np.zeros((len(train_ids), img_size, img_size, 3), dtype=np.uint8)
y_train = np.zeros((len(train_ids), img_size, img_size, 1), dtype=np.bool)
for i, id_ in tqdm_notebook(enumerate(train_ids), total=len(train_ids)):
path = train_data_path+"image/1"+"/{}".format(id_)
img = cv2.imread(path,1)
img = cv2.resize(img, (img_size, img_size))
img = np.asarray(img) / 127.5
img = img - 1
x_train.append(img)
height, width, _ = img.shape
label = np.zeros((height, width, 1))
path2 = train_data_path+"label/1/"
mask_ = cv2.imread(path2+id_, 0)
mask_ = cv2.resize(mask_, (img_size, img_size))
mask_ = np.expand_dims(mask_, axis=-1)
label = np.maximum(label, mask_)
y_train[i]=label
x_train = np.array(x_train)
return x_train , y_train
X_train, y_train = get_data(path_train)
X_valid , y_valid = get_data(path_valid)
X_test , y_test = get_data(path_test)
# In[20]:
# Check if training data looks all right
ix = random.randint(0, len(X_train) - 1)  # randint is inclusive on both ends
has_mask = y_train[ix].max() > 0
fig, ax = plt.subplots(1, 2, figsize=(20, 10))
# image = X_train[ix, ... , 0]
image = X_train[ix,:,:,:].reshape(128,128,3)
image = (image + 1 ) / 2
image = image * 255
ax[0].imshow(image.astype('uint8'))
if has_mask:
ax[0].contour(y_train[ix].squeeze(), colors='k', levels=[0.5])
ax[0].set_title('Image')
ax[1].imshow(y_train[ix].squeeze(), interpolation='bilinear', cmap='gray')
ax[1].set_title('Mask');
# In[21]:
#SET A SEED FOR REPRODUCABILITY
np.random.seed(20)
#NUMBER OF DIMENSIONS IN THE ENCODED LAYER
latent_dims = 64
image_size = 128
n_channel = 3
# In[22]:
def edge_comp(image):
edge = tf.image.sobel_edges(image)
edge = concatenate([edge[:,:,:,:,0],edge[:,:,:,:,1]],axis = -1)
print(edge.shape)
return edge
# In[23]:
#ENCODER
#BUILT WITH FUNCTIONAL MODEL DUE TO THE MULTIPLE INPUTS AND OUTPUTS
encoder_in = Input(shape=(image_size,image_size,n_channel),name = 'encoder_input') ##INPUT FOR THE IMAGE
input_edge = Lambda(edge_comp)(encoder_in)
encoder_l1 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same', input_shape=(image_size,image_size,n_channel),kernel_initializer = initializers.TruncatedNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(encoder_in)
# encoder_l1 = BatchNormalization()(encoder_l1)
encoder_l1 = Activation(LeakyReLU(0.2))(encoder_l1)
encoder_l1 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.TruncatedNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(encoder_l1)
# encoder_l1 = BatchNormalization()(encoder_l1)
encoder_l1 = Activation(LeakyReLU(0.2))(encoder_l1)
encoder_l2 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.TruncatedNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(encoder_l1)
# encoder_l2 = BatchNormalization()(encoder_l2)
encoder_l2 = Activation(LeakyReLU(0.2))(encoder_l2)
encoder_l3 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.TruncatedNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(encoder_l2)
# encoder_l3 = BatchNormalization()(encoder_l3)
encoder_l3 = Activation(LeakyReLU(0.2))(encoder_l3)
encoder_l4 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.TruncatedNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(encoder_l3)
# encoder_l4 = BatchNormalization()(encoder_l4)
encoder_l4 = Activation(LeakyReLU(0.2))(encoder_l4)
encoder_l5 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.TruncatedNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(encoder_l4)
# encoder_l4 = BatchNormalization()(encoder_l4)
encoder_l5 = Activation(LeakyReLU(0.2))(encoder_l5)
flatten = Flatten()(encoder_l5)
encoder_dense = Dense(1024,kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(flatten)
# encoder_dense = BatchNormalization()(encoder_dense)
encoder_out = Activation(LeakyReLU(0.2))(encoder_dense)
mu = Dense(latent_dims,kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(encoder_out)
log_var = Dense(latent_dims,kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(encoder_out)
epsilon = Input(tensor=K.random_normal(shape=(K.shape(mu)[0], latent_dims))) ##INPUT EPSILON FOR RANDOM SAMPLING
sigma = Lambda(lambda x: K.exp(0.5 * x))(log_var) # CHANGE log_var INTO STANDARD DEVIATION(sigma)
z_eps = Multiply()([sigma, epsilon])
z = Add()([mu, z_eps])
encoder=Model(inputs = [encoder_in,epsilon], outputs =[z,input_edge],name='encoder')
print(encoder.summary())
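# A numpy sketch of the reparameterization trick wired up above with Lambda/Multiply/Add:
# z = mu + exp(0.5 * log_var) * epsilon, which keeps the sampling step differentiable
# with respect to mu and log_var. The zero-valued mu and log_var are placeholders.
import numpy as np

mu_demo = np.zeros(latent_dims)
log_var_demo = np.zeros(latent_dims)
epsilon_demo = np.random.normal(size=latent_dims)
z_demo = mu_demo + np.exp(0.5 * log_var_demo) * epsilon_demo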
# In[24]:
## DECODER
# # layer 1
decoder_in = Input(shape=(latent_dims,),name='decoder_input')
decoder_edge = Input(shape = (image_size,image_size,6),name = 'edge_input')
decoder_l1 = Dense(1024, input_shape=(latent_dims,),kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_in)
# decoder_l1 = BatchNormalization()(decoder_l1)
decoder_l1 = Activation(LeakyReLU(0.2))(decoder_l1)
#layer 2
decoder_l2 = Dense(2048,kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_l1)
# decoder_l2 = BatchNormalization()(decoder_l2)
decoder_l2 = Activation(LeakyReLU(0.2))(decoder_l2)
#reshape
decoder_reshape = Reshape(target_shape=(4,4,128))(decoder_l2)
# layer 3
decoder_l3 = Conv2DTranspose(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_reshape)
# decoder_l3 = BatchNormalization()(decoder_l3)
decoder_l3 = Activation(LeakyReLU(0.2))(decoder_l3)
#layer 4
decoder_l4 = Conv2DTranspose(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_l3)
# decoder_l4 = BatchNormalization()(decoder_l4)
decoder_l4 = Activation(LeakyReLU(0.2))(decoder_l4)
#layer 5
decoder_l5 = Conv2DTranspose(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_l4)
# decoder_l5 = BatchNormalization()(decoder_l5)
decoder_l5 = Activation(LeakyReLU(0.2))(decoder_l5)
#layer 6
decoder_l6 = Conv2DTranspose(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_l5)
# decoder_l6 = BatchNormalization()(decoder_l6)
decoder_l6 = Activation(LeakyReLU(0.2))(decoder_l6)
#layer 7
# decoder_l7 = Conv2DTranspose(filters=128, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_l6)
# # decoder_l7 = BatchNormalization()(decoder_l7)
# decoder_l7 = Activation(LeakyReLU(0.2))(decoder_l7)
#layer 8
decoder_l8 = Conv2DTranspose(filters=3, kernel_size=3, strides=2, padding='same',kernel_initializer = initializers.RandomNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_l6)
# decoder_l8 = BatchNormalization()(decoder_l8)
# decoder_l8 = Activation(LeakyReLU(0.2))(decoder_l8)
decoder_l8 = Activation('tanh')(decoder_l8)
decoder_ledge = concatenate([decoder_l8,decoder_edge],axis = -1)
#layer 9
decoder_l9 = Conv2D(filters=128, kernel_size=3, strides=1, padding='same',kernel_initializer = initializers.TruncatedNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_ledge)
# decoder_l9 = BatchNormalization()(decoder_l9)
decoder_l9 = Activation(LeakyReLU(0.2))(decoder_l9)
decoder_l10 = Conv2D(filters=128, kernel_size=3, strides=1, padding='same',kernel_initializer = initializers.TruncatedNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_l9)
# decoder_l9 = BatchNormalization()(decoder_l9)
decoder_l10 = Activation(LeakyReLU(0.2))(decoder_l10)
decoder_l11 = Conv2D(filters=3, kernel_size=3, strides=1, padding='same',kernel_initializer = initializers.TruncatedNormal(stddev=0.02),bias_initializer=initializers.Constant(value=0.0))(decoder_l10)
# decoder_l9 = BatchNormalization()(decoder_l9)
decoder_out = Activation('tanh')(decoder_l11)
decoder=Model(inputs = [decoder_in , decoder_edge],outputs = [decoder_out],name='vae_decoder')
print(decoder.summary())
# In[25]:
# orignal DeepLab
WEIGHTS_PATH_X = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_xception_tf_dim_ordering_tf_kernels.h5"
WEIGHTS_PATH_MOBILE = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5"
WEIGHTS_PATH_X_CS = "https://github.com/rdiazgar/keras-deeplab-v3-plus/releases/download/1.2/deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5"
WEIGHTS_PATH_MOBILE_CS = "https://github.com/rdiazgar/keras-deeplab-v3-plus/releases/download/1.2/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5"
class BilinearUpsampling(Layer):
"""Just a simple bilinear upsampling layer. Works only with TF.
Args:
upsampling: tuple of 2 numbers > 0. The upsampling ratio for h and w
output_size: used instead of upsampling arg if passed!
"""
def __init__(self, upsampling=(2, 2), output_size=None, data_format=None, **kwargs):
super(BilinearUpsampling, self).__init__(**kwargs)
self.data_format = K.image_data_format()
self.input_spec = InputSpec(ndim=4)
if output_size:
self.output_size = conv_utils.normalize_tuple(
output_size, 2, 'output_size')
self.upsampling = None
else:
self.output_size = None
self.upsampling = conv_utils.normalize_tuple(
upsampling, 2, 'upsampling')
def compute_output_shape(self, input_shape):
if self.upsampling:
height = self.upsampling[0] * input_shape[1] if input_shape[1] is not None else None
width = self.upsampling[1] * input_shape[2] if input_shape[2] is not None else None
else:
height = self.output_size[0]
width = self.output_size[1]
return (input_shape[0],
height,
width,
input_shape[3])
def call(self, inputs):
if self.upsampling:
return K.tf.image.resize_bilinear(inputs, (inputs.shape[1] * self.upsampling[0],
inputs.shape[2] * self.upsampling[1]),
align_corners=True)
else:
return K.tf.image.resize_bilinear(inputs, (self.output_size[0],
self.output_size[1]),
align_corners=True)
def get_config(self):
config = {'upsampling': self.upsampling,
'output_size': self.output_size,
'data_format': self.data_format}
base_config = super(BilinearUpsampling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
""" SepConv with BN between depthwise & pointwise. Optionally add activation after BN
Implements right "same" padding for even kernel sizes
Args:
x: input tensor
filters: num of filters in pointwise convolution
prefix: prefix before name
stride: stride at depthwise conv
kernel_size: kernel size for depthwise convolution
rate: atrous rate for depthwise convolution
            depth_activation: flag to use activation between depthwise & pointwise convs
epsilon: epsilon to use in BN layer
"""
if stride == 1:
depth_padding = 'same'
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
depth_padding = 'valid'
if not depth_activation:
x = Activation('relu')(x)
x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
x = Conv2D(filters, (1, 1), padding='same',
use_bias=False, name=prefix + '_pointwise')(x)
x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
return x
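# Worked padding example for SepConv_BN above: with kernel_size=3 and rate=2 the effective
# kernel is 3 + (3 - 1) * (2 - 1) = 5, so pad_total = 4, pad_beg = 2, pad_end = 2, and
# ZeroPadding2D((2, 2)) is applied before the 'valid' depthwise convolution.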
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
"""Implements right 'same' padding for even kernel sizes
Without this there is a 1 pixel drift when stride = 2
Args:
x: input tensor
filters: num of filters in pointwise convolution
prefix: prefix before name
stride: stride at depthwise conv
kernel_size: kernel size for depthwise convolution
rate: atrous rate for depthwise convolution
"""
if stride == 1:
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='same', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='valid', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,
rate=1, depth_activation=False, return_skip=False):
""" Basic building block of modified Xception network
Args:
inputs: input tensor
depth_list: number of filters in each SepConv layer. len(depth_list) == 3
prefix: prefix before name
skip_connection_type: one of {'conv','sum','none'}
stride: stride at last depthwise conv
rate: atrous rate for depthwise convolution
depth_activation: flag to use activation between depthwise & pointwise convs
return_skip: flag to return additional tensor after 2 SepConvs for decoder
"""
residual = inputs
for i in range(3):
residual = SepConv_BN(residual,
depth_list[i],
prefix + '_separable_conv{}'.format(i + 1),
stride=stride if i == 2 else 1,
rate=rate,
depth_activation=depth_activation)
if i == 1:
skip = residual
if skip_connection_type == 'conv':
shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',
kernel_size=1,
stride=stride)
shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
outputs = layers.add([residual, shortcut])
elif skip_connection_type == 'sum':
outputs = layers.add([residual, inputs])
elif skip_connection_type == 'none':
outputs = residual
if return_skip:
return outputs, skip
else:
return outputs
def relu6(x):
return K.relu(x, max_value=6)
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
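# Quick worked checks of _make_divisible above: channel counts are rounded to a multiple
# of `divisor` without dropping more than 10% below the requested value.
assert _make_divisible(32 * 0.75, 8) == 24
assert _make_divisible(33, 8) == 32
assert _make_divisible(7, 8) == 8   # floored at min_value = divisor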
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
in_channels = inputs._keras_shape[-1]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'expanded_conv_{}_'.format(block_id)
if block_id:
# Expand
x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
use_bias=False, activation=None,
name=prefix + 'expand')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'expand_BN')(x)
x = Activation(relu6, name=prefix + 'expand_relu')(x)
else:
prefix = 'expanded_conv_'
# Depthwise
x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
use_bias=False, padding='same', dilation_rate=(rate, rate),
name=prefix + 'depthwise')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'depthwise_BN')(x)
x = Activation(relu6, name=prefix + 'depthwise_relu')(x)
# Project
x = Conv2D(pointwise_filters,
kernel_size=1, padding='same', use_bias=False, activation=None,
name=prefix + 'project')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'project_BN')(x)
if skip_connection:
return Add(name=prefix + 'add')([inputs, x])
# if in_channels == pointwise_filters and stride == 1:
# return Add(name='res_connect_' + str(block_id))([inputs, x])
return x
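# Channel bookkeeping example for the block above (a sketch, assuming alpha=1.0):
# with filters=24 and expansion=6 on a 16-channel input, the expand conv produces
# 16 * 6 = 96 channels, the 3x3 depthwise conv keeps 96, and the projection
# reduces the result to _make_divisible(int(24 * 1.0), 8) == 24 channels.
assert _make_divisible(int(24 * 1.0), 8) == 24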
# Deeplabs start here
input_shape = (128,128,3)
input_tensor = None
weights = None  # no pretrained weights are loaded ('pascal_voc' / 'cityscapes' are the alternatives)
classes = 1
backbone = 'xception'
OS = 16
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backbone == 'xception':
if OS == 8:
entry_block3_stride = 1
middle_block_rate = 2 # ! Not mentioned in paper, but required
exit_block_rates = (2, 4)
atrous_rates = (12, 24, 36)
else:
entry_block3_stride = 2
middle_block_rate = 1
exit_block_rates = (1, 2)
atrous_rates = (6, 12, 18)
x = Conv2D(32, (3, 3), strides=(2, 2),
name='entry_flow_conv1_1', use_bias=False, padding='same')(img_input)
x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)
x = Activation('relu')(x)
x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)
x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)
x = Activation('relu')(x)
x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',
skip_connection_type='conv', stride=2,
depth_activation=False)
x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',
skip_connection_type='conv', stride=2,
depth_activation=False, return_skip=True)
x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',
skip_connection_type='conv', stride=entry_block3_stride,
depth_activation=False)
for i in range(16):
x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),
skip_connection_type='sum', stride=1, rate=middle_block_rate,
depth_activation=False)
x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',
skip_connection_type='conv', stride=1, rate=exit_block_rates[0],
depth_activation=False)
x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',
skip_connection_type='none', stride=1, rate=exit_block_rates[1],
depth_activation=True)
else:
    # MobileNetV2 backbone (unused here, since backbone == 'xception'); note that
    # this branch assumes a width multiplier `alpha`, which is never defined in
    # this script.
    OS = 8
first_block_filters = _make_divisible(32 * alpha, 8)
x = Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2), padding='same',
use_bias=False, name='Conv')(img_input)
x = BatchNormalization(
epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
x = Activation(relu6, name='Conv_Relu6')(x)
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
expansion=1, block_id=0, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3, skip_connection=False)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5, skip_connection=True)
# stride in block 6 changed from 2 -> 1, so we need to use rate = 2
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, # 1!
expansion=6, block_id=6, skip_connection=False)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=7, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=8, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=9, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=10, skip_connection=False)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=11, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=12, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2, # 1!
expansion=6, block_id=13, skip_connection=False)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=14, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=15, skip_connection=True)
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=16, skip_connection=False)
# end of feature extractor
# branching for Atrous Spatial Pyramid Pooling
# Image Feature branch
#out_shape = int(np.ceil(input_shape[0] / OS))
b4 = AveragePooling2D(pool_size=(int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(x)
b4 = Conv2D(256, (1, 1), padding='same',
use_bias=False, name='image_pooling')(b4)
b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)
b4 = Activation('relu')(b4)
b4 = BilinearUpsampling((int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(b4)
# simple 1x1
b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)
b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)
b0 = Activation('relu', name='aspp0_activation')(b0)
# the MobileNetV2 variant builds only the image-pooling and plain 1x1 branches;
# the three atrous SepConv branches below are used with the Xception backbone only
if backbone == 'xception':
# rate = 6 (12)
b1 = SepConv_BN(x, 256, 'aspp1',
rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
# rate = 12 (24)
b2 = SepConv_BN(x, 256, 'aspp2',
rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
# rate = 18 (36)
b3 = SepConv_BN(x, 256, 'aspp3',
rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)
# concatenate ASPP branches & project
x = Concatenate()([b4, b0, b1, b2, b3])
else:
x = Concatenate()([b4, b0])
# project the concatenated ASPP branches to 256 channels (applies to both backbones)
x = Conv2D(256, (1, 1), padding='same',
           use_bias=False, name='concat_projection')(x)
x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
x = Activation('relu')(x)
x = Dropout(0.1)(x)
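# With input_shape=(128, 128, 3) and OS=16 the backbone output is an 8x8 grid,
# so the image-pooling branch above averages over the full 8x8 map, is upsampled
# back to 8x8, and is concatenated with the 1x1 and atrous SepConv branches
# before the 256-channel projection and dropout.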
# DeepLab v.3+ decoder
if backbone == 'xception':
# Feature projection
# x4 (x2) block
x = BilinearUpsampling(output_size=(int(np.ceil(input_shape[0] / 4)),
int(np.ceil(input_shape[1] / 4))))(x)
dec_skip1 = Conv2D(48, (1, 1), padding='same',
use_bias=False, name='feature_projection0')(skip1)
dec_skip1 = BatchNormalization(
name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
dec_skip1 = Activation('relu')(dec_skip1)
x_pred = Concatenate()([x, dec_skip1])
x = SepConv_BN(x_pred, 256, 'decoder_conv0',
depth_activation=True, epsilon=1e-5)
x = SepConv_BN(x, 256, 'decoder_conv1',
depth_activation=True, epsilon=1e-5)
    # you can use it with an arbitrary number of classes
if classes == 21:
last_layer_name = 'logits_semantic'
else:
last_layer_name = 'custom_logits_semantic'
x = Conv2D(classes, (1, 1), padding='same', name=last_layer_name,activation="sigmoid")(x)
x = BilinearUpsampling(output_size=(input_shape[0], input_shape[1]))(x)
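    # Decoder recap for this configuration (128x128 input, OS=16): the ASPP output
    # is upsampled from 8x8 to 32x32, fused with the 48-channel projection of the
    # stride-4 skip feature, refined by two SepConv_BN blocks, reduced to classes=1
    # channel with a sigmoid (binary mask head), and upsampled back to 128x128.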
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
Deeplab = Model(inputs, outputs = [x_pred, x], name='deeplab')
Deeplab.summary()
# load weights
if weights == 'pascal_voc':
if backbone == 'xception':
weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_X,
cache_subdir='models')
else:
weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_MOBILE,
cache_subdir='models')
Deeplab.load_weights(weights_path, by_name=True)
elif weights == 'cityscapes':
if backbone == 'xception':
weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5',
WEIGHTS_PATH_X_CS,
cache_subdir='models')
else:
weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5',
WEIGHTS_PATH_MOBILE_CS,
cache_subdir='models')
Deeplab.load_weights(weights_path, by_name=True)
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Input array scaled to [-1.,1.]
"""
return imagenet_utils.preprocess_input(x, mode='tf')
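# Illustrative mapping of the 'tf' preprocessing mode above: pixels are scaled as
# x / 127.5 - 1, so 0 -> -1.0, 127.5 -> 0.0 and 255 -> 1.0.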
# from model import Deeplabv3
# In[26]:
# Prep DeepLab
WEIGHTS_PATH_X = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_xception_tf_dim_ordering_tf_kernels.h5"
WEIGHTS_PATH_MOBILE = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5"
WEIGHTS_PATH_X_CS = "https://github.com/rdiazgar/keras-deeplab-v3-plus/releases/download/1.2/deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5"
WEIGHTS_PATH_MOBILE_CS = "https://github.com/rdiazgar/keras-deeplab-v3-plus/releases/download/1.2/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5"
class BilinearUpsampling(Layer):
"""Just a simple bilinear upsampling layer. Works only with TF.
Args:
upsampling: tuple of 2 numbers > 0. The upsampling ratio for h and w
output_size: used instead of upsampling arg if passed!
"""
def __init__(self, upsampling=(2, 2), output_size=None, data_format=None, **kwargs):
super(BilinearUpsampling, self).__init__(**kwargs)
self.data_format = K.image_data_format()
self.input_spec = InputSpec(ndim=4)
if output_size:
self.output_size = conv_utils.normalize_tuple(
output_size, 2, 'output_size')
self.upsampling = None
else:
self.output_size = None
self.upsampling = conv_utils.normalize_tuple(
upsampling, 2, 'upsampling')
def compute_output_shape(self, input_shape):
if self.upsampling:
height = self.upsampling[0] * input_shape[1] if input_shape[1] is not None else None
width = self.upsampling[1] * input_shape[2] if input_shape[2] is not None else None
else:
height = self.output_size[0]
width = self.output_size[1]
return (input_shape[0],
height,
width,
input_shape[3])
def call(self, inputs):
if self.upsampling:
return K.tf.image.resize_bilinear(inputs, (inputs.shape[1] * self.upsampling[0],
inputs.shape[2] * self.upsampling[1]),
align_corners=True)
else:
return K.tf.image.resize_bilinear(inputs, (self.output_size[0],
self.output_size[1]),
align_corners=True)
def get_config(self):
config = {'upsampling': self.upsampling,
'output_size': self.output_size,
'data_format': self.data_format}
base_config = super(BilinearUpsampling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
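# Shape behaviour of the layer above (illustrative): with upsampling=(2, 2) a
# (None, 32, 32, 256) input becomes (None, 64, 64, 256); when output_size is
# passed, the spatial dimensions are fixed to output_size regardless of the input.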
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
""" SepConv with BN between depthwise & pointwise. Optionally add activation after BN
Implements right "same" padding for even kernel sizes
Args:
x: input tensor
filters: num of filters in pointwise convolution
prefix: prefix before name
stride: stride at depthwise conv
kernel_size: kernel size for depthwise convolution
rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
epsilon: epsilon to use in BN layer
"""
if stride == 1:
depth_padding = 'same'
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
depth_padding = 'valid'
if not depth_activation:
x = Activation('relu')(x)
x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
x = Conv2D(filters, (1, 1), padding='same',
use_bias=False, name=prefix + '_pointwise')(x)
x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
return x
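# Note on the depth_activation flag above: when False, a single ReLU is applied
# before the depthwise conv (as in the entry/middle flow blocks); when True, a
# ReLU follows each BatchNormalization instead (after depthwise and pointwise).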
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
"""Implements right 'same' padding for even kernel sizes
Without this there is a 1 pixel drift when stride = 2
Args:
x: input tensor
        filters: num of filters in the convolution
        prefix: prefix before name
        stride: stride of the convolution
        kernel_size: kernel size of the convolution
        rate: atrous (dilation) rate of the convolution
"""
if stride == 1:
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='same', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='valid', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,
rate=1, depth_activation=False, return_skip=False):
""" Basic building block of modified Xception network
Args:
inputs: input tensor
depth_list: number of filters in each SepConv layer. len(depth_list) == 3
prefix: prefix before name
skip_connection_type: one of {'conv','sum','none'}
stride: stride at last depthwise conv
rate: atrous rate for depthwise convolution
depth_activation: flag to use activation between depthwise & pointwise convs
return_skip: flag to return additional tensor after 2 SepConvs for decoder
"""
residual = inputs
for i in range(3):
residual = SepConv_BN(residual,
depth_list[i],
prefix + '_separable_conv{}'.format(i + 1),
stride=stride if i == 2 else 1,
rate=rate,
depth_activation=depth_activation)
if i == 1:
skip = residual
if skip_connection_type == 'conv':
shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',
kernel_size=1,
stride=stride)
shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
outputs = layers.add([residual, shortcut])
elif skip_connection_type == 'sum':
outputs = layers.add([residual, inputs])
elif skip_connection_type == 'none':
outputs = residual
if return_skip:
return outputs, skip
else:
return outputs
def relu6(x):
return K.relu(x, max_value=6)
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
in_channels = inputs._keras_shape[-1]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'expanded_conv_{}_'.format(block_id)
if block_id:
# Expand
x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
use_bias=False, activation=None,
name=prefix + 'expand')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'expand_BN')(x)
x = Activation(relu6, name=prefix + 'expand_relu')(x)
else:
prefix = 'expanded_conv_'
# Depthwise
x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
use_bias=False, padding='same', dilation_rate=(rate, rate),
name=prefix + 'depthwise')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'depthwise_BN')(x)
x = Activation(relu6, name=prefix + 'depthwise_relu')(x)
# Project
x = Conv2D(pointwise_filters,
kernel_size=1, padding='same', use_bias=False, activation=None,
name=prefix + 'project')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'project_BN')(x)
if skip_connection:
return Add(name=prefix + 'add')([inputs, x])
# if in_channels == pointwise_filters and stride == 1:
# return Add(name='res_connect_' + str(block_id))([inputs, x])
return x
# Deeplabs start here
input_shape = (128,128,3)
input_tensor = None
weights = None  # no pretrained weights are loaded ('pascal_voc' / 'cityscapes' are the alternatives)
classes = 1
backbone = 'xception'
OS = 16
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backbone == 'xception':
if OS == 8:
entry_block3_stride = 1
middle_block_rate = 2 # ! Not mentioned in paper, but required
exit_block_rates = (2, 4)
atrous_rates = (12, 24, 36)
else:
entry_block3_stride = 2
middle_block_rate = 1
exit_block_rates = (1, 2)
atrous_rates = (6, 12, 18)
x = Conv2D(32, (3, 3), strides=(2, 2),
name='entry_flow_conv1_1', use_bias=False, padding='same')(img_input)
x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)
x = Activation('relu')(x)
x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)
x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)
x = Activation('relu')(x)
x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',
skip_connection_type='conv', stride=2,
depth_activation=False)
x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',
skip_connection_type='conv', stride=2,
depth_activation=False, return_skip=True)
x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',
skip_connection_type='conv', stride=entry_block3_stride,
depth_activation=False)
for i in range(16):
x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),
skip_connection_type='sum', stride=1, rate=middle_block_rate,
depth_activation=False)
x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',
skip_connection_type='conv', stride=1, rate=exit_block_rates[0],
depth_activation=False)
x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',
skip_connection_type='none', stride=1, rate=exit_block_rates[1],
depth_activation=True)
else:
    # MobileNetV2 backbone (unused here, since backbone == 'xception'); note that
    # this branch assumes a width multiplier `alpha`, which is never defined in
    # this script.
    OS = 8
first_block_filters = _make_divisible(32 * alpha, 8)
x = Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2), padding='same',
use_bias=False, name='Conv')(img_input)
x = BatchNormalization(
epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
x = Activation(relu6, name='Conv_Relu6')(x)
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
expansion=1, block_id=0, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3, skip_connection=False)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5, skip_connection=True)
# stride in block 6 changed from 2 -> 1, so we need to use rate = 2
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, # 1!
expansion=6, block_id=6, skip_connection=False)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=7, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=8, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=9, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=10, skip_connection=False)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=11, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=12, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2, # 1!
expansion=6, block_id=13, skip_connection=False)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=14, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=15, skip_connection=True)
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=16, skip_connection=False)
# end of feature extractor
# branching for Atrous Spatial Pyramid Pooling
# Image Feature branch
#out_shape = int(np.ceil(input_shape[0] / OS))
b4 = AveragePooling2D(pool_size=(int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(x)
b4 = Conv2D(256, (1, 1), padding='same',
use_bias=False, name='image_pooling')(b4)
b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)
b4 = Activation('relu')(b4)
b4 = BilinearUpsampling((int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(b4)
# simple 1x1
b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)
b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)
b0 = Activation('relu', name='aspp0_activation')(b0)
# the MobileNetV2 variant builds only the image-pooling and plain 1x1 branches;
# the three atrous SepConv branches below are used with the Xception backbone only
if backbone == 'xception':
# rate = 6 (12)
b1 = SepConv_BN(x, 256, 'aspp1',
rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
# rate = 12 (24)
b2 = SepConv_BN(x, 256, 'aspp2',
rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
# rate = 18 (36)
b3 = SepConv_BN(x, 256, 'aspp3',
rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)
# concatenate ASPP branches & project
x = Concatenate()([b4, b0, b1, b2, b3])
else:
x = Concatenate()([b4, b0])
# project the concatenated ASPP branches to 256 channels (applies to both backbones)
x = Conv2D(256, (1, 1), padding='same',
           use_bias=False, name='concat_projection')(x)
x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
x = Activation('relu')(x)
x = Dropout(0.1)(x)
# DeepLab v.3+ decoder
if backbone == 'xception':
# Feature projection
# x4 (x2) block
x = BilinearUpsampling(output_size=(int(np.ceil(input_shape[0] / 4)),
int(
|
np.ceil(input_shape[1] / 4)
|
numpy.ceil
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------------------------------------------------
# ROS-MAGNA
# ----------------------------------------------------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2016 GRVC University of Seville
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
"""
Created on Mon Feb 21 2018
@author: josmilrom
"""
import sys
import rospy
import std_msgs.msg
import time
import math
import numpy as np
import tf, tf2_ros
import json
import copy
import random
import rospkg
from std_msgs.msg import Header, ColorRGBA
from geometry_msgs.msg import *
from sensor_msgs.msg import *
from xml.dom import minidom
from gazebo_msgs.srv import DeleteModel,SpawnModel
from visualization_msgs.msg import Marker
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray, TorusArray, PolygonArray
from jsk_recognition_msgs.msg import Torus as jsk_Torus
from sympy import Point3D, Line3D, Segment3D
from sympy import Point as Point2D
from sympy import Polygon as Polygon2D
import xml.etree.ElementTree
from magna.srv import *
from PoseElements import *
class GenericGeometry:
def __init__(self,geometry_def,parent_name,parent_prefix,transforms_list):
self.name = parent_prefix + '_' + geometry_def["name"]
self.prefix = parent_prefix + '_' + geometry_def["prefix"]
self.shape = geometry_def["shape"]
self.origin = geometry_def["origin"]
self.color = geometry_def["color"]
self.alpha = geometry_def["alpha"]
self.id = geometry_def["id"]
self.dimensions = geometry_def["dimensions"]
self.parent_name = parent_name
self.transforms_auxiliar_list = copy.deepcopy(transforms_list)
# print("all",len(self.transforms_auxiliar_list))
self.transforms_list = copy.deepcopy(transforms_list)
# print("persistent",len(self.transforms_list))
self.n_obs = 0
self.obs_pose_list = []
self.obs_list = []
self.obs_transforms_list = []
self.obs_shape_list = []
self.obstacles_dicc = {}
self.fsp_dicc = {"List": [], "Matrix": [], "Path" : {}}
tfbroadcaster = StaticTfBroadcaster(self.name,self.parent_name,self.origin,self.transforms_auxiliar_list)
self.transforms_auxiliar_list = tfbroadcaster.getTransforms()
# print("all",len(self.transforms_auxiliar_list))
self.transforms_list.append(tfbroadcaster.getTransforms()[-1])
# print("persistent",len(self.transforms_list))
time.sleep(0.05)
def MakeRvizMarker(self):
self.rviz_marker_def = {"shape" : self.shape}
self.rviz_marker_def["origin"] = self.origin
self.rviz_marker_def["parent_name"] = self.parent_name
self.rviz_marker_def["name"] = self.name
self.rviz_marker_def["id"] = self.id
self.rviz_marker_def["scale"] = self.dimensions
color = self.color
color.append(self.alpha)
self.rviz_marker_def["color"] = color
self.rviz_marker = RvizMarker(self.rviz_marker_def)
def EraseRvizMarker(self):
self.rviz_marker.Erase()
def MakeRvizPolygonArray(self):
self.rviz_polygon_array_def = {"parent_name" : self.parent_name}
self.rviz_polygon_array_def["name"] = self.name
self.rviz_polygon_array_def["id"] = self.id
self.rviz_polygon_array_def["polygon_array_poses"] = self.polygon_array_poses
self.rviz_polyghon_array = RvizPolygonArray(self.rviz_polygon_array_def)
def MakeRvizTrousArray(self):
self.rviz_torus_array_def = {"parent_name" : self.parent_name}
self.rviz_torus_array_def["name"] = self.name
self.rviz_torus_array_def["id"] = self.id
self.rviz_torus_array_def["torus_array_poses"] = [[False,self.PoseFromArray(self.origin),self.dimensions[0]/2,self.dimensions[2]/2]]
self.rviz_polyghon_array = RvizTorusArray(self.rviz_torus_array_def)
def EraseRvizPolygonArray(self):
self.rviz_polyghon_array.Erase()
def getTransforms(self):
# print("persistent",len(self.transforms_list))
return self.transforms_list
def getObstacles(self,indexes):
indexes = [int(x) for x in indexes]
return self.obstacles[indexes[0]][indexes[1]][indexes[2]]
def getFSPGlobalPosefromMatrix(self,indexes):
indexes = [int(x) for x in indexes]
return [self.fsp_dicc["Matrix"][indexes[0]][indexes[1]][indexes[2]].global_pose]
# def getFSPGlobalPosefromList(self):
# pose_def = {"use": "poses","quantity": 1}
# self.GeneratePosesSetRandom(pose_def)
# return self.fsp_dicc["List"][-1].global_pose
def genFSPRandomGlobalPoseList(self,quantity):
pose_def = {"use": "poses","quantity": quantity}
self.GeneratePosesSetRandom(pose_def)
return self.getFSPGlobalPosefromList(self.fsp_dicc["List"][-quantity:])
def getFSPGlobalPosefromPath(self,path_name):
return self.getFSPGlobalPosefromList(self.fsp_dicc["Path"][path_name])
def getFSPGlobalPosefromList(self,fsp_list):
global_path = []
for pose in fsp_list:
global_path.append(pose.global_pose)
return global_path
def getFSPGlobalPosefromCoordinates(self,coordinates):
return [self.GenerateFreeSpacePosesFromCoordinates(coordinates).global_pose]
def RawDensityMatrix(self,poses_set_def):
        # Initialize a tensor of random values shaped like the obstacle tube
random_raw_matrix = np.random.rand(poses_set_def["dimensions"][0],poses_set_def["dimensions"][1],poses_set_def["dimensions"][2])
        # Positions whose random value falls at or below the density are marked True
selected_positions_matrix = random_raw_matrix<=poses_set_def["density"]
return selected_positions_matrix
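    # Illustrative behaviour of RawDensityMatrix (a sketch): with density 1.0 every
    # cell of the ["dimensions"] tube is selected, with density 0.0 essentially none
    # are, and intermediate values select roughly that fraction of cells at random.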
def GenerateObstacleFromMatrix(self,selected_positions_matrix,poses_matrix,shapes,dimensions):
obs_list = copy.deepcopy(selected_positions_matrix.tolist())
for i in np.arange(selected_positions_matrix.shape[0]):
for j in np.arange(selected_positions_matrix.shape[1]):
for k in np.arange(selected_positions_matrix.shape[2]):
                    # For every position where an obstacle is to be created
if selected_positions_matrix[i,j,k] == True:
shape = random.choice(shapes)
self.obs_shape_list.append(shape) # Add its shape to the list
                        # Add the obstacle object to the list of obstacles; it is spawned during its initialization
obs_list[i][j][k] = Obstacle('{0}_{1}_{2}'.format(i,j,k),\
shape,\
dimensions,\
poses_matrix[i][j][k],\
self.name,\
self.prefix,\
self.transforms_auxiliar_list)
self.transforms_auxiliar_list = obs_list[i][j][k].getTransforms()
# print("all",len(self.transforms_auxiliar_list))
# self.obs_pose_list.append([np.asarray(self.obs_list.point),np.asarray(self.obs_list.quaternion)])
                        # Also append its pose, as [position, quaternion] plain lists, to the flat pose list
self.obs_pose_list.append([[float(obs_list[i][j][k].global_pose.position.x),float(obs_list[i][j][k].global_pose.position.y),float(obs_list[i][j][k].global_pose.position.z)],[0,0,0,1]])
                        self.n_obs=self.n_obs+1 # Update the counter of created obstacles
return obs_list
def GenerateObstacleFromPosesList(self,selected_positions,shapes,dimensions):
obs_list = []
for i,pose in enumerate(selected_positions):
shape = random.choice(shapes)
self.obs_shape_list.append(shape)
            # Add the obstacle object to the list of obstacles; it is spawned during its initialization
obs_list.append(Obstacle(str(i),\
shape,\
dimensions,\
pose,\
self.name,\
self.prefix,\
self.transforms_auxiliar_list))
self.obs_pose_list.append([[float(obs_list[-1].global_pose.position.x),float(obs_list[-1].global_pose.position.y),float(obs_list[-1].global_pose.position.z)],[0,0,0,1]])
            self.n_obs=self.n_obs+1 # Update the counter of created obstacles
return obs_list
def GenerateFreeSpacePosesFromMatrix(self,selected_positions_matrix,poses_matrix,make_persistent = True):
fsposes_list = copy.deepcopy(selected_positions_matrix.tolist())
for i in np.arange(selected_positions_matrix.shape[0]):
for j in np.arange(selected_positions_matrix.shape[1]):
for k in np.arange(selected_positions_matrix.shape[2]):
                    # Add the free-space pose object to the matrix; it is set up (and its transform registered) during initialization
fsposes_list[i][j][k] = FreeSpacePose('{0}_{1}_{2}'.format(i,j,k),\
poses_matrix[i][j][k],\
self.name,\
self.prefix,\
self.transforms_auxiliar_list)
self.transforms_auxiliar_list = fsposes_list[i][j][k].getTransforms()
# print("all",len(self.transforms_auxiliar_list))
if make_persistent == True:
self.transforms_list.append(fsposes_list[i][j][k].getTransforms()[-1])
# print("persistent",len(self.transforms_list))
return fsposes_list
def GenerateFreeSpacePosesFromPosesList(self,selected_positions,make_persistent = True):
if self.fsp_dicc["List"] != []:
init_i = len(self.fsp_dicc["List"])-1
else:
init_i = 0
fsp_list = []
for i,pose in enumerate(selected_positions):
fsp_list.append(FreeSpacePose(str(init_i+i), pose, self.name, self.prefix, self.transforms_auxiliar_list))
self.transforms_auxiliar_list = fsp_list[-1].getTransforms()
# print("all",len(self.transforms_auxiliar_list))
if make_persistent == True:
self.transforms_list.append(fsp_list[-1].getTransforms()[-1])
# print("persistent",len(self.transforms_list))
return fsp_list
def GenerateFreeSpacePosesFromCoordinates(self,coordinates):
return FreeSpacePose(np.random.randint(100),self.PoseFromArray(coordinates),self.name,self.prefix,self.transforms_auxiliar_list)
def GeneratePosesSetDimensionMatrix(self,poses_set_def,make_persistent = True):
if poses_set_def["matrix_type"] == "dimension":
selected_positions_matrix = self.RawDensityMatrix(poses_set_def)
poses_matrix = self.PosesDimensionMatrix(poses_set_def,selected_positions_matrix)
elif poses_set_def["matrix_type"] == "distance":
poses_matrix, selected_positions_matrix = self.PosesDistanceMatrix(poses_set_def)
if poses_set_def["use"] == "obstacles":
self.obstacles = self.GenerateObstacleFromMatrix(selected_positions_matrix,poses_matrix,poses_set_def["obstacles_shape"],poses_set_def["obstacles_dimensions"])
elif poses_set_def["use"] == "poses":
self.fsp_dicc["Matrix"] = self.GenerateFreeSpacePosesFromMatrix(selected_positions_matrix,poses_matrix,make_persistent)
def GeneratePosesSetRandom(self,poses_set_def):
if "orienation" in poses_set_def.keys():
random_poses = self.RandomPoses(poses_set_def["quantity"],poses_set_def["orientation"])
else:
random_poses = self.RandomPoses(poses_set_def["quantity"])
if poses_set_def["use"] == "obstacles":
self.obstacles = self.GenerateObstacleFromPosesList(random_poses,poses_set_def["obstacles_shape"],poses_set_def["obstacles_dimensions"])
elif poses_set_def["use"] == "poses":
self.fsp_dicc["List"] = self.fsp_dicc["List"] + self.GenerateFreeSpacePosesFromPosesList(random_poses)
def GeneratePosesSetCoordinates(self,poses_set_def):
poses_list = [self.PoseFromArray(coordinates) for coordinates in poses_set_def["coordinates"]]
# for coordinates in poses_set_def["coordinates"]:
# poses_list.append(self.PoseFromArray(coordinates))
if poses_set_def["use"] == "obstacles":
self.obstacles = self.GenerateObstacleFromPosesList(poses_list,poses_set_def["obstacles_shape"],poses_set_def["obstacles_dimensions"])
elif poses_set_def["use"] == "poses":
self.fsp_dicc["List"] = self.fsp_dicc["List"] + self.GenerateFreeSpacePosesFromPosesList(poses_list)
return poses_list
def GeneratePosesSetZigZag(self,poses_set_def):
poses = self.ZigZagOnPerimeter(self.base_vertexes_pose_list,poses_set_def["height"],poses_set_def["sweep_angle"],poses_set_def["spacing"],poses_set_def["margins"],poses_set_def["initial_sense"])
if poses_set_def["use"] == "obstacles":
self.obstacles = self.GenerateObstacleFromPosesList(poses,poses_set_def["obstacles_shape"],poses_set_def["obstacles_dimensions"])
elif poses_set_def["use"] == "poses":
fsp_poses_list = self.GenerateFreeSpacePosesFromPosesList(poses)
self.fsp_dicc["List"] = self.fsp_dicc["List"] + fsp_poses_list
self.fsp_dicc["Path"]["Zigzag"] = fsp_poses_list
def GenerateRandomDimensionalValues(self, limits):
return np.random.uniform(limits["lower"],limits["upper"])
def PoseFromArray(self,Array):
quat = tf.transformations.quaternion_from_euler(Array[1][0],Array[1][1],Array[1][2])
return Pose(Point(Array[0][0],Array[0][1],Array[0][2]),Quaternion(quat[0],quat[1],quat[2],quat[3]))
def ArrayFromPose(self,pose):
euler = tf.transformations.euler_from_quaternion([pose.orientation.x,pose.orientation.y,pose.orientation.z,pose.orientation.w])
return [[pose.position.x,pose.position.y,pose.position.z],[euler[0],euler[1],euler[2]]]
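# Round-trip sketch for the two helpers above (hypothetical instance `geom`; the
# expected input format is [[x, y, z], [roll, pitch, yaw]] with angles in radians):
#   pose = geom.PoseFromArray([[1.0, 2.0, 0.5], [0.0, 0.0, 1.57]])
#   geom.ArrayFromPose(pose)  # -> approximately [[1.0, 2.0, 0.5], [0.0, 0.0, 1.57]]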
class Cube(GenericGeometry):
def __init__(self,geometry_def,parent_name,parent_prefix,transforms_list):
GenericGeometry.__init__(self,geometry_def,parent_name,parent_prefix,transforms_list)
self.MakeRvizMarker()
if "poses_sets" in geometry_def.keys():
for poses_set_def in geometry_def["poses_sets"]:
if poses_set_def["type"] == "coordinates":
self.GeneratePosesSetCoordinates(poses_set_def)
elif poses_set_def["type"] == "matrix":
self.GeneratePosesSetDimensionMatrix(poses_set_def)
elif poses_set_def["type"] == "random":
self.GeneratePosesSetRandom(poses_set_def)
elif poses_set_def["type"] == "zigzag":
self.GeneratePosesSetZigZag(poses_set_def)
def PosesDimensionMatrix(self,poses_set_def,selected_positions_matrix):
        # Create a list shaped like the obstacle tube
        poses_matrix = copy.deepcopy(selected_positions_matrix.tolist())  # TODO: simplify so selected_positions_matrix does not need to be passed in
        # self.obstacles_matrix[obstacle_positions] = Obstacle(np.arange(obstacle_positions.sum),"sphere",list(obstacle_positions)*2,[0,0,0,1])  # simplify like this
        # For every position in the three dimensions
for i in np.arange(poses_set_def["dimensions"][0]):
for j in np.arange(poses_set_def["dimensions"][1]):
for k in
|
np.arange(poses_set_def["dimensions"][2])
|
numpy.arange
|
import test_helpers
import numpy as np
import pytest
from trecs.components import Users, Items
from trecs.models import SocialFiltering, ContentFiltering, PopularityRecommender, BassModel
from trecs.metrics import (
InteractionSpread,
MSEMeasurement,
DiffusionTreeMeasurement,
StructuralVirality,
InteractionMeasurement,
RecSimilarity,
InteractionSimilarity,
AverageFeatureScoreRange,
RMSEMeasurement,
)
class MeasurementUtils:
@classmethod
def assert_valid_length(self, measurements, timesteps):
# there are as many states as the timesteps for which we ran the
# system, plus one to account for the initial state
for _, value in measurements.items():
assert len(value) == timesteps + 1
@classmethod
def assert_valid_final_measurements(
self, measurements, model_attribute, key_mappings, timesteps
):
for key, value in key_mappings.items():
if key in measurements.keys():
assert np.array_equal(measurements[key][timesteps], value)
else:
assert value not in model_attribute
@classmethod
def test_generic_metric(self, model, metric, timesteps):
if metric not in model.metrics:
model.add_metrics(metric)
assert metric in model.metrics
for t in range(1, timesteps + 1):
model.run(timesteps=1)
measurements = model.get_measurements()
self.assert_valid_length(measurements, t)
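# Typical driver for the helper above (a sketch; SocialFiltering and
# InteractionSpread are imported at the top of this file):
#   MeasurementUtils.test_generic_metric(SocialFiltering(), InteractionSpread(), timesteps=5)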
class TestMeasurementModule:
"""Test basic functionalities of MeasurementModule"""
def test_measurement_module(self):
# Create model, e.g., SocialFiltering
s = SocialFiltering()
# Add Interaction Spread
old_metrics = s.metrics.copy()
s.add_metrics(InteractionSpread())
assert len(old_metrics) + 1 == len(s.metrics)
with pytest.raises(ValueError):
s.add_metrics("wrong type")
with pytest.raises(ValueError):
s.add_metrics(MSEMeasurement(), print)
with pytest.raises(ValueError):
s.add_metrics()
assert len(old_metrics) + 1 == len(s.metrics)
def test_system_state_module(self):
s = SocialFiltering()
with pytest.raises(ValueError):
s.add_state_variable("wrong type")
with pytest.raises(ValueError):
s.add_state_variable(MSEMeasurement(), print)
with pytest.raises(ValueError):
s.add_state_variable()
def test_default_measurements(self, timesteps=None):
if timesteps is None:
timesteps = np.random.randint(2, 100)
s = SocialFiltering(record_base_state=True)
for t in range(1, timesteps + 1):
s.run(timesteps=1)
system_state = s.get_system_state()
state_mappings = {
"predicted_users": s.users_hat.value,
"actual_user_scores": s.users.actual_user_scores.value,
"predicted_items": s.items_hat.value,
"predicted_user_scores": s.predicted_scores.value,
}
MeasurementUtils.assert_valid_final_measurements(
system_state, s._system_state, state_mappings, t
)
MeasurementUtils.assert_valid_length(system_state, t)
s = SocialFiltering()
s.add_metrics(MSEMeasurement())
for t in range(1, timesteps + 1):
s.run(timesteps=1)
measurements = s.get_measurements()
MeasurementUtils.assert_valid_length(measurements, t)
class TestInteractionSpread:
def test_generic(self, timesteps=None):
if timesteps is None:
timesteps =
|
np.random.randint(2, 100)
|
numpy.random.randint
|
# Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mujoco XML model loader for Brax."""
import dataclasses
from typing import Any, AnyStr, Dict, List, Optional
from absl import logging
from brax.physics import config_pb2
from dm_control import mjcf
from dm_control.mjcf import constants
import numpy as np
from transforms3d import euler
from transforms3d import quaternions
from transforms3d import utils as transform_utils
Vector = Any
Vector3d = Any
Quaternion = Any
# TODO: In Mujoco, the inertial of a body may be specified using an
# <inertial> element. We currently use a unit diagonal matrix instead.
DEFAULT_INERTIA = [1.0, 1.0, 1.0]
DEFAULT_STIFFNESS = 5000
# Tell the typechecker where to actually look for generic Element properties.
MjcfElement = mjcf.element._ElementImpl
@dataclasses.dataclass
class GeomCollider:
"""Represent a collider for a Mujoco geometry."""
collider: config_pb2.Collider
# Volume of the geometry in m^3.
volume: float = 0
# Mass of the geometry in kg.
mass: float = 0
@dataclasses.dataclass
class Collision:
"""Collision information for a body."""
# See http://www.mujoco.org/book/computation.html#Collision
contype: int
conaffinity: int
# Name of the parent body.
parent_name: str
def _is_worldbody(mujoco_body: MjcfElement) -> bool:
"""Returns true if the Mujoco body is the worldbody."""
return mujoco_body.tag == constants.WORLDBODY
def _vec(v: Vector3d) -> config_pb2.Vector3:
"""Converts (numpy) array to Vector3."""
x, y, z = v
return config_pb2.Vector3(x=x, y=y, z=z)
def _np_vec(v: config_pb2.Vector3) -> Vector3d:
"""Converts a Vector3 to a numpy array."""
return np.array([v.x, v.y, v.z])
def _euler(q: Optional[Quaternion]) -> config_pb2.Vector3:
"""Converts the quaternion to Euler angles in degrees."""
if q is None:
q = quaternions.qeye()
return _vec(np.degrees(euler.quat2euler(q)))
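# Sanity sketch for the conversion above: the identity quaternion corresponds to
# zero Euler angles (evaluated at import time; uses only the helpers defined here).
assert np.allclose(_np_vec(_euler(None)), [0.0, 0.0, 0.0])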
def _maybe_qmult(q1: Optional[Quaternion],
q2: Optional[Quaternion]) -> Quaternion:
"""Returns the multiplication of two quaternions."""
if q1 is not None:
return quaternions.qmult(q1, q2) if q2 is not None else q1
return q2
def _rot_quat(u: Vector3d, v: Vector3d) -> Quaternion:
"""Returns the quaternion performing rotation from u to v."""
dot_p = np.dot(v, u)
axis = np.cross(v, u)
if not np.any(axis):
return quaternions.qeye()
norm_axis = np.linalg.norm(axis)
angle = np.arctan2(norm_axis, dot_p)
return quaternions.axangle2quat(axis, angle)
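# Edge-case sketch for _rot_quat: parallel vectors give a zero cross product, so
# the helper falls back to the identity quaternion.
assert np.allclose(_rot_quat([1.0, 0.0, 0.0], [1.0, 0.0, 0.0]), quaternions.qeye())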
def _create_joint(
name: str,
parent_body_name: str,
child_body_name: str,
parent_offset: Optional[Vector3d] = None,
child_offset: Optional[Vector3d] = None,
axis: Optional[Vector3d] = None,
stiffness: float = DEFAULT_STIFFNESS,
joint_range: Optional[Vector] = None,
rotation: Optional[Quaternion] = None,
reference_rotation: Optional[Quaternion] = None) -> config_pb2.Joint:
"""Returns a (revolute) joint with the specified properties."""
if axis is None:
# Default axis of rotation.
axis = [0, 0, 1]
if rotation is not None:
axis = quaternions.rotate_vector(axis, rotation)
axis = transform_utils.normalized_vector(axis)
angle_limit = (
config_pb2.Joint.Range(min=joint_range[0], max=joint_range[1])
if joint_range is not None else config_pb2.Joint.Range())
rotation = _rot_quat(axis, [1.0, 0, 0])
return config_pb2.Joint(
name=name,
parent=parent_body_name,
child=child_body_name,
parent_offset=_vec(parent_offset) if parent_offset is not None else None,
child_offset=_vec(child_offset) if child_offset is not None else None,
stiffness=stiffness,
rotation=_euler(rotation),
angle_limit=[angle_limit],
reference_rotation=_euler(reference_rotation))
def _create_fixed_joint(name: str,
parent_body_name: str,
child_body_name: str,
parent_offset: Optional[Vector3d] = None,
rotation: Optional[Quaternion] = None,
reference_rotation: Optional[Quaternion] = None):
"""Returns a fixed joint."""
# Brax does not support such joints. Instead we use a revolute joint with a
# high stiffness and zero angle range.
return _create_joint(
name,
parent_body_name,
child_body_name,
stiffness=DEFAULT_STIFFNESS,
parent_offset=parent_offset,
rotation=rotation,
reference_rotation=reference_rotation)
class MujocoConverter(object):
"""Converts a Mujoco model to a Brax config."""
def __init__(self,
xml_string: AnyStr,
add_collision_pairs: bool = False,
ignore_unsupported_joints: bool = False,
add_joint_to_nearest_body: bool = False):
"""Creates a MujocoConverter.
Args:
xml_string: XML string containing an Mujoco model description.
add_collision_pairs: If true, then the collision pairs between bodies will
be added automatically based on the structure of the model and Mujoco
collision mask settings. See
http://www.mujoco.org/book/computation.html#Collision.
ignore_unsupported_joints: If true, then unsupported joints, e.g. slide,
will be ignored, otherwise they raise an exception.
add_joint_to_nearest_body: Adds a joint to the nearest (child)body when
multiple geometries of a Mujoco body are represented as separate bodies.
"""
mjcf_model = mjcf.from_xml_string(xml_string, escape_separators=True)
self._mjcf_model = mjcf_model
config = config_pb2.Config()
self._config = config
self._ignore_unsupported_joints = ignore_unsupported_joints
self._add_joint_to_nearest_body = add_joint_to_nearest_body
# Brax uses local coordinates. If global coordinates are used in the
# Mujoco model, we convert them to local ones.
self._uses_global = mjcf_model.compiler.coordinate == 'global'
self._uses_radian = mjcf_model.compiler.angle == 'radian'
default = mjcf_model.default
geom = default.geom
# See http://www.mujoco.org/book/XMLreference.html#geom. Mujoco uses SI
# units, i.e. m(eter) for size, kg for mass and kg/m^3 for density.
self._default_density = (
geom.density if geom.density is not None else 1000.0)
self._default_contype = geom.contype if geom.contype is not None else 1
self._default_conaffinity = (
geom.conaffinity if geom.conaffinity is not None else 1)
joint = default.joint
self._default_stiffness = (
joint.stiffness if joint.stiffness is not None else DEFAULT_STIFFNESS)
if joint.damping is not None:
self._config.velocity_damping = joint.damping
option = mjcf_model.option
self._config.gravity.CopyFrom(
_vec(option.gravity if option.gravity is not None else [0, 0, -9.81]))
self._collisions: Dict[str, Collision] = {}
# Worldbody is the root of the scene tree. We add the bodies starting from
# the world body in a depth-first manner.
self._add_body(mjcf_model.worldbody, None)
# Add the actuators and the collision pairs.
self._add_actuators()
if add_collision_pairs:
self._add_collision_pairs()
@property
def config(self) -> config_pb2.Config:
"""Returns the Brax config for the Mujoco model."""
return self._config
def _maybe_to_local(self, pos: Vector3d,
mujoco_body: MjcfElement) -> Vector3d:
"""Converts position to local coordinates."""
if self._uses_global and mujoco_body and not _is_worldbody(mujoco_body):
return pos - mujoco_body.pos
return pos
def _get_position(self, elem: MjcfElement) -> Vector3d:
"""Returns the local position of the Mujoco element, a geom or joint."""
if elem.pos is None:
return np.zeros(3)
return self._maybe_to_local(elem.pos, elem.parent)
def _maybe_to_radian(self, a: float) -> float:
"""Converts the angle to radian."""
return a if self._uses_radian else
|
np.radians(a)
|
numpy.radians
|
#!/usr/bin/env python3
"""
@author: <NAME>
"""
import numpy.testing as nt
import numpy as np
import roboticstoolbox as rp
import spatialmath as sm
import unittest
import math
class TestDHRobot(unittest.TestCase):
def test_DHRobot(self):
l0 = rp.DHLink()
rp.DHRobot([l0])
def test_prismaticjoints(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH()
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
ans = [True, False, True, False]
self.assertEqual(r0.prismaticjoints, ans)
def test_revolutejoints(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH()
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
ans = [False, True, False, True]
self.assertEqual(r0.revolutejoints, ans)
def test_isprismatic(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH()
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
self.assertEqual(r0.isprismatic(0), True)
self.assertEqual(r0.isprismatic(1), False)
self.assertEqual(r0.isprismatic(2), True)
self.assertEqual(r0.isprismatic(3), False)
def test_isrevolute(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH()
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
ans = [False, True, False, True]
self.assertEqual(r0.isrevolute(0), False)
self.assertEqual(r0.isrevolute(1), True)
self.assertEqual(r0.isrevolute(2), False)
self.assertEqual(r0.isrevolute(3), True)
def test_todegrees(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH()
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
q = np.array([np.pi, np.pi, np.pi, np.pi / 2.0])
ans = np.array([np.pi, 180, np.pi, 90])
nt.assert_array_almost_equal(r0.todegrees(q), ans)
def test_toradians(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH()
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
q = np.array([np.pi, 180, np.pi, 90])
r0.q = q
ans = np.array([np.pi, np.pi, np.pi, np.pi / 2.0])
nt.assert_array_almost_equal(r0.toradians(q), ans)
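    # Note on the two conversion tests above: todegrees/toradians only convert the
    # revolute joints; prismatic joint values pass through unchanged.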
def test_d(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH(d=2.0)
l2 = rp.PrismaticDH()
l3 = rp.RevoluteDH(d=4.0)
r0 = rp.DHRobot([l0, l1, l2, l3])
ans = [0.0, 2.0, 0.0, 4.0]
self.assertEqual(r0.d, ans)
def test_a(self):
l0 = rp.PrismaticDH(a=1.0)
l1 = rp.RevoluteDH(a=2.0)
l2 = rp.PrismaticDH(a=3.0)
l3 = rp.RevoluteDH(a=4.0)
r0 = rp.DHRobot([l0, l1, l2, l3])
ans = [1.0, 2.0, 3.0, 4.0]
self.assertEqual(r0.a, ans)
def test_theta(self):
l0 = rp.PrismaticDH(theta=1.0)
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH(theta=3.0)
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
ans = [1.0, 0.0, 3.0, 0.0]
self.assertEqual(r0.theta, ans)
def test_r(self):
r = np.r_[1, 2, 3]
l0 = rp.PrismaticDH(r=r)
l1 = rp.RevoluteDH(r=r)
l2 = rp.PrismaticDH(r=r)
l3 = rp.RevoluteDH(r=r)
r0 = rp.DHRobot([l0, l1, l2, l3])
r1 = rp.DHRobot([l0])
ans = np.c_[r, r, r, r]
nt.assert_array_almost_equal(r0.r, ans)
nt.assert_array_almost_equal(r1.r, r.flatten())
def test_offset(self):
l0 = rp.PrismaticDH(offset=1.0)
l1 = rp.RevoluteDH(offset=2.0)
l2 = rp.PrismaticDH(offset=3.0)
l3 = rp.RevoluteDH(offset=4.0)
r0 = rp.DHRobot([l0, l1, l2, l3])
ans = [1.0, 2.0, 3.0, 4.0]
self.assertEqual(r0.offset, ans)
def test_qlim(self):
qlim = [-1, 1]
l0 = rp.PrismaticDH(qlim=qlim)
l1 = rp.RevoluteDH(qlim=qlim)
l2 = rp.PrismaticDH(qlim=qlim)
l3 = rp.RevoluteDH(qlim=qlim)
r0 = rp.DHRobot([l0, l1, l2, l3])
r1 = rp.DHRobot([l0])
ans = np.c_[qlim, qlim, qlim, qlim]
nt.assert_array_almost_equal(r0.qlim, ans)
nt.assert_array_almost_equal(r1.qlim, np.c_[qlim])
def test_fkine(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH(theta=2.0)
l3 = rp.RevoluteDH()
q = np.array([1, 2, 3, 4])
T1 = np.array(
[
[-0.14550003, -0.98935825, 0, 0],
[0.98935825, -0.14550003, 0, 0],
[0, 0, 1, 4],
[0, 0, 0, 1],
]
)
r0 = rp.DHRobot([l0, l1, l2, l3])
nt.assert_array_almost_equal(r0.fkine(q).A, T1)
def test_fkine_traj(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH(theta=2.0)
l3 = rp.RevoluteDH()
q = np.array([1, 2, 3, 4])
qq = np.r_[q, q, q, q]
r0 = rp.DHRobot([l0, l1, l2, l3])
T1 = r0.fkine(q).A
TT = r0.fkine(qq)
nt.assert_array_almost_equal(TT[0].A, T1)
nt.assert_array_almost_equal(TT[1].A, T1)
nt.assert_array_almost_equal(TT[2].A, T1)
nt.assert_array_almost_equal(TT[3].A, T1)
def test_links(self):
l0 = rp.PrismaticDH()
with self.assertRaises(TypeError):
rp.DHRobot(l0)
def test_multiple(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH(theta=2.0)
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1])
r1 = rp.DHRobot([l2, l3])
r3 = rp.DHRobot([r0, r1])
r4 = rp.DHRobot([r0, l2, l3])
q = np.array([1, 2, 3, 4])
T1 = np.array(
[
[-0.14550003, -0.98935825, 0, 0],
[0.98935825, -0.14550003, 0, 0],
[0, 0, 1, 4],
[0, 0, 0, 1],
]
)
nt.assert_array_almost_equal(r3.fkine(q).A, T1)
nt.assert_array_almost_equal(r4.fkine(q).A, T1)
def test_bad_list(self):
l0 = rp.PrismaticDH()
with self.assertRaises(TypeError):
rp.DHRobot([l0, 1])
def test_add_DHRobot(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH(theta=2.0)
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1])
r1 = rp.DHRobot([l2, l3])
r3 = r0 + r1
q = np.array([1, 2, 3, 4])
T1 = np.array(
[
[-0.14550003, -0.98935825, 0, 0],
[0.98935825, -0.14550003, 0, 0],
[0, 0, 1, 4],
[0, 0, 0, 1],
]
)
nt.assert_array_almost_equal(r3.fkine(q).A, T1)
def test_add_links(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
l2 = rp.PrismaticDH(theta=2.0)
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1])
r1 = rp.DHRobot([l1, l2, l3])
r3 = r0 + l2 + l3
r4 = l0 + r1
q = np.array([1, 2, 3, 4])
T1 = np.array(
[
[-0.14550003, -0.98935825, 0, 0],
[0.98935825, -0.14550003, 0, 0],
[0, 0, 1, 4],
[0, 0, 0, 1],
]
)
nt.assert_array_almost_equal(r3.fkine(q).A, T1)
nt.assert_array_almost_equal(r4.fkine(q).A, T1)
def test_add_error(self):
l0 = rp.PrismaticDH()
l1 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1])
with self.assertRaises(TypeError):
r0 + 2
def test_dh_error(self):
l0 = rp.PrismaticMDH()
l1 = rp.RevoluteDH()
r0 = rp.DHRobot([l0])
r1 = rp.DHRobot([l1])
with self.assertRaises(ValueError):
rp.DHRobot([l0, l1])
with self.assertRaises(ValueError):
r0 + r1
with self.assertRaises(ValueError):
rp.DHRobot([l0, l1])
r0 + l1
def test_name(self):
panda = rp.models.DH.Panda()
panda.name = "new"
self.assertEqual(panda.name, "new")
def test_base(self):
panda = rp.models.DH.Panda()
panda.base = sm.SE3.Rx(2)
nt.assert_array_almost_equal(panda.base.A, sm.SE3.Rx(2).A)
panda.base = sm.SE3.Ty(2)
nt.assert_array_almost_equal(panda.base.A, sm.SE3.Ty(2).A)
def test_A(self):
panda = rp.models.DH.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
panda.q = q
T1 = np.array(
[
[0.5403, -0.8415, 0, 0],
[0.8415, 0.5403, 0, 0],
[0, 0, 1, 0.333],
[0, 0, 0, 1],
]
)
T2 = np.array(
[
[-0.3279, -0.9015, -0.2826, 0.2918],
[0.9232, -0.3693, 0.1068, 0.06026],
[-0.2006, -0.2258, 0.9533, 0.3314],
[0, 0, 0, 1],
]
)
nt.assert_array_almost_equal(panda.A(0, q).A, T1, decimal=4)
nt.assert_array_almost_equal(panda.A([1, 4], q).A, T2, decimal=4)
def test_A_error(self):
panda = rp.models.DH.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
with self.assertRaises(ValueError):
panda.A(7, q).A
def test_islimit(self):
panda = rp.models.DH.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
panda.q = q
ans = np.r_[False, True, True, True, True, True, True]
nt.assert_array_equal(panda.islimit(q), ans)
nt.assert_array_equal(panda.islimit(), ans)
def test_isspherical(self):
l0 = rp.RevoluteDH()
l1 = rp.RevoluteDH(alpha=-np.pi / 2)
l2 = rp.RevoluteDH(alpha=np.pi / 2)
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
r1 = rp.DHRobot([l0, l1])
r2 = rp.DHRobot([l1, l2, l3, l0])
self.assertTrue(r0.isspherical())
self.assertFalse(r1.isspherical())
self.assertFalse(r2.isspherical())
def test_payload(self):
panda = rp.models.DH.Panda()
nt.assert_array_almost_equal(panda.r[:, 6], np.zeros(3))
# nt.assert_array_almost_equal(panda.links[6].m, 0)
m = 6
p = [1, 2, 3]
panda.payload(m, p)
nt.assert_array_almost_equal(panda.r[:, 6], p)
nt.assert_array_almost_equal(panda.links[6].m, m)
def test_jointdynamics(self):
puma = rp.models.DH.Puma560()
jd = puma.jointdynamics(puma.qn)
print(jd[0])
# numbers come from MATLAB
nt.assert_array_almost_equal(jd[0][1], [0.001133478453251, 0.001480000000000])
nt.assert_array_almost_equal(
jd[1][1], [0.579706964030143e-3, 0.817000000000000e-3]
)
nt.assert_array_almost_equal(jd[2][1], [0.000525146448377, 0.001380000000000])
def test_twists(self):
# TODO
panda = rp.models.DH.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
panda.q = q
panda.twists()
panda.twists(q)
puma = rp.models.DH.Puma560()
q = [1, 2, 3, 4, 5, 6]
puma.q = q
puma.twists()
puma.twists(q)
l0 = rp.PrismaticMDH()
r = rp.DHRobot([l0])
r.twists()
l0 = rp.PrismaticDH()
l1 = rp.PrismaticDH()
r = rp.DHRobot([l0, l1])
r.twists()
def test_fkine_panda(self):
panda = rp.models.DH.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
T = np.array(
[
[-0.8583, 0.1178, 0.4994, 0.1372],
[0.1980, 0.9739, 0.1106, 0.3246],
[-0.4734, 0.1938, -0.8593, 0.4436],
[0, 0, 0, 1],
]
)
nt.assert_array_almost_equal(panda.fkine(q).A, T, decimal=4)
def test_jacobe(self):
l0 = rp.PrismaticDH(theta=4)
l1 = rp.RevoluteDH(a=2)
l2 = rp.PrismaticDH(theta=2)
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
q = [1, 2, 3, 4]
r0.q = q
Je = np.array(
[
[0, -0.5588, 0, 0],
[0, 1.9203, 0, 0],
[1.0000, 0, 1.0000, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1.0000, 0, 1.0000],
]
)
nt.assert_array_almost_equal(r0.jacobe(q), Je, decimal=4)
def test_jacob0(self):
l0 = rp.PrismaticDH(theta=4)
l1 = rp.RevoluteDH(a=2)
l2 = rp.PrismaticDH(theta=2)
l3 = rp.RevoluteDH()
r0 = rp.DHRobot([l0, l1, l2, l3])
q = [1, 2, 3, 4]
r0.q = q
J0 = np.array(
[
[0, 0.5588, 0, 0],
[0, 1.9203, 0, 0],
[1.0000, 0, 1.0000, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1.0000, 0, 1.0000],
]
)
nt.assert_array_almost_equal(r0.jacob0(q), J0, decimal=4)
def test_jacobe_panda(self):
panda = rp.models.DH.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
panda.q = q
Je = np.array(
[
[0.3058, 0.1315, -0.2364, -0.0323, 0.0018, 0.2095, 0],
[0.0954, 0.0303, -0.0721, 0.1494, -0.0258, 0.0144, 0],
[-0.1469, 0.3385, 0.0506, 0.0847, -0.0000, -0.0880, 0],
[-0.4734, 0.8292, -0.0732, 0.8991, -0.2788, -0.0685, 0],
[0.1938, 0.4271, 0.7224, 0.3461, -0.0191, 0.9976, 0],
[-0.8593, -0.3605, 0.6876, -0.2679, -0.9602, 0.0000, 1.0000],
]
)
nt.assert_array_almost_equal(panda.jacobe(q), Je, decimal=4)
# def test_jacob0v(self):
# l0 = rp.PrismaticDH(theta=4)
# l1 = rp.RevoluteDH(a=2)
# l2 = rp.PrismaticDH(theta=2)
# l3 = rp.RevoluteDH()
# r0 = rp.DHRobot([l0, l1, l2, l3])
# q = [1, 2, 3, 4]
# r0.q = q
# J = np.array([
# [0.8439, 0.5366, 0, 0, 0, 0],
# [-0.5366, 0.8439, 0, 0, 0, 0],
# [0, 0, 1, 0, 0, 0],
# [0, 0, 0, 0.8439, 0.5366, 0],
# [0, 0, 0, -0.5366, 0.8439, 0],
# [0, 0, 0, 0, 0, 1],
# ])
# nt.assert_array_almost_equal(r0.jacob0v(q), J, decimal=4)
# nt.assert_array_almost_equal(r0.jacob0v(), J, decimal=4)
# def test_jacobev(self):
# l0 = rp.PrismaticDH(theta=4)
# l1 = rp.RevoluteDH(a=2)
# l2 = rp.PrismaticDH(theta=2)
# l3 = rp.RevoluteDH()
# r0 = rp.DHRobot([l0, l1, l2, l3])
# q = [1, 2, 3, 4]
# r0.q = q
# J = np.array([
# [0.8439, -0.5366, 0, 0, 0, 0],
# [0.5366, 0.8439, 0, 0, 0, 0],
# [0, 0, 1, 0, 0, 0],
# [0, 0, 0, 0.8439, -0.5366, 0],
# [0, 0, 0, 0.5366, 0.8439, 0],
# [0, 0, 0, 0, 0, 1],
# ])
# nt.assert_array_almost_equal(r0.jacobev(q), J, decimal=4)
# nt.assert_array_almost_equal(r0.jacobev(), J, decimal=4)
def test_nofriction(self):
l0 = rp.DHLink(Tc=2, B=3)
l1 = rp.DHLink(Tc=2, B=3)
l2 = rp.DHLink(Tc=2, B=3)
l3 = rp.DHLink(Tc=2, B=3)
L = [l0, l1, l2, l3]
r0 = rp.DHRobot(L)
n0 = r0.nofriction()
n1 = r0.nofriction(viscous=True)
n2 = r0.nofriction(coulomb=False)
for i in range(4):
nt.assert_array_almost_equal(n0.links[i].B, L[i].B)
nt.assert_array_almost_equal(n0.links[i].Tc, [0, 0])
nt.assert_array_almost_equal(n1.links[i].B, 0)
nt.assert_array_almost_equal(n1.links[i].Tc, [0, 0])
nt.assert_array_almost_equal(n2.links[i].B, L[i].B)
nt.assert_array_almost_equal(n2.links[i].Tc, L[i].Tc)
@unittest.skip("payload needs fixing")
def test_pay(self):
panda = rp.models.DH.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
w = [1, 2, 3, 4, 5, 6]
wT = np.c_[w, w, w, w].T
qT = np.c_[q, q, q, q].T
tau = np.array([6.0241, -4.4972, -7.2160, -4.2400, 7.0215, -4.6884, -6.0000])
tau0 = np.array([-5.9498, 1.4604, -3.4544, 1.5026, -3.7777, -6.6578, 2.6047])
tauT = np.c_[tau, tau, tau, tau].T
tau0T = np.c_[tau0, tau0, tau0, tau0].T
Je = panda.jacobe(q)
J0 = panda.jacob0(q)
JeT = np.zeros((4, 6, 7))
for i in range(4):
JeT[i, :, :] = Je
panda.pay(w)
nt.assert_array_almost_equal(panda.pay(w), tau, decimal=4)
nt.assert_array_almost_equal(panda.pay(w, frame=0), tau0, decimal=4)
nt.assert_array_almost_equal(panda.pay(w, q=q), tau, decimal=4)
nt.assert_array_almost_equal(panda.pay(wT, q=qT), tauT, decimal=4)
nt.assert_array_almost_equal(panda.pay(wT, q=qT, frame=0), tau0T, decimal=4)
nt.assert_array_almost_equal(panda.pay(w, J=Je), tau, decimal=4)
nt.assert_array_almost_equal(panda.pay(w, J=J0), tau0, decimal=4)
nt.assert_array_almost_equal(panda.pay(wT, J=JeT), tauT, decimal=4)
with self.assertRaises(ValueError):
panda.pay(wT, q)
with self.assertRaises(TypeError):
panda.pay(wT)
def test_friction(self):
l0 = rp.RevoluteDH(d=2, B=3, G=2, Tc=[2, -1])
qd = [1, 2, 3, 4]
r0 = rp.DHRobot([l0, l0, l0, l0])
tau = np.array([-16, -28, -40, -52])
nt.assert_array_almost_equal(r0.friction(qd), tau)
def test_fkine_all(self):
panda = rp.models.DH.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
panda.q = q
t0 = np.eye(4)
t1 = np.array(
[
[0.5403, -0.8415, 0, 0],
[0.8415, 0.5403, 0, 0],
[0, 0, 1, 0.333],
[0, 0, 0, 1],
]
)
t2 = np.array(
[
[-0.2248, -0.4913, -0.8415, 0],
[-0.3502, -0.7651, 0.5403, 0],
[-0.9093, 0.4161, 0, 0.333],
[0, 0, 0, 1],
]
)
t3 = np.array(
[
[0.1038, 0.8648, 0.4913, 0.1552],
[0.4229, -0.4855, 0.7651, 0.2418],
[0.9002, 0.1283, -0.4161, 0.2015],
[0, 0, 0, 1],
]
)
t4 = np.array(
[
[-0.4397, -0.2425, -0.8648, 0.1638],
[-0.8555, -0.1801, 0.4855, 0.2767],
[-0.2735, 0.9533, -0.1283, 0.2758],
[0, 0, 0, 1],
]
)
t5 = np.array(
[
[-0.9540, -0.1763, -0.2425, 0.107],
[0.2229, -0.9581, -0.1801, 0.2781],
[-0.2006, -0.2258, 0.9533, 0.6644],
[0, 0, 0, 1],
]
)
t6 = np.array(
[
[-0.8482, -0.4994, 0.1763, 0.107],
[0.2643, -0.1106, 0.9581, 0.2781],
[-0.4590, 0.8593, 0.2258, 0.6644],
[0, 0, 0, 1],
]
)
t7 = np.array(
[
[-0.5236, 0.6902, 0.4994, 0.08575],
[0.8287, 0.5487, 0.1106, 0.3132],
[-0.1977, 0.4718, -0.8593, 0.5321],
[0, 0, 0, 1],
]
)
Tall = panda.fkine_all(q)
nt.assert_array_almost_equal(Tall[0].A, t0, decimal=4)
nt.assert_array_almost_equal(Tall[1].A, t1, decimal=4)
nt.assert_array_almost_equal(Tall[2].A, t2, decimal=4)
nt.assert_array_almost_equal(Tall[3].A, t3, decimal=4)
nt.assert_array_almost_equal(Tall[4].A, t4, decimal=4)
nt.assert_array_almost_equal(Tall[5].A, t5, decimal=4)
nt.assert_array_almost_equal(Tall[6].A, t6, decimal=4)
nt.assert_array_almost_equal(Tall[7].A, t7, decimal=4)
# def test_gravjac(self):
# l0 = rp.RevoluteDH(d=2, B=3, G=2, Tc=[2, -1], alpha=0.4, a=0.2,
# r=[0.1, 0.2, 0.05], m=0.5)
# l1 = rp.PrismaticDH(theta=0.1, B=3, G=2, Tc=[2, -1], a=0.2,
# r=[0.1, 0.2, 0.05], m=0.5)
# r0 = rp.DHRobot([l0, l0, l0, l0])
# r1 = rp.DHRobot([l0, l0, l0, l1])
# q = [0.3, 0.4, 0.2, 0.1]
# qT = np.c_[q, q]
# grav = [0.3, 0.5, 0.7]
# tauB = [0, 4.6280, 3.1524, 0.9324]
# tauB2 = [1.9412, 1.1374, 0.3494, -0.0001]
# tauB3 = [0, 3.2819, 2.0195, 1.9693]
# res0 = r0.gravjac(qT)
# res1 = r0.gravjac(q)
# res2 = r0.gravjac(q, grav)
# res4 = r1.gravjac(q)
# nt.assert_array_almost_equal(res0[:, 0], tauB, decimal=4)
# nt.assert_array_almost_equal(res0[:, 1], tauB, decimal=4)
# nt.assert_array_almost_equal(res1, tauB, decimal=4)
# nt.assert_array_almost_equal(res2, tauB2, decimal=4)
# nt.assert_array_almost_equal(res3, tauB, decimal=4)
# nt.assert_array_almost_equal(res4, tauB3, decimal=4)
# def test_ikine3(self):
# l0 = rp.RevoluteDH(alpha=np.pi / 2)
# l1 = rp.RevoluteDH(a=0.4318)
# l2 = rp.RevoluteDH(d=0.15005, a=0.0203, alpha=-np.pi / 2)
# l3 = rp.PrismaticDH()
# l4 = rp.PrismaticMDH()
# r0 = rp.DHRobot([l0, l1, l2])
# r1 = rp.DHRobot([l3, l3])
# r2 = rp.DHRobot([l3, l3, l3])
# r3 = rp.DHRobot([l4, l4, l4])
# q = [1, 1, 1]
# r0.q = q
# T = r0.fkine(q)
# # T2 = r1.fkine(q)
# Tt = sm.SE3([T, T])
# res = [2.9647, 1.7561, 0.2344]
# res2 = [1.0000, 0.6916, 0.2344]
# res3 = [2.9647, 2.4500, 3.1762]
# res4 = [1.0000, 1.3855, 3.1762]
# q0 = r0.ikine3(T.A)
# q1 = r0.ikine3(Tt)
# q2 = r0.ikine3(T, left=False, elbow_up=False)
# q3 = r0.ikine3(T, elbow_up=False)
# q4 = r0.ikine3(T, left=False)
# nt.assert_array_almost_equal(q0, res, decimal=4)
# nt.assert_array_almost_equal(q1[0, :], res, decimal=4)
# nt.assert_array_almost_equal(q1[1, :], res, decimal=4)
# nt.assert_array_almost_equal(q2, res2, decimal=4)
# nt.assert_array_almost_equal(q3, res3, decimal=4)
# nt.assert_array_almost_equal(q4, res4, decimal=4)
# with self.assertRaises(ValueError):
# r1.ikine3(T)
# with self.assertRaises(ValueError):
# r2.ikine3(T)
# with self.assertRaises(ValueError):
# r3.ikine3(T)
# def test_ikine6s_rrp(self):
# l0 = rp.RevoluteDH(alpha=-np.pi / 2)
# l1 = rp.RevoluteDH(alpha=np.pi / 2)
# l2 = rp.PrismaticDH()
# l3 = rp.RevoluteDH(alpha=-np.pi / 2)
# l4 = rp.RevoluteDH(alpha=np.pi / 2)
# l5 = rp.RevoluteDH()
# r0 = rp.DHRobot([l0, l1, l2, l3, l4, l5])
# r1 = rp.DHRobot([l1, l0, l2, l3, l4, l5])
# q = [1, 1, 1, 1, 1, 1]
# T1 = r0.fkine(q)
# T2 = r1.fkine(q)
# qr0 = [1.0000, -2.1416, -1.0000, -1.0000, -2.1416, 1.0000]
# qr1 = [-2.1416, -1.0000, 1.0000, -2.1416, 1.0000, 1.0000]
# qr2 = [1.0000, 1.0000, 1.0000, -2.1416, -1.0000, -2.1416]
# qr3 = [-2.1416, 2.1416, -1.0000, -1.0000, 2.1416, -2.1416]
# q0, _ = r0.ikine6s(T1)
# q1, _ = r0.ikine6s(T1, left=False, elbow_up=False, wrist_flip=True)
# q2, _ = r1.ikine6s(T2)
# q3, _ = r1.ikine6s(T2, left=False, elbow_up=False, wrist_flip=True)
# nt.assert_array_almost_equal(q0, qr0, decimal=4)
# nt.assert_array_almost_equal(q1, qr1, decimal=4)
# nt.assert_array_almost_equal(q2, qr2, decimal=4)
# nt.assert_array_almost_equal(q3, qr3, decimal=4)
# def test_ikine6s_simple(self):
# l0 = rp.RevoluteDH(alpha=-np.pi / 2)
# l1 = rp.RevoluteDH()
# l2 = rp.RevoluteDH(alpha=np.pi / 2)
# l3 = rp.RevoluteDH(alpha=-np.pi / 2)
# l4 = rp.RevoluteDH(alpha=np.pi / 2)
# l5 = rp.RevoluteDH()
# r0 = rp.DHRobot([l0, l1, l2, l3, l4, l5])
# r1 = rp.DHRobot([l2, l1, l0, l3, l4, l5])
# q = [1, 1, 1, 1, 1, 1]
# T1 = r0.fkine(q)
# T2 = r1.fkine(q)
# qr0 = [0, 0, 0, -0.9741, -2.2630, -0.4605]
# qr1 = [0, 0, 0, 0.1947, -1.3811, 1.8933]
# qr2 = [0, 0, 0, 2.1675, 2.2630, 2.6811]
# qr3 = [0, 0, 0, -2.9468, 1.3811, -1.2483]
# q0, _ = r0.ikine6s(T1)
# q1, _ = r0.ikine6s(T1, left=False, elbow_up=False, wrist_flip=True)
# q2, _ = r1.ikine6s(T2)
# q3, _ = r1.ikine6s(T2, left=False, elbow_up=False, wrist_flip=True)
# nt.assert_array_almost_equal(q0, qr0, decimal=4)
# nt.assert_array_almost_equal(q1, qr2, decimal=4)
# nt.assert_array_almost_equal(q2, qr1, decimal=4)
# nt.assert_array_almost_equal(q3, qr3, decimal=4)
# def test_ikine6s_offset(self):
# self.skipTest("error introduced with DHLink change")
# l0 = rp.RevoluteDH(alpha=-np.pi / 2)
# l1 = rp.RevoluteDH(d=1.0)
# l2 = rp.RevoluteDH(alpha=np.pi / 2)
# l3 = rp.RevoluteDH(alpha=-np.pi / 2)
# l4 = rp.RevoluteDH(alpha=np.pi / 2)
# l5 = rp.RevoluteDH()
# r0 = rp.DHRobot([l0, l1, l2, l3, l4, l5])
# r1 = rp.DHRobot([l2, l1, l0, l3, l4, l5])
# q = [1, 1, 1, 1, 1, 1]
# T1 = r0.fkine(q)
# T2 = r1.fkine(q)
# qr0 = [1.0000, 3.1416, -0.0000, -1.1675, -0.8786, 2.6811]
# qr1 = [1.0000, -1.1059, 2.6767, 0.8372, 1.2639, 1.3761]
# qr2 = [1.0000, 3.1416, -3.1416, -0.8053, -1.3811, 1.8933]
# qr3 = [1.0000, -1.1059, -0.4649, 1.8311, 2.3192, -2.6398]
# q0, _ = r0.ikine6s(T1.A)
# q1, _ = r0.ikine6s(T1, left=False, elbow_up=False, wrist_flip=True)
# q2, _ = r1.ikine6s(T2)
# q3, _ = r1.ikine6s(T2, left=False, elbow_up=False, wrist_flip=True)
# nt.assert_array_almost_equal(q0, qr0, decimal=4)
# nt.assert_array_almost_equal(q1, qr1, decimal=4)
# nt.assert_array_almost_equal(q2, qr2, decimal=4)
# nt.assert_array_almost_equal(q3, qr3, decimal=4)
# def test_ikine6s_traj(self):
# self.skipTest("error introduced with DHLink change")
# r0 = rp.models.DH.Puma560()
# q = r0.qr
# T = r0.fkine(q)
# Tt = sm.SE3([T, T, T])
# qr0 = [0.2689, 1.5708, -1.4768, -3.1416, 0.0940, 2.8726]
# q0, _ = r0.ikine6s(Tt)
# nt.assert_array_almost_equal(q0[0, :], qr0, decimal=4)
    # nt.assert_array_almost_equal(q0[1, :], qr0, decimal=4)
# nt.assert_array_almost_equal(q0[2, :], qr0, decimal=4)
# def test_ikine6s_fail(self):
# l0 = rp.RevoluteDH(alpha=np.pi / 2)
# l1 = rp.RevoluteDH(d=1.0)
# l2 = rp.RevoluteDH(alpha=np.pi / 2)
# l3 = rp.RevoluteDH(alpha=-np.pi / 2)
# l4a = rp.RevoluteDH(alpha=np.pi / 2)
# l4b = rp.RevoluteDH()
# l5 = rp.RevoluteDH()
# l6 = rp.RevoluteMDH()
# r0 = rp.DHRobot([l0, l1, l2, l3, l4a, l5])
# r1 = rp.DHRobot([l0, l1, l2, l3, l4b, l5])
# r2 = rp.DHRobot([l1, l2, l3])
# r3 = rp.DHRobot([l6, l6, l6, l6, l6, l6])
# puma = rp.models.DH.Puma560()
# T = sm.SE3(0, 10, 10)
# puma.ikine6s(T)
# q = [1, 1, 1, 1, 1, 1]
# T = r0.fkine(q)
# with self.assertRaises(ValueError):
# r0.ikine6s(T)
# with self.assertRaises(ValueError):
# r1.ikine6s(T)
# with self.assertRaises(ValueError):
# r2.ikine6s(T)
# with self.assertRaises(ValueError):
# r3.ikine6s(T)
def test_ikine_a(self):
puma = rp.models.DH.Puma560()
T = puma.fkine(puma.qn)
# test configuration validation
config = puma.config_validate("l", ("lr", "ud", "nf"))
self.assertEqual(len(config), 3)
self.assertTrue("l" in config)
self.assertTrue("u" in config)
self.assertTrue("n" in config)
with self.assertRaises(ValueError):
config = puma.config_validate("lux", ("lr", "ud", "nf"))
# analytic solution
sol = puma.ikine_a(T)
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=6)
sol = puma.ikine_a(T, "l")
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=6)
self.assertTrue(sol.q[0] > np.pi / 2)
sol = puma.ikine_a(T, "r")
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=6)
self.assertTrue(sol.q[0] < np.pi / 2)
sol = puma.ikine_a(T, "u")
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=6)
self.assertTrue(sol.q[1] > 0)
sol = puma.ikine_a(T, "d")
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=6)
self.assertTrue(sol.q[1] < 0)
sol = puma.ikine_a(T, "n")
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=6)
sol = puma.ikine_a(T, "f")
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=6)
def test_ikine_LM(self):
puma = rp.models.DH.Puma560()
T = puma.fkine(puma.qn)
sol = puma.ikine_LM(T)
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=6)
def test_ikine_LMS(self):
puma = rp.models.DH.Puma560()
T = puma.fkine(puma.qn)
sol = puma.ikine_LM(T)
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=6)
def test_ikine_unc(self):
puma = rp.models.DH.Puma560()
T = puma.fkine(puma.qn)
sol = puma.ikine_min(T)
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=5)
q0 = np.r_[0.1, 0.1, 0.1, 0.2, 0.3, 0.4]
sol = puma.ikine_min(T, q0=q0)
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=5)
def test_ikine_con(self):
puma = rp.models.DH.Puma560()
T = puma.fkine(puma.qn)
sol = puma.ikine_min(T, qlim=True)
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=5)
q0 = np.r_[0.1, 0.1, 0.1, 0.2, 0.3, 0.4]
sol = puma.ikine_min(T, q0=q0, qlim=True)
self.assertTrue(sol.success)
self.assertAlmostEqual(np.linalg.norm(T - puma.fkine(sol.q)), 0, places=5)
# def test_ikine_min(self):
# puma = rp.models.DH.Puma560()
# q = puma.qn
# T = puma.fkine(q)
# Tt = sm.SE3([T, T])
# sol0 = puma.ikine_min(Tt)
# sol1 = puma.ikine_min(T.A, qlimits=False)
# sol2 = puma.ikine_min(
# T, qlimits=False, stiffness=0.1, ilimit=1)
# print(np.sum(np.abs(T.A - puma.fkine(q0[:, 0]).A)))
# self.assertTrue(sol0[0].success)
# self.assertAlmostEqual(np.linalg.norm(T-puma.fkine(sol0[0].q)), 0, places=4)
# TODO: second solution fails, even though starting value is the
# solution. see https://stackoverflow.com/questions/34663539/scipy-optimize-fmin-l-bfgs-b-returns-abnormal-termination-in-lnsrch
# documentation is pretty bad.
# self.assertTrue(sol0[1].success)
# self.assertAlmostEqual(np.linalg.norm(T-puma.fkine(sol0[1].q)), 0, places=4)
# self.assertTrue(sol1.success)
# self.assertAlmostEqual(np.linalg.norm(T-puma.fkine(sol1.q)), 0, places=4)
# self.assertTrue(sol2.success)
# self.assertAlmostEqual(np.linalg.norm(T-puma.fkine(sol2.q)), 0, places=4)
def test_rne(self):
puma = rp.models.DH.Puma560()
z = np.zeros(6)
o = np.ones(6)
fext = [1, 2, 3, 1, 2, 3]
tr0 = [-0.0000, 31.6399, 6.0351, 0.0000, 0.0283, 0]
tr1 = [3.35311, 36.0025, 7.42596, 0.190043, 0.203441, 0.194133]
tr2 = [32.4952, 60.867, 17.7436, 1.45452, 1.29911, 0.713781]
tr3 = [29.1421, 56.5044, 16.3528, 1.26448, 1.12392, 0.519648]
tr4 = [32.4952, 29.2271, 11.7085, 1.45452, 1.27086, 0.713781]
tr5 = [0.642756, 29.0866, 4.70321, 2.82843, -1.97175, 3]
t0 = puma.rne(puma.qn, z, z)
t1 = puma.rne(puma.qn, z, o)
t2 = puma.rne(puma.qn, o, o)
t3 = puma.rne(puma.qn, o, z)
t4 = puma.rne(puma.qn, o, o, gravity=[0, 0, 0])
t5 = puma.rne(puma.qn, z, z, fext=fext)
nt.assert_array_almost_equal(t0, tr0, decimal=4)
nt.assert_array_almost_equal(t1, tr1, decimal=4)
nt.assert_array_almost_equal(t2, tr2, decimal=4)
nt.assert_array_almost_equal(t3, tr3, decimal=4)
nt.assert_array_almost_equal(t4, tr4, decimal=4)
nt.assert_array_almost_equal(t5, tr5, decimal=4)
def test_rne_traj(self):
puma = rp.models.DH.Puma560()
z = np.zeros(6)
o = np.ones(6)
tr0 = [-0.0000, 31.6399, 6.0351, 0.0000, 0.0283, 0]
tr1 = [32.4952, 60.8670, 17.7436, 1.4545, 1.2991, 0.7138]
t0 = puma.rne(np.c_[puma.qn, puma.qn].T, np.c_[z, o].T, np.c_[z, o].T)
nt.assert_array_almost_equal(t0[0, :], tr0, decimal=4)
nt.assert_array_almost_equal(t0[1, :], tr1, decimal=4)
def test_rne_delete(self):
puma = rp.models.DH.Puma560()
z = np.zeros(6)
tr0 = [-0.0000, 31.6399, 6.0351, 0.0000, 0.0283, 0]
t0 = puma.rne(puma.qn, z, z)
puma.delete_rne()
t1 = puma.rne(puma.qn, z, z)
nt.assert_array_almost_equal(t0, tr0, decimal=4)
nt.assert_array_almost_equal(t1, tr0, decimal=4)
def test_accel(self):
puma = rp.models.DH.Puma560()
puma.q = puma.qn
q = puma.qn
qd = [0.1, 0.2, 0.8, 0.2, 0.5, 1.0]
torque = [1.0, 3.2, 1.8, 0.1, 0.7, 4.6]
res = [-7.4102, -9.8432, -10.9694, -4.4314, -0.9881, 21.0228]
qdd0 = puma.accel(q, qd, torque)
qdd1 = puma.accel(np.c_[q, q].T, np.c_[qd, qd].T, np.c_[torque, torque].T)
nt.assert_array_almost_equal(qdd0, res, decimal=4)
nt.assert_array_almost_equal(qdd1[0, :], res, decimal=4)
nt.assert_array_almost_equal(qdd1[1, :], res, decimal=4)
def test_inertia(self):
puma = rp.models.DH.Puma560()
puma.q = puma.qn
q = puma.qn
Ir = [
[3.6594, -0.4044, 0.1006, -0.0025, 0.0000, -0.0000],
[-0.4044, 4.4137, 0.3509, 0.0000, 0.0024, 0.0000],
[0.1006, 0.3509, 0.9378, 0.0000, 0.0015, 0.0000],
[-0.0025, 0.0000, 0.0000, 0.1925, 0.0000, 0.0000],
[0.0000, 0.0024, 0.0015, 0.0000, 0.1713, 0.0000],
[-0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.1941],
]
I0 = puma.inertia(q)
# I1 = puma.inertia(np.c_[q, q].T)
nt.assert_array_almost_equal(I0, Ir, decimal=4)
# nt.assert_array_almost_equal(I1[0, :, :], Ir, decimal=4)
# nt.assert_array_almost_equal(I1[1, :, :], Ir, decimal=4)
def test_inertia_x(self):
puma = rp.models.DH.Puma560()
q = puma.qn
Mr = [
[17.2954, -2.7542, -9.6233, -0.0000, 0.2795, 0.0000],
[-2.7542, 12.1909, 1.2459, -0.3254, -0.0703, -0.9652],
[-9.6233, 1.2459, 13.3348, -0.0000, 0.2767, -0.0000],
[-0.0000, -0.3254, -0.0000, 0.1941, 0.0000, 0.1941],
[0.2795, -0.0703, 0.2767, 0.0000, 0.1713, 0.0000],
[0.0000, -0.9652, -0.0000, 0.1941, 0.0000, 0.5791],
]
M0 = puma.inertia_x(q, representation=None)
M1 = puma.inertia_x(np.c_[q, q].T, representation=None)
|
nt.assert_array_almost_equal(M0, Mr, decimal=4)
|
numpy.testing.assert_array_almost_equal
|
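# A minimal, hedged illustration of the API targeted by the row above,
# numpy.testing.assert_array_almost_equal; the arrays and tolerance below are
# invented placeholders, not values taken from the test file.
import numpy as np
import numpy.testing as nt

_demo_computed = np.array([1.23456, 2.34567, 3.45678])
_demo_expected = np.array([1.23460, 2.34570, 3.45680])
# Passes because the arrays agree to 4 decimal places.
nt.assert_array_almost_equal(_demo_computed, _demo_expected, decimal=4)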
import os
import yaml
import time
import torch
from mesh_data import PointCloudData
from pathlib import Path
import argparse
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel
import numpy as np
from random import choice
from models.pointnet import PointNetCls, feature_transform_regularizer
# from models.pointnet2 import PointNet2ClsMsg
# from models.dgcnn import DGCNN
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.structures import Meshes
from pytorch3d.structures import join_meshes_as_batch
from pytorch3d.utils import ico_sphere
from torch.autograd import Variable
from torch_scatter import scatter_add
import open3d as o3d
from tqdm import tqdm
from pytorch3d.io import load_obj, save_obj
from pytorch3d.loss import (
chamfer_distance,
mesh_edge_loss,
mesh_laplacian_smoothing,
mesh_normal_consistency,
)
# set path
path = Path("Manifold40/")
valid_ds = PointCloudData(path, valid=True, folder='test')
class CrossEntropyAdvLoss(nn.Module):
def __init__(self):
"""Adversarial function on output probabilities.
"""
super(CrossEntropyAdvLoss, self).__init__()
def forward(self, logits, targets):
"""Adversarial loss function using cross entropy.
Args:
logits (torch.FloatTensor): output logits from network, [B, K]
targets (torch.LongTensor): attack target class
"""
loss = F.cross_entropy(logits, targets)
return loss
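# A minimal, hedged usage sketch for CrossEntropyAdvLoss; the logits and target
# classes below are random placeholders used only to show the expected shapes.
_demo_adv_criterion = CrossEntropyAdvLoss()
_demo_logits = torch.randn(4, 40)            # [B, K] class scores from the victim model
_demo_targets = torch.randint(0, 40, (4,))   # [B] attack target classes
_demo_adv_loss = _demo_adv_criterion(_demo_logits, _demo_targets)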
def my_collate(batch):
    # Load irregular meshes (differing vertex counts) within a single batch.
meshes, label = zip(*batch)
meshes = join_meshes_as_batch(meshes, include_textures=False)
label = torch.tensor(label)
return [meshes, label]
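# A minimal, hedged sketch (assuming valid_ds yields (Meshes, label) pairs): wiring
# my_collate into a DataLoader so meshes with different vertex counts are batched
# through join_meshes_as_batch. The batch size is arbitrary.
_demo_loader = DataLoader(valid_ds, batch_size=8, shuffle=False, collate_fn=my_collate)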
class ClipMeshv_Linf(nn.Module):
def __init__(self, budget):
"""Clip mesh vertices with a given l_inf budget.
Args:
budget (float): perturbation budget
"""
super(ClipMeshv_Linf, self).__init__()
self.budget = budget
def forward(self, vt, ori_vt):
"""Clipping every vertice in a mesh.
Args:
vt (torch.FloatTensor): batch vt, [B, 3, K]
ori_vt (torch.FloatTensor): original point cloud
"""
with torch.no_grad():
diff = vt - ori_vt # [B, 3, K]
norm = torch.sum(diff ** 2, dim=1) ** 0.5 # [B, K]
scale_factor = self.budget / (norm + 1e-9) # [B, K]
scale_factor = torch.clamp(scale_factor, max=1.) # [B, K]
diff = diff * scale_factor[:, None]
vt = ori_vt + diff
return vt
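# A minimal, hedged sketch of ClipMeshv_Linf: random original/perturbed vertex
# tensors (hypothetical shapes [B, 3, K]) are clipped so that no vertex is displaced
# by more than the budget.
_demo_clip = ClipMeshv_Linf(budget=0.1)
_demo_ori = torch.rand(2, 3, 100)
_demo_vt = _demo_ori + 0.5 * torch.randn(2, 3, 100)   # deliberately large perturbation
_demo_clipped = _demo_clip(_demo_vt, _demo_ori)       # per-vertex displacement <= 0.1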
class MeshAttack:
"""Class for Mesh attack.
"""
def __init__(self, model, adv_func, attack_lr=1e-2,
init_weight=10., max_weight=80., binary_step=10, num_iter=1500):
"""Mesh attack by perturbing vertice.
Args:
model (torch.nn.Module): victim model
adv_func (function): adversarial loss function
attack_lr (float, optional): lr for optimization. Defaults to 1e-2.
init_weight (float, optional): weight factor init. Defaults to 10.
max_weight (float, optional): max weight factor. Defaults to 80.
binary_step (int, optional): binary search step. Defaults to 10.
            num_iter (int, optional): max iterations in every search step. Defaults to 1500.
"""
self.model = model.cuda()
self.model.eval()
self.adv_func = adv_func
self.attack_lr = attack_lr
self.init_weight = init_weight
self.max_weight = max_weight
self.binary_step = binary_step
self.num_iter = num_iter
self.clip = ClipMeshv_Linf(budget=0.1)
    def attack(self, data, target, label):
        """Attack the given data towards the target class.
        Args:
            data (Meshes): victim meshes, a batch of B pytorch3d meshes
            target (torch.LongTensor): attack target classes, [B]
            label (torch.LongTensor): ground-truth labels, [B]
        """
B, K = len(data), 1024
global bas
data = data.cuda()
label_val = target.detach().cpu().numpy() # [B]
label = label.long().cuda().detach()
label_true = label.detach().cpu().numpy()
deform_ori = data.clone()
# weight factor for budget regularization
lower_bound = np.zeros((B,))
upper_bound = np.ones((B,)) * self.max_weight
current_weight = np.ones((B,)) * self.init_weight
# record best results in binary search
o_bestdist = np.array([1e10] * B)
o_bestscore = np.array([-1] * B)
o_bestattack = np.zeros((B, 3, K))
# Weight for the chamfer loss
w_chamfer = 1.0
# Weight for mesh edge loss
w_edge = 0.2
# Weight for mesh laplacian smoothing
w_laplacian = 0.5
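        # (A standalone sketch after this snippet shows how these weights are
        # typically combined with pytorch3d's mesh losses.)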
# perform binary search
for binary_step in range(self.binary_step):
deform_verts = torch.full(deform_ori.verts_packed().shape, 0.000001, device='cuda:%s'%args.local_rank, requires_grad=True)
ori_def = deform_verts.detach().clone()
bestdist =
|
np.array([1e10] * B)
|
numpy.array
|
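# A minimal, hedged sketch (not part of the snippet above): how the w_chamfer,
# w_edge and w_laplacian weights declared in MeshAttack.attack are typically combined
# with pytorch3d's mesh losses. The two ico_sphere meshes are stand-ins for the
# perturbed and original meshes and are purely illustrative.
from pytorch3d.utils import ico_sphere
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.loss import chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing

_demo_src = ico_sphere(2)   # stands in for the deformed (adversarial) mesh
_demo_trg = ico_sphere(3)   # stands in for the original mesh
_demo_pts_src = sample_points_from_meshes(_demo_src, 1024)
_demo_pts_trg = sample_points_from_meshes(_demo_trg, 1024)
_demo_chamfer, _ = chamfer_distance(_demo_pts_src, _demo_pts_trg)
_demo_edge = mesh_edge_loss(_demo_src)
_demo_laplacian = mesh_laplacian_smoothing(_demo_src, method="uniform")
_demo_total = 1.0 * _demo_chamfer + 0.2 * _demo_edge + 0.5 * _demo_laplacian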
from numpy import allclose, hstack, vstack, zeros
from quadprog import solve_qp
"""
Solve a Quadratic Program defined as
minimize (1/2) * x.T * H * x + g.T * x
subject to
C * x <= d
A * x == b
The solution is saved and returned in a QPSolution object.
def <solver>Wrapper(H, g, C, d, A, b, initvals)
:param H : numpy.array
:param g : numpy.array
:param C : numpy.array
:param d : numpy.array
:param A : numpy.array, optional
:param b : numpy.array, optional
:param initvals : numpy.array, optional
"""
class QPSolution:
"""
Solution of a Quadratic Program minimize f(x) subject to ineq and eq constraints.
argmin: solution of the problem. x*
optimum: value of the objective at argmin. f(x*)
active: set of indices of the active constraints.
dual: value of the lagrangian multipliers.
niter: number of iterations needed to solve the problem.
"""
def __init__(self, nx, nc):
self.argmin = zeros(nx)
self.optimum = 0.
self.active = zeros(nx).astype(bool)
self.dual = zeros(nc)
self.niter = 0.
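# A minimal, hedged sketch of how a QPSolution container is meant to be populated;
# the dimensions (2 variables, 3 constraints) and values are placeholders.
_demo_sol = QPSolution(2, 3)
_demo_sol.argmin = zeros(2)   # x*
_demo_sol.optimum = 0.0       # f(x*)
_demo_sol.niter = 1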
def quadprogWrapper(H, g, C=None, d=None, A=None, b=None, initvals=None):
"""
Quadprog <https://pypi.python.org/pypi/quadprog/>.
The quadprog solver only considers the lower entries of `H`, therefore it
will use a wrong cost function if a non-symmetric matrix is provided.
"""
    assert allclose(H, H.T, atol=1e-10)
if initvals is not None:
print("quadprog: note that warm-start values ignored by wrapper")
if A is not None and C is not None:
qp_C = -vstack([A, C]).T
qp_b = -
|
hstack([b, d])
|
numpy.hstack
|
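# A minimal, hedged note on the completion above: numpy.hstack concatenates the
# equality and inequality right-hand sides into the single vector quadprog expects.
# The values are illustrative only.
import numpy as np

_demo_b = np.array([1.0])                      # equality RHS (A x == b)
_demo_d = np.array([3.0, 3.0])                 # inequality RHS (C x <= d)
_demo_qp_b = -np.hstack([_demo_b, _demo_d])    # -> array([-1., -3., -3.])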