| prompt (string, length 15 – 655k) | completion (string, length 3 – 32.4k) | api (string, length 8 – 52) |
|---|---|---|
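Each row below pairs a long code `prompt` with a short ground-truth `completion` and the fully qualified `api` that the completion calls. As a minimal sketch of how such rows might be inspected, assuming the table has been exported to a hypothetical `rows.jsonl` file (the file name and the export step are not part of this page):

```python
import json

# Hypothetical export: one JSON object per line with "prompt", "completion" and "api" keys.
with open("rows.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        # Show the tail of the prompt, the expected completion, and the target API.
        print(row["prompt"][-80:])
        print("completion:", row["completion"])
        print("api:", row["api"])
```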
from __future__ import division
from __future__ import print_function
import os
import math
import random
import time
import argparse
import csv
from datetime import datetime
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from texttable import Texttable
import scipy.sparse as sp
from sklearn.metrics import adjusted_rand_score, normalized_mutual_info_score
from utils import get_powers_sparse, scipy_sparse_to_torch_sparse
from utils import split_labels, getClassMean, write_log, extract_edges
from metrics import triplet_loss_InnerProduct_alpha, Prob_Balanced_Ratio_Loss, Prob_Balanced_Normalized_Loss
from metrics import label_size_ratio, print_performance_mean_std, get_cut_and_distribution, link_sign_loss_function
from PyG_models import SSSNET
from cluster import Cluster
from preprocess import load_data
from param_parser import parameter_parser
args = parameter_parser()
torch.manual_seed(args.seed)
device = args.device
if args.dataset[-1] != '/':
args.dataset += '/'
if args.cuda:
torch.cuda.manual_seed(args.seed)
no_magnet = True
compare_names = []
if 'spectral' in args.all_methods:
compare_names = ['A','sns','dns','L','L_sym','BNC','BRC','SPONGE','SPONGE_sym']
num_gnn = 0
if 'SSSNET' in args.all_methods:
num_gnn += 1
compare_names.append('SSSNET')
compare_names_all = []
compare_names_all.extend(compare_names[:-1])
for feat_opt in args.feature_options:
compare_names_all.append(
compare_names[-1]+'_'+feat_opt)
else:
compare_names_all = compare_names
class SSSNET_Trainer(object):
"""
Object to train and score different models.
"""
def __init__(self, args, random_seed):
"""
Constructing the trainer instance.
:param args: Arguments object.
"""
self.args = args
self.device = args.device
label, self.train_mask, self.val_mask, self.test_mask, self.seed_mask, comb = load_data(args, args.load_only, random_seed)
# normalize label, the minimum should be 0 as class index
_label_ = label - np.amin(label)
self.label = torch.from_numpy(_label_[np.newaxis]).to(device)
self.cluster_dim = np.amax(_label_)+1
self.num_clusters = self.cluster_dim
self.feat_adj_reg, self.feat_L, self.feat_given, self.A_p_scipy, self.A_n_scipy = comb
self.edge_index_p = torch.LongTensor(self.A_p_scipy.nonzero()).to(self.args.device)
self.edge_weight_p = torch.FloatTensor(sp.csr_matrix(self.A_p_scipy).data).to(self.args.device)
self.edge_index_n = torch.LongTensor(self.A_n_scipy.nonzero()).to(self.args.device)
self.edge_weight_n = torch.FloatTensor(sp.csr_matrix(self.A_n_scipy).data).to(self.args.device)
self.A_p = get_powers_sparse(self.A_p_scipy, hop=1, tau=self.args.tau)[1].to(self.args.device)
self.A_n = get_powers_sparse(self.A_n_scipy, hop=1, tau=0)[1].to(self.args.device)
self.A_pt = get_powers_sparse(self.A_p_scipy.transpose(), hop=1, tau=self.args.tau)[1].to(self.args.device)
self.A_nt = get_powers_sparse(self.A_n_scipy.transpose(), hop=1, tau=0)[1].to(self.args.device)
if self.args.dense:
self.A_p = self.A_p.to_dense()
self.A_n = self.A_n.to_dense()
self.A_pt = self.A_pt.to_dense()
self.A_nt = self.A_nt.to_dense()
self.c = Cluster((0.5*(self.A_p_scipy+self.A_p_scipy.transpose()),
0.5*(self.A_n_scipy+self.A_n_scipy.transpose()), int(self.num_clusters)))
date_time = datetime.now().strftime('%m-%d-%H:%M:%S')
if args.dataset[:-1].lower() == 'ssbm':
default_values = [args.p, args.K, args.N, args.seed_ratio, args.train_ratio, args.test_ratio, args.size_ratio, args.eta, args.num_trials]
elif args.dataset[:-1].lower() == 'polarized':
default_values = [args.total_n, args.num_com, args.p, args.K, args.N, args.seed_ratio, args.train_ratio, args.test_ratio, args.size_ratio, args.eta, args.num_trials]
else:
default_values = [args.K, args.seed_ratio,
args.train_ratio, args.test_ratio, args.num_trials]
save_name = '_'.join([str(int(100*value)) for value in default_values])
save_name += 'Seed' + str(random_seed)
self.log_path = os.path.join(os.path.dirname(os.path.realpath(
__file__)), args.log_root, args.dataset[:-1], save_name, date_time)
if not os.path.isdir(self.log_path):
try:
os.makedirs(self.log_path)
except FileExistsError:
print('Folder exists!')
self.splits = self.train_mask.shape[1]
if len(self.test_mask.shape) == 1:
#data.test_mask = test_mask.unsqueeze(1).repeat(1, splits)
self.test_mask = np.repeat(
self.test_mask[:, np.newaxis], self.splits, 1)
write_log(vars(args), self.log_path) # write the setting
def SSSNET(self, feat_choice):
#################################
# SSSNET
#################################
if feat_choice == 'A_reg':
self.features = self.feat_adj_reg
elif feat_choice == 'L':
self.features = self.feat_L
elif feat_choice == 'given':
self.features = self.feat_given
elif feat_choice == 'None':
self.features = torch.eye(self.A_p_scipy.shape[0]).to(self.args.device)
res_full =
completion: np.zeros([self.splits, 1])
api: numpy.zeros
#!/usr/bin/env python3
#
# TImestream DAta Storage (TIDAS).
#
# Copyright (c) 2015-2019 by the parties listed in the AUTHORS file. All rights
# reserved. Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import, division, print_function
from mpi4py import MPI
import os
import sys
import argparse
import shutil
import numpy as np
import numpy.testing as nt
import calendar
from tidas import (
DataType,
BackendType,
CompressionType,
AccessMode,
Dictionary,
Intrvl,
Intervals,
Field,
Schema,
Group,
Block,
Volume,
)
from tidas.mpi import mpi_dist_uniform, MPIVolume
def dict_setup():
ret = Dictionary()
ret.put_string("string", "blahblahblah")
ret.put_float64("float64", -123456789.0123)
ret.put_float32("float32", -123.456)
ret.put_int8("int8", -100)
ret.put_uint8("uint8", 100)
ret.put_int16("int16", -10000)
ret.put_uint16("uint16", 10000)
ret.put_int32("int32", -1000000000)
ret.put_uint32("uint32", 1000000000)
ret.put_int64("int64", -100000000000)
ret.put_uint64("uint64", 100000000000)
return ret
def dict_verify(dct):
nt.assert_equal(dct.get_string("string"), "blahblahblah")
nt.assert_equal(dct.get_int8("int8"), -100)
nt.assert_equal(dct.get_uint8("uint8"), 100)
nt.assert_equal(dct.get_int16("int16"), -10000)
nt.assert_equal(dct.get_uint16("uint16"), 10000)
nt.assert_equal(dct.get_int32("int32"), -1000000000)
nt.assert_equal(dct.get_uint32("uint32"), 1000000000)
nt.assert_equal(dct.get_int64("int64"), -100000000000)
nt.assert_equal(dct.get_uint64("uint64"), 100000000000)
nt.assert_almost_equal(dct.get_float32("float32"), -123.456, decimal=3)
nt.assert_almost_equal(dct.get_float64("float64"), -123456789.0123)
return
def schema_setup(ndet):
fields = list()
fields.append(Field("int8", DataType.int8, "int8"))
fields.append(Field("uint8", DataType.uint8, "uint8"))
fields.append(Field("int16", DataType.int16, "int16"))
fields.append(Field("uint16", DataType.uint16, "uint16"))
fields.append(Field("int32", DataType.int32, "int32"))
fields.append(Field("uint32", DataType.uint32, "uint32"))
fields.append(Field("int64", DataType.int64, "int64"))
fields.append(Field("uint64", DataType.uint64, "uint64"))
fields.append(Field("float32", DataType.float32, "float32"))
fields.append(Field("float64", DataType.float64, "float64"))
fields.append(Field("string", DataType.string, "string"))
for d in range(ndet):
dname = "det_{}".format(d)
fields.append(Field(dname, DataType.float64, "volts"))
ret = Schema(fields)
return ret
def test_schema_verify(fields):
for fl in fields:
if fl.name == "int8":
assert fl.type == DataType.int8
assert fl.name == fl.units
elif fl.name == "uint8":
assert fl.type == DataType.uint8
assert fl.name == fl.units
elif fl.name == "int16":
assert fl.type == DataType.int16
assert fl.name == fl.units
elif fl.name == "uint16":
assert fl.type == DataType.uint16
assert fl.name == fl.units
elif fl.name == "int32":
assert fl.type == DataType.int32
assert fl.name == fl.units
elif fl.name == "uint32":
assert fl.type == DataType.uint32
assert fl.name == fl.units
elif fl.name == "int64":
assert fl.type == DataType.int64
assert fl.name == fl.units
elif fl.name == "uint64":
assert fl.type == DataType.uint64
assert fl.name == fl.units
elif fl.name == "float32":
assert fl.type == DataType.float32
assert fl.name == fl.units
elif fl.name == "float64":
assert fl.type == DataType.float64
assert fl.name == fl.units
elif fl.name == "string":
assert fl.type == DataType.string
assert fl.name == fl.units
else:
# Must be a det
assert fl.type == DataType.float64
assert fl.units == "volts"
return
def intervals_setup(nint):
ilist = []
gap = 1.0
span = 123.4
gap_samp = 5
span_samp = 617
for i in range(nint):
start = gap + float(i) * (span + gap)
stop = float(i + 1) * (span + gap)
first = gap_samp + i * (span_samp + gap_samp)
last = (i + 1) * (span_samp + gap_samp)
ilist.append(Intrvl(start, stop, first, last))
return ilist
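# Illustrative check (not from the original source): with gap=1.0, span=123.4,
# gap_samp=5 and span_samp=617, the first interval (i=0) is
#   start=1.0, stop=124.4, first=5, last=622,
# i.e. consecutive intervals are separated by one gap in both time and sample index.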
def intervals_verify(ilist):
comp = intervals_setup(len(ilist))
for i in range(len(comp)):
nt.assert_almost_equal(ilist[i].start, comp[i].start)
nt.assert_almost_equal(ilist[i].stop, comp[i].stop)
assert ilist[i].first == comp[i].first
assert ilist[i].last == comp[i].last
def group_setup(grp, ndet, nsamp):
time = np.zeros(nsamp, dtype=np.float64)
int8_data = np.zeros(nsamp, dtype=np.int8)
uint8_data = np.zeros(nsamp, dtype=np.uint8)
int16_data = np.zeros(nsamp, dtype=np.int16)
uint16_data = np.zeros(nsamp, dtype=np.uint16)
int32_data = np.zeros(nsamp, dtype=np.int32)
uint32_data = np.zeros(nsamp, dtype=np.uint32)
int64_data = np.zeros(nsamp, dtype=np.int64)
uint64_data = np.zeros(nsamp, dtype=np.uint64)
float32_data = np.zeros(nsamp, dtype=np.float32)
float64_data = np.zeros(nsamp, dtype=np.float64)
string_data = np.empty(nsamp, dtype="S64")
for i in range(nsamp):
fi = float(i)
time[i] = fi * 0.001
int8_data[i] = -(i % 128)
uint8_data[i] = i % 128
int16_data[i] = -(i % 32768)
uint16_data[i] = i % 32768
int32_data[i] = -i
uint32_data[i] = i
int64_data[i] = -i
uint64_data[i] = i
float32_data[i] = fi
float64_data[i] = fi
string_data[i] = "foobarbahblat"
grp.write_times(0, time)
grp.write("int8", 0, int8_data)
grp.write("uint8", 0, uint8_data)
grp.write("int16", 0, int16_data)
grp.write("uint16", 0, uint16_data)
grp.write("int32", 0, int32_data)
grp.write("uint32", 0, uint32_data)
grp.write("int64", 0, int64_data)
grp.write("uint64", 0, uint64_data)
grp.write("float32", 0, float32_data)
grp.write("float64", 0, float64_data)
grp.write("string", 0, string_data)
for d in range(ndet):
dname = "det_{}".format(d)
np.random.seed(d)
data = np.random.normal(loc=0.0, scale=10.0, size=nsamp)
grp.write(dname, 0, data)
return
def group_verify(grp, ndet, nsamp):
time_check = np.zeros(nsamp, dtype=np.float64)
int8_data_check = np.zeros(nsamp, dtype=np.int8)
uint8_data_check = np.zeros(nsamp, dtype=np.uint8)
int16_data_check = np.zeros(nsamp, dtype=np.int16)
uint16_data_check = np.zeros(nsamp, dtype=np.uint16)
int32_data_check = np.zeros(nsamp, dtype=np.int32)
uint32_data_check = np.zeros(nsamp, dtype=np.uint32)
int64_data_check = np.zeros(nsamp, dtype=np.int64)
uint64_data_check = np.zeros(nsamp, dtype=np.uint64)
float32_data_check = np.zeros(nsamp, dtype=np.float32)
float64_data_check = np.zeros(nsamp, dtype=np.float64)
string_data_check = np.zeros(nsamp, dtype="S64")
for i in range(nsamp):
fi = float(i)
time_check[i] = fi * 0.001
int8_data_check[i] = -(i % 128)
uint8_data_check[i] = i % 128
int16_data_check[i] = -(i % 32768)
uint16_data_check[i] = i % 32768
int32_data_check[i] = -i
uint32_data_check[i] = i
int64_data_check[i] = -i
uint64_data_check[i] = i
float32_data_check[i] = fi
float64_data_check[i] = fi
string_data_check[i] = "foobarbahblat"
time = grp.read_times(0, nsamp)
int8_data = grp.read("int8", 0, nsamp)
uint8_data = grp.read("uint8", 0, nsamp)
int16_data = grp.read("int16", 0, nsamp)
uint16_data = grp.read("uint16", 0, nsamp)
int32_data = grp.read("int32", 0, nsamp)
uint32_data = grp.read("uint32", 0, nsamp)
int64_data = grp.read("int64", 0, nsamp)
uint64_data = grp.read("uint64", 0, nsamp)
float32_data = grp.read("float32", 0, nsamp)
float64_data = grp.read("float64", 0, nsamp)
string_data = grp.read("string", 0, nsamp)
nt.assert_equal(int8_data, int8_data_check)
nt.assert_equal(uint8_data, uint8_data_check)
nt.assert_equal(int16_data, int16_data_check)
nt.assert_equal(uint16_data, uint16_data_check)
completion: nt.assert_equal(int32_data, int32_data_check)
api: numpy.testing.assert_equal
import enum
import numpy as np
from rrc_iprl_package.control.contact_point import ContactPoint
from rrc_iprl_package.traj_opt.fixed_contact_point_opt import \
FixedContactPointOpt
from rrc_iprl_package.traj_opt.static_object_opt import StaticObjectOpt
from scipy.spatial.distance import pdist, squareform
from scipy.spatial.transform import Rotation
from trifinger_simulation.tasks import move_cube
# Here, hard code the base position of the fingers (as angle on the arena)
r = 0.15
theta_0 = 90
theta_1 = 310
theta_2 = 200
# theta_2 = 3.66519 # 210 degrees
# CUBOID_SIZE in trifinger_simulation surf_2021 branch is set to cube dims
CUBE_HALF_SIZE = move_cube._CUBOID_SIZE[0] / 2 + 0.001
OBJ_SIZE = move_cube._CUBOID_SIZE
OBJ_MASS = 0.016 # 16 grams
FINGER_BASE_POSITIONS = [
np.array(
[[np.cos(theta_0 * (np.pi / 180)) * r, np.sin(theta_0 * (np.pi / 180)) * r, 0]]
),
np.array(
[[np.cos(theta_1 * (np.pi / 180)) * r, np.sin(theta_1 * (np.pi / 180)) * r, 0]]
),
np.array(
[[np.cos(theta_2 * (np.pi / 180)) * r, np.sin(theta_2 * (np.pi / 180)) * r, 0]]
),
]
BASE_ANGLE_DEGREES = [0, -120, -240]
class PolicyMode(enum.Enum):
RESET = enum.auto()
TRAJ_OPT = enum.auto()
IMPEDANCE = enum.auto()
RL_PUSH = enum.auto()
RESIDUAL = enum.auto()
# Information about object faces given face_id
OBJ_FACES_INFO = {
1: {
"center_param": np.array([0.0, -1.0, 0.0]),
"face_down_default_quat": np.array([0.707, 0, 0, 0.707]),
"adjacent_faces": [6, 4, 3, 5],
"opposite_face": 2,
"up_axis": np.array([0.0, 1.0, 0.0]), # UP axis when this face is ground face
},
2: {
"center_param": np.array([0.0, 1.0, 0.0]),
"face_down_default_quat": np.array([-0.707, 0, 0, 0.707]),
"adjacent_faces": [6, 4, 3, 5],
"opposite_face": 1,
"up_axis": np.array([0.0, -1.0, 0.0]),
},
3: {
"center_param": np.array([1.0, 0.0, 0.0]),
"face_down_default_quat": np.array([0, 0.707, 0, 0.707]),
"adjacent_faces": [1, 2, 4, 6],
"opposite_face": 5,
"up_axis": np.array([-1.0, 0.0, 0.0]),
},
4: {
"center_param": np.array([0.0, 0.0, 1.0]),
"face_down_default_quat": np.array([0, 1, 0, 0]),
"adjacent_faces": [1, 2, 3, 5],
"opposite_face": 6,
"up_axis": np.array([0.0, 0.0, -1.0]),
},
5: {
"center_param": np.array([-1.0, 0.0, 0.0]),
"face_down_default_quat": np.array([0, -0.707, 0, 0.707]),
"adjacent_faces": [1, 2, 4, 6],
"opposite_face": 3,
"up_axis": np.array([1.0, 0.0, 0.0]),
},
6: {
"center_param": np.array([0.0, 0.0, -1.0]),
"face_down_default_quat": np.array([0, 0, 0, 1]),
"adjacent_faces": [1, 2, 3, 5],
"opposite_face": 4,
"up_axis": np.array([0.0, 0.0, 1.0]),
},
}
"""
Compute joint torques to move fingertips to desired locations
Inputs:
tip_pos_desired_list: List of desired fingertip positions for each finger
q_current: Current joint angles
dq_current: Current joint velocities
tip_forces_wf: fingertip forces in world frame
tol: tolerance for determining when fingers have reached goal
"""
def impedance_controller(
tip_pos_desired_list,
tip_vel_desired_list,
q_current,
dq_current,
custom_pinocchio_utils,
tip_forces_wf=None,
Kp=(25, 25, 25, 25, 25, 25, 25, 25, 25),
Kv=(1, 1, 1, 1, 1, 1, 1, 1, 1),
grav=-9.81,
):
torque = 0
for finger_id in range(3):
# Get contact forces for single finger
if tip_forces_wf is None:
f_wf = None
else:
f_wf = np.expand_dims(
np.array(tip_forces_wf[finger_id * 3 : finger_id * 3 + 3]), 1
)
finger_torque = impedance_controller_single_finger(
finger_id,
tip_pos_desired_list[finger_id],
tip_vel_desired_list[finger_id],
q_current,
dq_current,
custom_pinocchio_utils,
tip_force_wf=f_wf,
Kp=Kp,
Kv=Kv,
grav=grav,
)
torque += finger_torque
return torque
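# Illustrative usage sketch (not from the original source); the variable names below are
# hypothetical and would come from the surrounding robot control code:
#
#   torque = impedance_controller(
#       tip_pos_desired_list=[goal_0, goal_1, goal_2],  # one 3D goal per finger
#       tip_vel_desired_list=[np.zeros(3)] * 3,
#       q_current=q, dq_current=dq,
#       custom_pinocchio_utils=kinematics,
#   )
#   # `torque` is the summed joint-torque command for all three fingers.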
"""
Compute joint torques to move fingertip to desired location
Inputs:
finger_id: Finger 0, 1, or 2
tip_desired: Desired fingertip pose **ORIENTATION??**
for orientation: transform fingertip reference frame to world frame (take
into account object orientation)
for now, just track position
q_current: Current joint angles
dq_current: Current joint velocities
tip_forces_wf: fingertip forces in world frame
tol: tolerance for determining when fingers have reached goal
"""
def impedance_controller_single_finger(
finger_id,
tip_pos_desired,
tip_vel_desired,
q_current,
dq_current,
custom_pinocchio_utils,
tip_force_wf=None,
Kp=(25, 25, 25, 25, 25, 25, 25, 25, 25),
Kv=(1, 1, 1, 1, 1, 1, 1, 1, 1),
grav=-9.81,
):
Kp_x = Kp[finger_id * 3 + 0]
Kp_y = Kp[finger_id * 3 + 1]
Kp_z = Kp[finger_id * 3 + 2]
Kp = np.diag([Kp_x, Kp_y, Kp_z])
Kv_x = Kv[finger_id * 3 + 0]
Kv_y = Kv[finger_id * 3 + 1]
Kv_z = Kv[finger_id * 3 + 2]
Kv = np.diag([Kv_x, Kv_y, Kv_z])
# Compute current fingertip position
x_current = custom_pinocchio_utils.forward_kinematics(q_current)[finger_id]
delta_x = np.expand_dims(np.array(tip_pos_desired) - np.array(x_current), 1)
# print("Current x: {}".format(x_current))
# print("Desired x: {}".format(tip_desired))
# print("Delta: {}".format(delta_x))
# Get full Jacobian for finger
Ji = custom_pinocchio_utils.get_tip_link_jacobian(finger_id, q_current)
# Just take first 3 rows, which correspond to linear velocities of fingertip
Ji = Ji[:3, :]
# Get g matrix for gravity compensation
_, g = custom_pinocchio_utils.get_lambda_and_g_matrix(
finger_id, q_current, Ji, grav
)
# Get current fingertip velocity
dx_current = Ji @ np.expand_dims(np.array(dq_current), 1)
delta_dx = np.expand_dims(np.array(tip_vel_desired), 1) - np.array(dx_current)
if tip_force_wf is not None:
torque = (
np.squeeze(Ji.T @ (Kp @ delta_x + Kv @ delta_dx) + Ji.T @ tip_force_wf) + g
)
else:
torque = np.squeeze(Ji.T @ (Kp @ delta_x + Kv @ delta_dx)) + g
# print("Finger {} delta".format(finger_id))
# print(np.linalg.norm(delta_x))
return torque
"""
Compute contact point position in world frame
Inputs:
cp_param: Contact point param [px, py, pz]
cube: Block object, which contains object shape info
"""
def get_cp_pos_wf_from_cp_param(
cp_param, cube_pos_wf, cube_quat_wf, cube_half_size=CUBE_HALF_SIZE
):
cp = get_cp_of_from_cp_param(cp_param, cube_half_size)
rotation = Rotation.from_quat(cube_quat_wf)
translation = np.asarray(cube_pos_wf)
return rotation.apply(cp.pos_of) + translation
"""
Get contact point positions in world frame from cp_params
"""
def get_cp_pos_wf_from_cp_params(
cp_params, cube_pos, cube_quat, cube_half_size=CUBE_HALF_SIZE, **kwargs
):
# Get contact points in wf
fingertip_goal_list = []
for i in range(len(cp_params)):
# for i in range(cp_params.shape[0]):
fingertip_goal_list.append(
get_cp_pos_wf_from_cp_param(
cp_params[i], cube_pos, cube_quat, cube_half_size
)
)
return fingertip_goal_list
"""
Compute contact point position in object frame
Inputs:
cp_param: Contact point param [px, py, pz]
"""
def get_cp_of_from_cp_param(cp_param, cube_half_size=CUBE_HALF_SIZE):
obj_shape = (cube_half_size, cube_half_size, cube_half_size)
cp_of = []
# Get cp position in OF
for i in range(3):
cp_of.append(-obj_shape[i] + (cp_param[i] + 1) * obj_shape[i])
cp_of = np.asarray(cp_of)
x_param = cp_param[0]
y_param = cp_param[1]
z_param = cp_param[2]
# For now, just hard code quat
if y_param == -1:
quat = (0, 0, np.sqrt(2) / 2, np.sqrt(2) / 2)
elif y_param == 1:
quat = (0, 0, -np.sqrt(2) / 2, np.sqrt(2) / 2)
elif x_param == 1:
quat = (0, 0, 1, 0)
elif z_param == 1:
quat = (0, np.sqrt(2) / 2, 0, np.sqrt(2) / 2)
elif x_param == -1:
quat = (0, 0, 0, 1)
elif z_param == -1:
quat = (0, np.sqrt(2) / 2, 0, -np.sqrt(2) / 2)
cp = ContactPoint(cp_of, quat)
return cp
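# Worked example (not from the original source): for cp_param = [0, -1, 0] and half size h,
# the loop above gives cp_of = [-h + 1*h, -h + 0*h, -h + 1*h] = [0, -h, 0], i.e. the centre
# of the y = -1 face, and the y_param == -1 branch selects that face's quaternion.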
"""
Get face id on cube, given cp_param
cp_param: [x,y,z]
"""
def get_face_from_cp_param(cp_param):
x_param = cp_param[0]
y_param = cp_param[1]
z_param = cp_param[2]
# For now, just hard code quat
if y_param == -1:
face = 1
elif y_param == 1:
face = 2
elif x_param == 1:
face = 3
elif z_param == 1:
face = 4
elif x_param == -1:
face = 5
elif z_param == -1:
face = 6
return face
"""
Trasform point p from world frame to object frame, given object pose
"""
def get_wf_from_of(p, obj_pose):
cube_pos_wf = obj_pose.position
cube_quat_wf = obj_pose.orientation
rotation = Rotation.from_quat(cube_quat_wf)
translation = np.asarray(cube_pos_wf)
return rotation.apply(p) + translation
"""
Trasform point p from object frame to world frame, given object pose
"""
def get_of_from_wf(p, obj_pose):
cube_pos_wf = obj_pose.position
cube_quat_wf = obj_pose.orientation
rotation = Rotation.from_quat(cube_quat_wf)
translation = np.asarray(cube_pos_wf)
rotation_inv = rotation.inv()
translation_inv = -rotation_inv.apply(translation)
return rotation_inv.apply(p) + translation_inv
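# Note (not from the original source): get_of_from_wf inverts get_wf_from_of, so
# get_of_from_wf(get_wf_from_of(p, obj_pose), obj_pose) recovers p up to floating-point error.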
##############################################################################
# Lift mode functions
##############################################################################
"""
Run trajectory optimization
current_position: current joint positions of robot
x0: object initial position for traj opt
x_goal: object goal position for traj opt
nGrid: number of grid points
dt: delta t
"""
def run_fixed_cp_traj_opt(
cp_params,
current_position,
custom_pinocchio_utils,
x0,
x_goal,
nGrid,
dt,
npz_filepath=None,
):
cp_params_on_obj = []
for cp in cp_params:
if cp is not None:
cp_params_on_obj.append(cp)
fnum = len(cp_params_on_obj)
# Formulate and solve optimization problem
opt_problem = FixedContactPointOpt(
nGrid=nGrid, # Number of timesteps
dt=dt, # Length of each timestep (seconds)
fnum=fnum,
cp_params=cp_params_on_obj,
x0=x0,
x_goal=x_goal,
obj_shape=OBJ_SIZE,
obj_mass=OBJ_MASS,
npz_filepath=npz_filepath,
)
x_soln = np.array(opt_problem.x_soln)
dx_soln = np.array(opt_problem.dx_soln)
l_wf_soln = np.array(opt_problem.l_wf_soln)
return x_soln, dx_soln, l_wf_soln
"""
Get initial contact points on cube
Assign closest cube face to each finger
Since we are lifting the object, don't worry about the wf z-axis, just care about the wf xy-plane
"""
def get_lifting_cp_params(obj_pose):
# face that is touching the ground
ground_face = get_closest_ground_face(obj_pose)
# Transform finger base positions to object frame
base_pos_list_of = []
for f_wf in FINGER_BASE_POSITIONS:
f_of = get_of_from_wf(f_wf, obj_pose)
base_pos_list_of.append(f_of)
# Find distance from x axis and y axis, and store in xy_distances
# Need some additional logic to prevent multiple fingers from being assigned to same face
x_axis = np.array([1, 0])
y_axis = np.array([0, 1])
# Object frame axis corresponding to plane parallel to ground plane
x_ind, y_ind = __get_parallel_ground_plane_xy(ground_face)
xy_distances = np.zeros(
(3, 2)
) # Row corresponds to a finger, columns are x and y axis distances
for f_i, f_of in enumerate(base_pos_list_of):
point_in_plane = np.array(
[f_of[0, x_ind], f_of[0, y_ind]]
) # Ignore dimension of point that's not in the plane
x_dist = __get_distance_from_pt_2_line(x_axis, np.array([0, 0]), point_in_plane)
y_dist = __get_distance_from_pt_2_line(y_axis,
completion: np.array([0, 0])
api: numpy.array
from os.path import join, isfile, isdir
from glob import glob
from scipy.interpolate import RectBivariateSpline
from collections import namedtuple, OrderedDict
import netCDF4 as nc
import numpy as np
import geokit as gk
import pandas as pd
from ..util import ResError
# make a data handler
Index = namedtuple("Index", "yi xi")
class NCSource(object):
"""The NCSource object manages weather data from a generic set of netCDF4 file sources
It furthermore allows access to a number of common functionalities and constants which are
often encountered when simulating renewable energy technologies
Note:
-----
Various constants can be set for a given weather source which can impact later simulation workflows.
Note that not all weather sources will have all of these constants available. Also more may be
implemented besides (so be sure to check the DocString for the source you intend to use).
These constants include:
MAX_LON_DIFFERENCE
The maximum longitude difference to accept between a grid cell and the coordinates you would
like to extract data for
MAX_LAT_DIFFERENCE
The maximum latitude difference to accept between a grid cell and the coordinates you would
like to extract data for
WIND_SPEED_HEIGHT_FOR_WIND_ENERGY
The suggested altitude of wind speed data to use for wind-energy simulations
WIND_SPEED_HEIGHT_FOR_SOLAR_ENERGY
The suggested altitude of wind speed data to use for solar-energy simulations
LONG_RUN_AVERAGE_WINDSPEED
A path to a raster file with the long-time average wind speed in each grid cell
* Can be used in wind energy simulations
* Calculated at the height specified in `WIND_SPEED_HEIGHT_FOR_WIND_ENERGY`
* Time range included in the long run averaging depends on the data source
LONG_RUN_AVERAGE_WINDDIR
A path to a raster file with the long-time average wind direction in each grid cell
* Can be used in wind energy simulations
* Calculated at the height specified in `WIND_SPEED_HEIGHT_FOR_WIND_ENERGY`
* Time range included in the long run averaging depends on the data source
LONG_RUN_AVERAGE_GHI
A path to a raster file with the long-time average global horizontal irradiance
in each grid cell
* Can be used in solar energy simulations
* Calculated at the surface
* Time range included in the long run averaging depends on the data source
LONG_RUN_AVERAGE_DNI
A path to a raster file with the long-time average direct normal irradiance
in each grid cell
* Can be used in solar energy simulations
* Calculated at the surface
* Time range included in the long run averaging depends on the data source
See Also:
---------
reskit.weather.MerraSource
reskit.weather.SarahSource
reskit.weather.Era5Source
"""
WIND_SPEED_HEIGHT_FOR_WIND_ENERGY = None
WIND_SPEED_HEIGHT_FOR_SOLAR_ENERGY = None
LONG_RUN_AVERAGE_WINDSPEED = None
LONG_RUN_AVERAGE_WINDDIR = None
LONG_RUN_AVERAGE_GHI = None
LONG_RUN_AVERAGE_DNI = None
MAX_LON_DIFFERENCE = None
MAX_LAT_DIFFERENCE = None
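# Illustrative sketch (not from the original source): concrete sources are expected to
# override the class constants above, for example
#
#   class MyWeatherSource(NCSource):              # hypothetical subclass
#       WIND_SPEED_HEIGHT_FOR_WIND_ENERGY = 100   # metres, assumed value
#       MAX_LON_DIFFERENCE = 0.6                  # degrees, assumed value
#       MAX_LAT_DIFFERENCE = 0.6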
def __init__(self, source, bounds=None, index_pad=0, time_name="time", lat_name="lat", lon_name="lon", tz=None, _max_lon_diff=0.6, _max_lat_diff=0.6, verbose=True, forward_fill=True, flip_lat=False, flip_lon=False, time_offset_minutes=None):
"""Initialize a generic netCDF4 file source
Note:
-----
Generally not intended for normal use. Look into MerraSource, CordexSource, or CosmoSource
Parameters:
-----------
path : str or list of strings
The path to the main data file(s) to load
If multiple files are given, or if a directory of netCDF4 files is given, then it is assumed
that all files ending with the extension '.nc' or '.nc4' should be managed by this object.
* Be sure that all the netCDF4 files given share the same time and spatial dimensions!
bounds : Anything acceptable to geokit.Extent.load(), optional
The boundaries of the data which is needed
* Usage of this will help with memory management
* If None, the full dataset is loaded in memory
* The actual extent of the loaded data depends on the source's
available data
index_pad : int, optional
The padding to apply to the boundaries
* Useful in case of interpolation
* Units are in longitudinal degrees
time_name : str, optional
The name of the time parameter in the netCDF4 dataset
lat_name : str, optional
The name of the latitude parameter in the netCDF4 dataset
lon_name : str, optional
The name of the longitude parameter in the netCDF4 dataset
tz : str, optional
Applies the indicated timezone onto the time axis
* For example, use "GMT" for unadjusted time
verbose : bool, optional
If True, then status outputs are printed when searching for and reading weather data
forward_fill : bool, optional
If True, then missing data in the weather file is forward-filled
* Generally, there should be no missing data at all. This option is only intended to
catch the rare scenarios where one or two timesteps are missing
flip_lat : bool, optional
If True, flips the latitude dimension when reading weather data from the source
* Should only be given if latitudes are given in descending order
flip_lon : bool, optional
If True, flips the longitude dimension when reading weather data from the source
* Should only be given if longitudes are given in descending order
time_offset_minutes : numeric, optional
If not None, adds the specified offset in minutes to the timesteps read from the weather file
See Also:
---------
MerraSource
SarahSource
Era5Source
"""
# Collect sources
def addSource(src):
out = []
if isinstance(src, list):
for s in src:
out.extend(addSource(s))
elif isinstance(src, str):
if isfile(src): # Assume its an NC file
out.extend([src, ])
elif isdir(src): # Assume its a directory of NC files
for s in glob(join(src, "*.nc")):
out.append(s)
for s in glob(join(src, "*.nc4")):
out.append(s)
else: # Assume we were given a glob string
for s in glob(src):
out.extend(addSource(s))
return out
sources = addSource(source)
if len(sources) == 0:
raise ResError("No '.nc' or '.nc4' files found")
sources.sort()
# Collect all variable information
self.variables = OrderedDict()
self.fill = forward_fill
expectedShape = OrderedDict()
units = []
names = []
for src in sources:
if verbose:
print(src)
ds = nc.Dataset(src, keepweakref=True)
for var in ds.variables:
if not var in self.variables:
self.variables[var] = src
expectedShape[var] = ds[var].shape
try:
unit = ds[var].units
except:
unit = "Unknown"
try:
name = ds[var].standard_name
except:
name = "Unknown"
names.append(name)
units.append(unit)
else:
if ds[var].shape[1:] != expectedShape[var][1:]:
raise ResError("Variable %s does not match expected shape %s. From %s" % (
var, expectedShape[var], src))
ds.close()
tmp = pd.DataFrame(columns=["name", "units", "path", ], index=self.variables.keys())
tmp["name"] = names
tmp["units"] = units
tmp["shape"] = [expectedShape[v] for v in tmp.index]
tmp["path"] = [self.variables[v] for v in tmp.index]
self.variables = tmp
# set basic variables
ds = nc.Dataset(self.variables["path"][lat_name], keepweakref=True)
self._allLats = ds[lat_name][:]
ds.close()
ds = nc.Dataset(self.variables["path"][lon_name], keepweakref=True)
self._allLons = ds[lon_name][:]
ds.close()
self._maximal_lon_difference = _max_lon_diff
self._maximal_lat_difference = _max_lat_diff
if len(self._allLats.shape) == 1 and len(self._allLons.shape) == 1:
self.dependent_coordinates = False
self._lonN = self._allLons.size
self._latN = self._allLats.size
elif len(self._allLats.shape) == 2 and len(self._allLons.shape) == 2:
self.dependent_coordinates = True
self._lonN = self._allLons.shape[1]
self._latN = self._allLats.shape[0]
else:
raise ResError("latitude and longitude shapes are not usable")
# set lat and lon selections
if bounds is not None:
self.bounds = gk.Extent.load(bounds).castTo(4326)
if abs(self.bounds.xMin - self.bounds.xMax) <= self.MAX_LON_DIFFERENCE:
self.bounds = gk.Extent(
self.bounds.xMin - self.MAX_LON_DIFFERENCE / 2,
self.bounds.yMin,
self.bounds.xMax + self.MAX_LON_DIFFERENCE / 2,
self.bounds.yMax,
srs=gk.srs.EPSG4326)
if abs(self.bounds.yMin - self.bounds.yMax) <= self.MAX_LAT_DIFFERENCE:
self.bounds = gk.Extent(
self.bounds.xMin,
self.bounds.yMin - self.MAX_LAT_DIFFERENCE / 2,
self.bounds.xMax,
self.bounds.yMax + self.MAX_LAT_DIFFERENCE / 2,
srs=gk.srs.EPSG4326)
# find slices which contains our extent
if self.dependent_coordinates:
left = self._allLons < self.bounds.xMin
right = self._allLons > self.bounds.xMax
if (left | right).all():
left[:, :-1] = np.logical_and(left[:, 1:], left[:, :-1])
right[:, 1:] = np.logical_and(right[:, 1:], right[:, :-1])
bot = self._allLats < self.bounds.yMin
top = self._allLats > self.bounds.yMax
if (top | bot).all():
top[:-1, :] = np.logical_and(top[1:, :], top[:-1, :])
bot[1:, :] = np.logical_and(bot[1:, :], bot[:-1, :])
self._lonStart = np.argmin((bot | left | top).all(0)) - 1 - index_pad
self._lonStop = self._lonN - np.argmin((bot | top | right).all(0)[::-1]) + 1 + index_pad
self._latStart = np.argmin((bot | left | right).all(1)) - 1 - index_pad
self._latStop = self._latN - np.argmax((left | top | right).all(1)[::-1]) + 1 + index_pad
else:
tmp = np.logical_and(self._allLons >= self.bounds.xMin, self._allLons <= self.bounds.xMax)
self._lonStart = np.argmax(tmp) - 1
self._lonStop = self._lonStart + 1 + np.argmin(tmp[self._lonStart + 1:]) + 1
tmp = np.logical_and(self._allLats >= self.bounds.yMin, self._allLats <= self.bounds.yMax)
self._latStart = np.argmax(tmp) - 1
self._latStop = self._latStart + 1 +
completion: np.argmin(tmp[self._latStart + 1:])
api: numpy.argmin
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wave_function.py"""
import itertools
import pytest
import numpy as np
import cirq
def assert_dirac_notation_numpy(vec, expected, decimals=2):
assert cirq.dirac_notation(np.array(vec), decimals=decimals) == expected
def assert_dirac_notation_python(vec, expected, decimals=2):
assert cirq.dirac_notation(vec, decimals=decimals) == expected
def test_state_mixin():
class TestClass(cirq.StateVectorMixin):
def state_vector(self) -> np.ndarray:
return np.array([0, 0, 1, 0])
qubits = cirq.LineQubit.range(2)
test = TestClass(qubit_map={qubits[i]: i for i in range(2)})
assert test.dirac_notation() == '|10⟩'
np.testing.assert_almost_equal(test.bloch_vector_of(qubits[0]),
np.array([0, 0, -1]))
np.testing.assert_almost_equal(test.density_matrix_of(qubits[0:1]),
np.array([[0, 0], [0, 1]]))
def test_bloch_vector_simple_H_zero():
sqrt = np.sqrt(0.5)
H_state = np.array([sqrt, sqrt])
bloch = cirq.bloch_vector_from_state_vector(H_state, 0)
desired_simple = np.array([1,0,0])
np.testing.assert_array_almost_equal(bloch, desired_simple)
def test_bloch_vector_simple_XH_zero():
sqrt = np.sqrt(0.5)
XH_state = np.array([sqrt, sqrt])
bloch = cirq.bloch_vector_from_state_vector(XH_state, 0)
desired_simple = np.array([1,0,0])
np.testing.assert_array_almost_equal(bloch, desired_simple)
def test_bloch_vector_simple_YH_zero():
sqrt = np.sqrt(0.5)
YH_state = np.array([-1.0j * sqrt, 1.0j * sqrt])
bloch = cirq.bloch_vector_from_state_vector(YH_state, 0)
desired_simple = np.array([-1,0,0])
np.testing.assert_array_almost_equal(bloch, desired_simple)
def test_bloch_vector_simple_ZH_zero():
sqrt = np.sqrt(0.5)
ZH_state = np.array([sqrt, -sqrt])
bloch = cirq.bloch_vector_from_state_vector(ZH_state, 0)
desired_simple = np.array([-1,0,0])
np.testing.assert_array_almost_equal(bloch, desired_simple)
def test_bloch_vector_simple_TH_zero():
sqrt = np.sqrt(0.5)
TH_state = np.array([sqrt, 0.5+0.5j])
bloch = cirq.bloch_vector_from_state_vector(TH_state, 0)
desired_simple = np.array([sqrt,sqrt,0])
np.testing.assert_array_almost_equal(bloch, desired_simple)
def test_bloch_vector_equal_sqrt3():
sqrt3 = 1/np.sqrt(3)
test_state = np.array([0.888074, 0.325058 + 0.325058j])
bloch = cirq.bloch_vector_from_state_vector(test_state, 0)
desired_simple = np.array([sqrt3,sqrt3,sqrt3])
np.testing.assert_array_almost_equal(bloch, desired_simple)
def test_bloch_vector_multi_pure():
HH_state = np.array([0.5,0.5,0.5,0.5])
bloch_0 = cirq.bloch_vector_from_state_vector(HH_state, 0)
bloch_1 = cirq.bloch_vector_from_state_vector(HH_state, 1)
desired_simple = np.array([1,0,0])
np.testing.assert_array_almost_equal(bloch_1, desired_simple)
np.testing.assert_array_almost_equal(bloch_0, desired_simple)
def test_bloch_vector_multi_mixed():
sqrt = np.sqrt(0.5)
HCNOT_state = np.array([sqrt, 0., 0., sqrt])
bloch_0 = cirq.bloch_vector_from_state_vector(HCNOT_state, 0)
bloch_1 = cirq.bloch_vector_from_state_vector(HCNOT_state, 1)
zero = np.zeros(3)
np.testing.assert_array_almost_equal(bloch_0, zero)
np.testing.assert_array_almost_equal(bloch_1, zero)
RCNOT_state = np.array([0.90612745, -0.07465783j,
-0.37533028j, 0.18023996])
bloch_mixed_0 = cirq.bloch_vector_from_state_vector(RCNOT_state, 0)
bloch_mixed_1 = cirq.bloch_vector_from_state_vector(RCNOT_state, 1)
true_mixed_0 = np.array([0., -0.6532815, 0.6532815])
true_mixed_1 = np.array([0., 0., 0.9238795])
np.testing.assert_array_almost_equal(true_mixed_0, bloch_mixed_0)
np.testing.assert_array_almost_equal(true_mixed_1, bloch_mixed_1)
def test_bloch_vector_multi_big():
big_H_state = np.array([0.1767767] * 32)
desired_simple = np.array([1,0,0])
for qubit in range(0, 5):
bloch_i = cirq.bloch_vector_from_state_vector(big_H_state, qubit)
np.testing.assert_array_almost_equal(bloch_i, desired_simple)
def test_bloch_vector_invalid():
with pytest.raises(ValueError):
_ = cirq.bloch_vector_from_state_vector(
np.array([0.5, 0.5, 0.5]), 0)
with pytest.raises(IndexError):
_ = cirq.bloch_vector_from_state_vector(
np.array([0.5, 0.5,0.5,0.5]), -1)
with pytest.raises(IndexError):
_ = cirq.bloch_vector_from_state_vector(
np.array([0.5, 0.5,0.5,0.5]), 2)
def test_density_matrix():
test_state = np.array([0.-0.35355339j, 0.+0.35355339j, 0.-0.35355339j,
0.+0.35355339j, 0.+0.35355339j, 0.-0.35355339j, 0.+0.35355339j,
0.-0.35355339j])
full_rho = cirq.density_matrix_from_state_vector(test_state)
np.testing.assert_array_almost_equal(full_rho,
np.outer(test_state, np.conj(test_state)))
rho_one = cirq.density_matrix_from_state_vector(test_state, [1])
true_one = np.array([[0.5+0.j, 0.5+0.j], [0.5+0.j, 0.5+0.j]])
np.testing.assert_array_almost_equal(rho_one, true_one)
rho_two_zero = cirq.density_matrix_from_state_vector(test_state, [0,2])
true_two_zero = np.array([[ 0.25+0.j, -0.25+0.j, -0.25+0.j, 0.25+0.j],
[-0.25+0.j, 0.25+0.j, 0.25+0.j, -0.25+0.j],
[-0.25+0.j, 0.25+0.j, 0.25+0.j, -0.25+0.j],
[ 0.25+0.j, -0.25+0.j, -0.25+0.j, 0.25+0.j]])
np.testing.assert_array_almost_equal(rho_two_zero, true_two_zero)
# two and zero will have same single qubit density matrix.
rho_two = cirq.density_matrix_from_state_vector(test_state, [2])
true_two = np.array([[0.5+0.j, -0.5+0.j], [-0.5+0.j, 0.5+0.j]])
np.testing.assert_array_almost_equal(rho_two, true_two)
rho_zero = cirq.density_matrix_from_state_vector(test_state, [0])
np.testing.assert_array_almost_equal(rho_zero, true_two)
def test_density_matrix_invalid():
bad_state = np.array([0.5,0.5,0.5])
good_state = np.array([0.5,0.5,0.5,0.5])
with pytest.raises(ValueError):
_ = cirq.density_matrix_from_state_vector(bad_state)
with pytest.raises(ValueError):
_ = cirq.density_matrix_from_state_vector(bad_state, [0, 1])
with pytest.raises(IndexError):
_ = cirq.density_matrix_from_state_vector(good_state, [-1, 0, 1])
with pytest.raises(IndexError):
_ = cirq.density_matrix_from_state_vector(good_state, [-1])
def test_dirac_notation():
sqrt = np.sqrt(0.5)
exp_pi_2 = 0.5 + 0.5j
assert_dirac_notation_numpy([0, 0], "0")
assert_dirac_notation_python([1], "|⟩")
assert_dirac_notation_numpy([sqrt, sqrt], "0.71|0⟩ + 0.71|1⟩")
assert_dirac_notation_python([-sqrt, sqrt], "-0.71|0⟩ + 0.71|1⟩")
assert_dirac_notation_numpy([sqrt, -sqrt], "0.71|0⟩ - 0.71|1⟩")
assert_dirac_notation_python([-sqrt, -sqrt], "-0.71|0⟩ - 0.71|1⟩")
assert_dirac_notation_numpy([sqrt, 1j * sqrt], "0.71|0⟩ + 0.71j|1⟩")
assert_dirac_notation_python([sqrt, exp_pi_2], "0.71|0⟩ + (0.5+0.5j)|1⟩")
assert_dirac_notation_numpy([exp_pi_2, -sqrt], "(0.5+0.5j)|0⟩ - 0.71|1⟩")
assert_dirac_notation_python([exp_pi_2, 0.5 - 0.5j],
"(0.5+0.5j)|0⟩ + (0.5-0.5j)|1⟩")
assert_dirac_notation_numpy([0.5, 0.5, -0.5, -0.5],
"0.5|00⟩ + 0.5|01⟩ - 0.5|10⟩ - 0.5|11⟩")
assert_dirac_notation_python([0.71j, 0.71j], "0.71j|0⟩ + 0.71j|1⟩")
def test_dirac_notation_partial_state():
sqrt = np.sqrt(0.5)
exp_pi_2 = 0.5 + 0.5j
assert_dirac_notation_numpy([1, 0], "|0⟩")
assert_dirac_notation_python([1j, 0], "1j|0⟩")
assert_dirac_notation_numpy([0, 1], "|1⟩")
assert_dirac_notation_python([0, 1j], "1j|1⟩")
assert_dirac_notation_numpy([sqrt, 0, 0, sqrt], "0.71|00⟩ + 0.71|11⟩")
assert_dirac_notation_python([sqrt, sqrt, 0, 0], "0.71|00⟩ + 0.71|01⟩")
assert_dirac_notation_numpy([exp_pi_2, 0, 0, exp_pi_2],
"(0.5+0.5j)|00⟩ + (0.5+0.5j)|11⟩")
assert_dirac_notation_python([0, 0, 0, 1], "|11⟩")
def test_dirac_notation_precision():
sqrt = np.sqrt(0.5)
assert_dirac_notation_numpy([sqrt, sqrt], "0.7|0⟩ + 0.7|1⟩", decimals=1)
assert_dirac_notation_python([sqrt, sqrt],
"0.707|0⟩ + 0.707|1⟩",
decimals=3)
def test_to_valid_state_vector():
np.testing.assert_almost_equal(cirq.to_valid_state_vector(
np.array([1.0, 0.0, 0.0, 0.0], dtype=np.complex64), 2),
np.array([1.0, 0.0, 0.0, 0.0]))
np.testing.assert_almost_equal(cirq.to_valid_state_vector(
np.array([0.0, 1.0, 0.0, 0.0], dtype=np.complex64), 2),
np.array([0.0, 1.0, 0.0, 0.0]))
np.testing.assert_almost_equal(cirq.to_valid_state_vector(0, 2),
np.array([1.0, 0.0, 0.0, 0.0]))
np.testing.assert_almost_equal(cirq.to_valid_state_vector(1, 2),
np.array([0.0, 1.0, 0.0, 0.0]))
def test_invalid_to_valid_state_vector():
with pytest.raises(ValueError):
_ = cirq.to_valid_state_vector(
np.array([1.0, 0.0], dtype=np.complex64), 2)
with pytest.raises(ValueError):
_ = cirq.to_valid_state_vector(-1, 2)
with pytest.raises(ValueError):
_ = cirq.to_valid_state_vector(5, 2)
with pytest.raises(TypeError):
_ = cirq.to_valid_state_vector('not an int', 2)
def test_check_state():
cirq.validate_normalized_state(
np.array([0.5, 0.5, 0.5, 0.5], dtype=np.complex64),
2)
with pytest.raises(ValueError):
cirq.validate_normalized_state(np.array([1, 1], dtype=np.complex64), 2)
with pytest.raises(ValueError):
cirq.validate_normalized_state(
np.array([1.0, 0.2, 0.0, 0.0], dtype=np.complex64), 2)
with pytest.raises(ValueError):
cirq.validate_normalized_state(
np.array([1.0, 0.0, 0.0, 0.0], dtype=np.float64), 2)
def test_sample_state_big_endian():
results = []
for x in range(8):
state = cirq.to_valid_state_vector(x, 3)
sample = cirq.sample_state_vector(state, [2, 1, 0])
results.append(sample)
expecteds = [[list(reversed(x))] for x in
list(itertools.product([False, True], repeat=3))]
for result, expected in zip(results, expecteds):
np.testing.assert_equal(result, expected)
def test_sample_state_partial_indices():
for index in range(3):
for x in range(8):
state = cirq.to_valid_state_vector(x, 3)
np.testing.assert_equal(cirq.sample_state_vector(state, [index]),
[[bool(1 & (x >> (2 - index)))]])
def test_sample_state_partial_indices_order():
for x in range(8):
state = cirq.to_valid_state_vector(x, 3)
expected = [[bool(1 & (x >> 0)), bool(1 & (x >> 1))]]
np.testing.assert_equal(cirq.sample_state_vector(state, [2, 1]),
expected)
def test_sample_state_partial_indices_all_orders():
for perm in itertools.permutations([0, 1, 2]):
for x in range(8):
state = cirq.to_valid_state_vector(x, 3)
expected = [[bool(1 & (x >> (2 - p))) for p in perm]]
np.testing.assert_equal(cirq.sample_state_vector(state, perm),
expected)
def test_sample_state():
state = np.zeros(8, dtype=np.complex64)
state[0] = 1 / np.sqrt(2)
state[2] = 1 / np.sqrt(2)
for _ in range(10):
sample = cirq.sample_state_vector(state, [2, 1, 0])
assert (np.array_equal(sample, [[False, False, False]])
or np.array_equal(sample, [[False, True, False]]))
# Partial sample is correct.
for _ in range(10):
np.testing.assert_equal(cirq.sample_state_vector(state, [2]), [[False]])
np.testing.assert_equal(cirq.sample_state_vector(state, [0]), [[False]])
def test_sample_empty_state():
state = np.array([])
np.testing.assert_almost_equal(cirq.sample_state_vector(state, []),
np.zeros(shape=(1,0)))
def test_sample_no_repetitions():
state = cirq.to_valid_state_vector(0, 3)
np.testing.assert_almost_equal(
cirq.sample_state_vector(state, [1], repetitions=0),
np.zeros(shape=(0, 1)))
np.testing.assert_almost_equal(
cirq.sample_state_vector(state, [1, 2], repetitions=0),
np.zeros(shape=(0, 2)))
def test_sample_state_repetitions():
for perm in itertools.permutations([0, 1, 2]):
for x in range(8):
state = cirq.to_valid_state_vector(x, 3)
expected = [[bool(1 & (x >> (2 - p))) for p in perm]] * 3
result = cirq.sample_state_vector(state, perm, repetitions=3)
np.testing.assert_equal(result, expected)
def test_sample_state_negative_repetitions():
state = cirq.to_valid_state_vector(0, 3)
with pytest.raises(ValueError, match='-1'):
cirq.sample_state_vector(state, [1], repetitions=-1)
def test_sample_state_not_power_of_two():
with pytest.raises(ValueError, match='3'):
cirq.sample_state_vector(np.array([1, 0, 0]), [1])
with pytest.raises(ValueError, match='5'):
cirq.sample_state_vector(np.array([0, 1, 0, 0, 0]), [1])
def test_sample_state_index_out_of_range():
state = cirq.to_valid_state_vector(0, 3)
with pytest.raises(IndexError, match='-2'):
cirq.sample_state_vector(state, [-2])
with pytest.raises(IndexError, match='3'):
cirq.sample_state_vector(state, [3])
def test_sample_no_indices():
state = cirq.to_valid_state_vector(0, 3)
np.testing.assert_almost_equal(
cirq.sample_state_vector(state, []), np.zeros(shape=(1, 0)))
def test_sample_no_indices_repetitions():
state = cirq.to_valid_state_vector(0, 3)
np.testing.assert_almost_equal(
cirq.sample_state_vector(state, [], repetitions=2),
np.zeros(shape=(2, 0)))
def test_measure_state_computational_basis():
results = []
for x in range(8):
initial_state = cirq.to_valid_state_vector(x, 3)
bits, state = cirq.measure_state_vector(initial_state, [2, 1, 0])
results.append(bits)
np.testing.assert_almost_equal(state, initial_state)
expected = [list(reversed(x)) for x in
list(itertools.product([False, True], repeat=3))]
assert results == expected
def test_measure_state_reshape():
results = []
for x in range(8):
initial_state = np.reshape(cirq.to_valid_state_vector(x, 3), [2] * 3)
bits, state = cirq.measure_state_vector(initial_state, [2, 1, 0])
results.append(bits)
np.testing.assert_almost_equal(state, initial_state)
expected = [list(reversed(x)) for x in
list(itertools.product([False, True], repeat=3))]
assert results == expected
def test_measure_state_partial_indices():
for index in range(3):
for x in range(8):
initial_state = cirq.to_valid_state_vector(x, 3)
bits, state = cirq.measure_state_vector(initial_state, [index])
np.testing.assert_almost_equal(state, initial_state)
assert bits == [bool(1 & (x >> (2 - index)))]
def test_measure_state_partial_indices_order():
for x in range(8):
initial_state = cirq.to_valid_state_vector(x, 3)
bits, state = cirq.measure_state_vector(initial_state, [2, 1])
np.testing.assert_almost_equal(state, initial_state)
assert bits == [bool(1 & (x >> 0)), bool(1 & (x >> 1))]
def test_measure_state_partial_indices_all_orders():
for perm in itertools.permutations([0, 1, 2]):
for x in range(8):
initial_state = cirq.to_valid_state_vector(x, 3)
bits, state = cirq.measure_state_vector(initial_state, perm)
np.testing.assert_almost_equal(state, initial_state)
assert bits == [bool(1 & (x >> (2 - p))) for p in perm]
def test_measure_state_collapse():
initial_state = np.zeros(8, dtype=np.complex64)
initial_state[0] = 1 / np.sqrt(2)
initial_state[2] = 1 / np.sqrt(2)
for _ in range(10):
bits, state = cirq.measure_state_vector(initial_state, [2, 1, 0])
assert bits in [[False, False, False], [False, True, False]]
expected = np.zeros(8, dtype=np.complex64)
expected[2 if bits[1] else 0] = 1.0
np.testing.assert_almost_equal(state, expected)
assert state is not initial_state
# Partial sample is correct.
for _ in range(10):
bits, state = cirq.measure_state_vector(initial_state, [2])
np.testing.assert_almost_equal(state, initial_state)
assert bits == [False]
bits, state = cirq.measure_state_vector(initial_state, [0])
np.testing.assert_almost_equal(state, initial_state)
assert bits == [False]
def test_measure_state_out_is_state():
initial_state = np.zeros(8, dtype=np.complex64)
initial_state[0] = 1 / np.sqrt(2)
initial_state[2] = 1 / np.sqrt(2)
bits, state = cirq.measure_state_vector(initial_state, [2, 1, 0],
out=initial_state)
expected = np.zeros(8, dtype=np.complex64)
expected[2 if bits[1] else 0] = 1.0
np.testing.assert_array_almost_equal(initial_state, expected)
assert state is initial_state
def test_measure_state_out_is_not_state():
initial_state = np.zeros(8, dtype=np.complex64)
initial_state[0] = 1 / np.sqrt(2)
initial_state[2] = 1 / np.sqrt(2)
out = np.zeros_like(initial_state)
_, state = cirq.measure_state_vector(initial_state, [2, 1, 0], out=out)
assert out is not initial_state
assert out is state
def test_measure_state_not_power_of_two():
with pytest.raises(ValueError, match='3'):
_, _ = cirq.measure_state_vector(np.array([1, 0, 0]), [1])
with pytest.raises(ValueError, match='5'):
cirq.measure_state_vector(
completion: np.array([0, 1, 0, 0, 0])
api: numpy.array
import re
from scipy import misc
import numpy as np
# np.set_printoptions(threshold=np.nan)
import sys
import pandas as pd
import os
from config import Config as cfg
from libs.pyntcloud.pyntcloud import PyntCloud
import glob
from sklearn.model_selection import train_test_split
from itertools import compress
from config import DATA_TYPES_3D
from sklearn.decomposition import PCA
from colorama import Fore, Back, Style
######################################################################################################
######################################################################################################
def read(file):
if file.endswith('.float3'): return readFloat(file)
elif file.endswith('.flo'): return readFlow(file)
elif file.endswith('.ppm'): return readImage(file)
elif file.endswith('.pgm'): return readImage(file)
elif file.endswith('.png'): return readImage(file)
elif file.endswith('.jpg'): return readImage(file)
elif file.endswith('.pfm'): return readPFM(file)[0]
else: raise Exception('don\'t know how to read %s' % file)
def write(file, data):
if file.endswith('.float3'): return writeFloat(file, data)
elif file.endswith('.flo'): return writeFlow(file, data)
elif file.endswith('.ppm'): return writeImage(file, data)
elif file.endswith('.pgm'): return writeImage(file, data)
elif file.endswith('.png'): return writeImage(file, data)
elif file.endswith('.jpg'): return writeImage(file, data)
elif file.endswith('.pfm'): return writePFM(file, data)
else: raise Exception('don\'t know how to write %s' % file)
def readPFM(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header.decode("ascii") == 'PF':
color = True
elif header.decode("ascii") == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode("ascii"))
if dim_match:
width, height = list(map(int, dim_match.groups()))
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().decode("ascii").rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
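# Summary of the PFM layout parsed above (not from the original source): a 'PF' (colour)
# or 'Pf' (greyscale) line, a 'width height' line, a scale line whose sign encodes
# endianness (negative = little-endian), then raw float32 samples stored bottom-up.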
def writePFM(file, image, scale=1):
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write(('PF\n' if color else 'Pf\n').encode())
file.write('%d %d\n'.encode() % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
file.write('%f\n'.encode() % scale)
image.tofile(file)
def readFlow(name):
if name.endswith('.pfm') or name.endswith('.PFM'):
return readPFM(name)[0][:,:,0:2]
f = open(name, 'rb')
header = f.read(4)
if header.decode("utf-8") != 'PIEH':
raise Exception('Flow file header does not contain PIEH')
width = np.fromfile(f, np.int32, 1).squeeze()
height = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2))
return flow.astype(np.float32)
def readImage(name):
if name.endswith('.pfm') or name.endswith('.PFM'):
data = readPFM(name)[0]
if len(data.shape)==3:
return data[:,:,0:3]
else:
return data
return misc.imread(name)
def writeImage(name, data):
if name.endswith('.pfm') or name.endswith('.PFM'):
return writePFM(name, data, 1)
return misc.imsave(name, data)
def writeFlow(name, flow):
f = open(name, 'wb')
f.write('PIEH'.encode('utf-8'))
np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
flow = flow.astype(np.float32)
flow.tofile(f)
def readFloat(name):
f = open(name, 'rb')
if f.readline().decode("utf-8") != 'float\n':
raise Exception('float file %s did not contain <float> keyword' % name)
dim = int(f.readline())
dims = []
count = 1
for i in range(0, dim):
d = int(f.readline())
dims.append(d)
count *= d
dims = list(reversed(dims))
data = np.fromfile(f, np.float32, count).reshape(dims)
if dim > 2:
data = np.transpose(data, (2, 1, 0))
data = np.transpose(data, (1, 0, 2))
return data
def writeFloat(name, data):
f = open(name, 'wb')
dim=len(data.shape)
if dim>3:
raise Exception('bad float file dimension: %d' % dim)
f.write(('float\n').encode('ascii'))
f.write(('%d\n' % dim).encode('ascii'))
if dim == 1:
f.write(('%d\n' % data.shape[0]).encode('ascii'))
else:
f.write(('%d\n' % data.shape[1]).encode('ascii'))
f.write(('%d\n' % data.shape[0]).encode('ascii'))
for i in range(2, dim):
f.write(('%d\n' % data.shape[i]).encode('ascii'))
data = data.astype(np.float32)
if dim==2:
data.tofile(f)
else:
np.transpose(data, (2, 0, 1)).tofile(f)
######################################################################################################
######################################################################################################
######################################################################################################
######################################################################################################
def getBlackListDirs():
black_dirs_txt = "black_list_dirs.txt"
with open(black_dirs_txt, 'r') as f:
black_dirs = f.read().splitlines()
return black_dirs
def load_sequence(datatype3d_base_dir, sceneflow_base_dir, sample_base_dir, data_type):
samples = []
sceneflow_dir = os.path.join(sceneflow_base_dir, sample_base_dir)
for path in sorted(glob.glob(sceneflow_dir + "/*")):
sceneflow_path = path.replace('\\', '/')
sample_number_0 = os.path.basename(path).split('.')[0]
if data_type == DATA_TYPES_3D['POINTCLOUD']:
sample_path_0 = os.path.join(sample_base_dir, sample_number_0 + ".npy")
elif data_type == DATA_TYPES_3D['BOTH']:
sample_path_0 = os.path.join(sample_base_dir, sample_number_0 + ".npz")
sample_name = sample_base_dir.replace('/', '-') + "-" + sample_number_0
sample_number_1 = str(int(os.path.basename(path).split('.')[0]) + 1).zfill(4)
if data_type == DATA_TYPES_3D['POINTCLOUD']:
sample_path_1 = os.path.join(sample_base_dir, sample_number_1 + ".npy")
elif data_type == DATA_TYPES_3D['BOTH']:
sample_path_1 = os.path.join(sample_base_dir, sample_number_1 + ".npz")
datatype3d_path_0 = os.path.join(datatype3d_base_dir, sample_path_0)
datatype3d_path_1 = os.path.join(datatype3d_base_dir, sample_path_1)
sample = [datatype3d_path_0, datatype3d_path_1, sceneflow_path, sample_name]
samples.append(sample)
return samples
def sequence_exists(sceneflow_base_dir, sample_base_dir):
"""
Returns whether or not the path to a sequence exists
:param sceneflow_base_dir:
:param sample_base_dir:
:return:
"""
sequence_path = os.path.join(sceneflow_base_dir, sample_base_dir)
if os.path.isdir(sequence_path):
return True
else:
return False
def check_sequence_number(number):
"""
Checks if the sequence number ``number`` is a valid one
:param number:
:return:
"""
if number >= 750:
raise Exception("Sequences range from 0000 to 0749")
def load_files(input_base_dir, sceneflow_base_dir, data_split, data_type, sequences_to_use):
"""
Load numpy files containing the voxelgrids and the sceneflow groundtruth
:param dataset_path:
:return: list of path files for the voxelgrids and the sceneflow groundtruth
"""
black_list_dirs = getBlackListDirs()
all_samples = []
if sequences_to_use == "ALL":
## Use the whole dataset
for letter in os.listdir(os.path.join(sceneflow_base_dir, data_split)):
for number in os.listdir(os.path.join(sceneflow_base_dir, data_split, letter)):
sequence = os.path.join(letter, number)
sample_base_dir = os.path.join(data_split, sequence).replace('\\', '/')
if sample_base_dir in black_list_dirs:
continue
sequence_samples = load_sequence(input_base_dir, sceneflow_base_dir, sample_base_dir, data_type)
all_samples.append(sequence_samples)
else:
for sequence_to_use in sequences_to_use:
if sequence_to_use == "A" or sequence_to_use == "B" or sequence_to_use == "C":
"""Get a complete letter"""
letter = sequence_to_use
for number in os.listdir(os.path.join(sceneflow_base_dir, data_split, letter)):
sequence = os.path.join(letter, number)
sample_base_dir = os.path.join(data_split, sequence).replace('\\', '/')
if sample_base_dir in black_list_dirs:
continue
sequence_samples = load_sequence(input_base_dir, sceneflow_base_dir, sample_base_dir, data_type)
all_samples.append(sequence_samples)
elif "-" in sequence_to_use:
letter, numbers_range = sequence_to_use.split('/')
_from, _to = numbers_range.split('-')
_from, _to = int(_from), int(_to)
check_sequence_number(_from)
check_sequence_number(_to)
for number in range(_from, _to + 1):
number = str(number).zfill(4)
sequence = os.path.join(letter, number)
sample_base_dir = os.path.join(data_split, sequence).replace('\\', '/')
if sample_base_dir in black_list_dirs or not sequence_exists(sceneflow_base_dir, sample_base_dir):
continue
sequence_samples = load_sequence(input_base_dir, sceneflow_base_dir, sample_base_dir, data_type)
all_samples.append(sequence_samples)
else:
number = int(sequence_to_use.split('/')[1])
check_sequence_number(number)
sample_base_dir = os.path.join(data_split, sequence_to_use).replace('\\', '/')
if sample_base_dir in black_list_dirs:
raise Exception("Sequence to eval is in Black List!")
sequence_samples = load_sequence(input_base_dir, sceneflow_base_dir, sample_base_dir, data_type)
all_samples.append(sequence_samples)
final_samples = []
for sequence_samples in all_samples:
for sample in sequence_samples:
final_samples.append(sample)
return final_samples
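# A minimal usage sketch for load_files, assuming a hypothetical directory layout
# and that black_list_dirs.txt is present next to this script. Per the branches
# above, `sequences_to_use` accepts "ALL", a bare letter ("A"/"B"/"C"), a
# letter/number range ("A/0000-0049"), or a single sequence ("A/0005").
def _demo_load_files():
    samples = load_files(
        input_base_dir="dataset/voxels_features",   # hypothetical path
        sceneflow_base_dir="dataset/sceneflow",     # hypothetical path
        data_split="TRAIN",
        data_type=DATA_TYPES_3D['BOTH'],
        sequences_to_use=["A/0000-0009", "B"],
    )
    # each sample is [datatype3d_path_t0, datatype3d_path_t1, sceneflow_path, sample_name]
    print(len(samples), "samples found")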
def get_train_val_loader(dataset_dir, data_split, data_type, use_local, use_normal,
sequences_to_train=None, batch_size_train=1, batch_size_val=1,
validation_percentage=0.05):
"""
    Build the train and validation dataset loaders
    :param dataset_dir:
    :param data_split:
    :param data_type:
    :param batch_size_train:
    :param batch_size_val:
    :param validation_percentage:
    :return: train_loader, val_loader
"""
import torch.utils.data
from torch.utils.data.dataloader import default_collate
if cfg.model_name == "SiameseModel3D":
detection_collate = detection_collate_baseline_train
elif cfg.model_name == "SiamesePointNet":
detection_collate = detection_collate_pointnet_train
if data_type == DATA_TYPES_3D['POINTCLOUD']:
from loader import PointcloudDataset as Dataset
elif data_type == DATA_TYPES_3D['BOTH']:
if cfg.model_name == "SiameseModel3D":
from loader import SiameseBaselineDatasetTrain as Dataset
elif cfg.model_name == "SiamesePointNet":
from loader import SiamesePointNetDatasetTrain as Dataset
## Load files lists
if cfg.model_name == "SiameseModel3D":
vg_or_pcl_dir = os.path.join(dataset_dir, "pointcloud_voxelgrid")
else:
if use_local:
if use_normal:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_features_normals")
else:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_features")
else:
if use_normal:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_xyz_normals_features")
else:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_xyz_features")
sceneflow_dir = os.path.join(dataset_dir, "sceneflow")
samples = load_files(vg_or_pcl_dir, sceneflow_dir, data_split, data_type, sequences_to_train)
samples_train, samples_val = train_test_split(samples, test_size=validation_percentage,
random_state=20)
#####################################################################
## HELP: DO NOT REMOVE - USE TO GET THE SAMPLES IN VALIDATION SET ###
#####################################################################
# validation_samples = []
# for sample_val in samples_val:
# validation_samples.append(sample_val[-1])
# validation_samples.sort()
# with open("validation_samples.txt", "w") as f:
# for sample in validation_samples:
# f.write(sample + "\n")
#####################################################################
#####################################################################
## Create TRAIN loader
train_dataset = Dataset(samples_train)
print("Train Dataset's length:", len(train_dataset))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size_train, shuffle=True,
num_workers=8, collate_fn=detection_collate,
drop_last=True, pin_memory=False)
## Create VAL loader
val_dataset = Dataset(samples_val)
print("Val Dataset's length:", len(val_dataset))
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size_val, shuffle=True,
num_workers=8, collate_fn=detection_collate,
drop_last=True, pin_memory=False)
print("Number of training batches: ", len(train_loader),
"(Samples: ", str(len(train_loader) * batch_size_train), ")")
print("Number of val batches: ", len(val_loader),
"(Samples: ", str(len(val_loader) * batch_size_val), ")")
return train_loader, val_loader
def get_eval_loader(dataset_dir, data_split, data_type, use_local, use_normal,
sequences_to_eval=None, batch_size=1):
"""
    Build the evaluation dataset loader
    :param dataset_dir:
    :param data_split:
    :param data_type:
    :param batch_size:
    :return: eval_loader
"""
import torch.utils.data
from torch.utils.data.dataloader import default_collate
if cfg.model_name == "SiameseModel3D":
detection_collate = detection_collate_baseline_test
elif cfg.model_name == "SiamesePointNet":
detection_collate = detection_collate_pointnet_test
if data_type == DATA_TYPES_3D['POINTCLOUD']:
from loader import PointcloudDataset as Dataset
elif data_type == DATA_TYPES_3D['BOTH']:
if cfg.model_name == "SiameseModel3D":
from loader import SiameseBaselineDatasetTest as Dataset
elif cfg.model_name == "SiamesePointNet":
from loader import SiamesePointNetDatasetTest as Dataset
## Load files lists
if cfg.model_name == "SiameseModel3D":
vg_or_pcl_dir = os.path.join(dataset_dir, "pointcloud_voxelgrid")
else:
if use_local:
if use_normal:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_features_normals")
else:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_features")
else:
if use_normal:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_xyz_normals_features")
else:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_xyz_features")
sceneflow_dir = os.path.join(dataset_dir, "sceneflow")
samples = load_files(vg_or_pcl_dir, sceneflow_dir, data_split, data_type, sequences_to_eval)
    ## Create EVAL loader
eval_dataset = Dataset(samples)
print("eval Dataset's length:", len(eval_dataset))
eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=batch_size, shuffle=True,
num_workers=8, collate_fn=detection_collate,
drop_last=True, pin_memory=False)
print("Number of eval batches: ", len(eval_loader),
"(Samples: ", str(len(eval_loader) * batch_size), ")")
return eval_loader
#################################################################################################
#################################################################################################
#################################################################################################
#################################################################################################
def compute_voxelgrid_and_sceneflow(color_frame, of_frame, disp_frame, dispChange_frame,
data_type_3D):
# import time
# import matplotlib.pyplot as plt
# import cv2
height, width, _ = color_frame.shape
## Store our input data with high precision
# colors_np_A = color_frame.reshape(-1, 3)
of = np.asarray(of_frame, dtype=np.float64)
disp = np.asarray(disp_frame, dtype=np.float64)
dispChange = np.asarray(dispChange_frame, dtype=np.float64)
## Create our matrix of indices
indices = np.indices((height, width))
py, px = indices[0], indices[1]
## Get 3D Point Cloud
z = np.float64(cfg.baseline) * np.float64(cfg.fx) / disp
x = np.multiply((px - np.float64(cfg.cx)), z) / np.float64(cfg.fx)
y = np.multiply((py - np.float64(cfg.cy)), z) / np.float64(cfg.fy)
coordinates_np_matrix = np.dstack((x, y, z))
coordinates_np_matrix_cropped = coordinates_np_matrix[1:-1, 1:-1]
coordinates_np = coordinates_np_matrix_cropped.reshape(-1, 3)
## Normal map
A = coordinates_np_matrix[2:, 1:-1] - coordinates_np_matrix[0:-2, 1:-1]
B = coordinates_np_matrix[1:-1, 2:] - coordinates_np_matrix[1:-1, 0:-2]
normal_matrix = np.cross(A, B, axis=2)
norm = np.linalg.norm(normal_matrix, axis=2)
normal_matrix[:, :, 0] /= norm
normal_matrix[:, :, 1] /= norm
normal_matrix[:, :, 2] /= norm
normal_np = normal_matrix.reshape(-1, 3)
## For visualization
# normal += 1
# normal /= 2
# cv2.imshow("normal", normal)
# cv2.waitKey()
# exit()
## For visualization
## Compute scene flow (by first getting optical flow from input)
u = of[:, :, 0] # Optical flow in horizontal direction
v = of[:, :, 1] # optical flow in vertical direction
m = np.float64(cfg.baseline) / (disp + dispChange)
dX = np.multiply(m, u - np.divide(np.multiply(dispChange, px - np.float64(cfg.cx)), disp))
dY = np.multiply(m, v - np.divide(np.multiply(dispChange, py - np.float64(cfg.cy)), disp))
dZ = cfg.fx * cfg.baseline * ((1.0 / (disp + dispChange)) - (1.0 / disp))
sceneflow_np_matrix = np.dstack((dX, dY, dZ))
sceneflow_np_matrix_cropped = sceneflow_np_matrix[1:-1, 1:-1]
sceneflow_np = sceneflow_np_matrix_cropped.reshape(-1, 3)
if data_type_3D == DATA_TYPES_3D['POINTCLOUD']:
mask = coordinates_np[:, 2] <= cfg.max_z
coordinates_np = coordinates_np[mask]
normal_np = normal_np[mask]
sceneflow_np = sceneflow_np[mask]
return (coordinates_np, normal_np), sceneflow_np
points, normals, sceneflows = filter_pointcloud(coordinates_np, normal_np, sceneflow_np)
if points.size == 0:
return None, None
voxel_coords = ((points - np.array([cfg.xrange[0], cfg.yrange[0], cfg.zrange[0]]))
/ (cfg.vx, cfg.vy, cfg.vz)).astype(np.int32)
voxel_coords, inv_ind, voxel_counts = np.unique(voxel_coords, axis=0,
return_inverse=True, return_counts=True)
## NOTE: inv_ind (inverse indices) : for every point, the voxel index in which the point resides
## TODO: REMOVE VOXELS WHICH CONTAIN LESS THAN A CERTAIN NUMBER OF POINTS ##
# voxel_coords = voxel_coords[voxel_counts >= cfg.t]
# good_pts_mask = get_good_pts_mask(voxel_counts, inv_ind, len(points))
# points = points[good_pts_mask]
# normals = normals[good_pts_mask]
# sceneflows = sceneflows[good_pts_mask]
# inv_ind = inv_ind[good_pts_mask]
## TODO: REMOVE VOXELS WHICH CONTAIN LESS THAN A CERTAIN NUMBER OF POINTS ##
voxel_sceneflows = []
# max_pts_inv_ind = []
# voxel_pts_ind = []
for i in range(len(voxel_coords)):
mask = inv_ind == i
sfs = sceneflows[mask]
# pts_global_ind = np.asarray(list(compress(range(len(mask)), mask)), dtype=np.int32)
sfs = np.median(sfs, axis=0)
voxel_sceneflows.append(sfs)
# max_pts_inv_ind.append(inv_ind[pts_global_ind])
# voxel_pts_ind.append(pts_global_ind)
return (points, normals, voxel_coords, inv_ind, voxel_counts), (sceneflows, voxel_sceneflows)
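# A minimal numeric sketch of the depth and scene-flow formulas used above for a
# single pixel; the camera parameters (baseline, fx, fy, cx, cy) and the pixel
# measurements are made up for illustration only.
def _demo_sceneflow_formulas():
    baseline, fx, fy, cx, cy = 0.54, 1050.0, 1050.0, 480.0, 270.0
    px, py = 500.0, 300.0                 # pixel coordinates
    disp, dispChange = 42.0, -2.0         # disparity and disparity change
    u, v = 3.0, 1.0                       # optical flow components
    z = baseline * fx / disp              # depth from disparity
    x = (px - cx) * z / fx
    y = (py - cy) * z / fy
    m = baseline / (disp + dispChange)
    dX = m * (u - dispChange * (px - cx) / disp)
    dY = m * (v - dispChange * (py - cy) / disp)
    dZ = fx * baseline * ((1.0 / (disp + dispChange)) - (1.0 / disp))
    print("point:", (x, y, z), "scene flow:", (dX, dY, dZ))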
def preprocess_pointcloud(points, normals, sample_name, sceneflows=None):
pass
# if (points.size == 0):
# raise Exception(sample_name, "has no points with current ranges!")
#
# points, normals, sceneflows = filter_pointcloud(points, normals, sceneflows)
# points, normals, sceneflows = randomize(points, normals, sceneflows)
#
# voxel_coords = ((points - np.array([cfg.xrange[0], cfg.yrange[0], cfg.zrange[0]]))
# / (cfg.vx, cfg.vy, cfg.vz)).astype(np.int32)
#
# voxel_coords, inv_ind, voxel_counts = np.unique(voxel_coords, axis=0,
# return_inverse=True, return_counts=True)
#
# voxel_features = []
# voxel_sceneflows = [] if sceneflows is not None else None
# # max_pts_inv_ind = [] if sceneflows is not None else None
# # voxel_pts_ind = [] if sceneflows is not None else None
# for i in range(len(voxel_coords)):
# voxel = np.zeros((cfg.T, cfg.f), dtype=np.float64)
# mask = inv_ind == i
# n = normals[mask]
# sfs = sceneflows[mask] if sceneflows is not None else None
# # pts_global_ind = np.asarray(list(compress(range(len(mask)), mask)), dtype=np.int32)
#
# if voxel_counts[i] > cfg.T:
# pts = pts[:cfg.T, :]
# n = n[:cfg.T, :]
# sfs = sfs[:cfg.T, :] if sceneflows is not None else None
# # pts_global_ind = pts_global_ind[:cfg.T]
#
# ## augment the points with their coordinate in the voxel's reference system
# voxel[:pts.shape[0], :] = np.concatenate((pts, pts - centroid(pts), n), axis=1)
# voxel_features.append(voxel)
#
# if sceneflows is not None:
# sfs = np.median(sfs, axis=0)
# voxel_sceneflows.append(sfs)
# # max_pts_inv_ind.append(inv_ind[pts_global_ind])
# # voxel_pts_ind.append(pts_global_ind)
#
# if sceneflows is not None:
# voxel_sceneflows = np.array(voxel_sceneflows)
# # max_pts_inv_ind = np.concatenate(max_pts_inv_ind)
# # voxel_pts_ind = np.concatenate(voxel_pts_ind)
#
# return (points, np.array(voxel_features), voxel_coords, inv_ind), \
# (sceneflows, voxel_sceneflows)
def preprocess_pointnet(points, normals, voxel_coords, inv_ind, sceneflows=None):
points, normals, inv_ind, sceneflows = randomize(points, normals, inv_ind, sceneflows)
voxel_pts = []
voxel_features = []
for i in range(len(voxel_coords)):
pts_np = np.zeros((cfg.T, 3), dtype=np.float64)
normals_np = np.zeros((cfg.T, 3), dtype=np.float64)
mask = inv_ind == i
pts = points[mask]
n = normals[mask]
sfs = sceneflows[mask] if sceneflows is not None else None
if pts.shape[0] > cfg.T:
pts = pts[:cfg.T, :]
n = n[:cfg.T, :]
sfs = sfs[:cfg.T, :] if sceneflows is not None else None
pts_np[:pts.shape[0], :] = pts
normals_np[:pts.shape[0], :] = n
voxel_pts.append(pts_np)
voxel_features.append(normals_np)
return (points, np.array(voxel_pts), np.array(voxel_features), voxel_coords, inv_ind), \
sceneflows
def preprocess_voxelnet(points, normals, voxel_coords, inv_ind, sceneflows=None):
points, normals, inv_ind, sceneflows = randomize(points, normals, inv_ind, sceneflows)
voxel_features = []
for i in range(len(voxel_coords)):
voxel = np.zeros((cfg.T, 9), dtype=np.float32)
mask = inv_ind == i
pts = points[mask]
n = normals[mask]
if pts.shape[0] > cfg.T:
pts = pts[:cfg.T, :]
n = n[:cfg.T, :]
voxel[:pts.shape[0], :] = np.concatenate((pts, pts[:, :3] - centroid(pts), n), axis=1)
voxel_features.append(voxel)
return (points, np.array(voxel_features), voxel_coords, inv_ind), sceneflows
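# A minimal sketch of the pad-or-truncate-to-T packing performed per voxel in the
# two preprocess_* functions above, restated with an explicit T instead of the
# global cfg.T.
def _demo_voxel_padding():
    T = 4
    pts = np.arange(18, dtype=np.float64).reshape(6, 3)   # 6 points in one voxel
    voxel = np.zeros((T, 3), dtype=np.float64)
    kept = pts[:T] if pts.shape[0] > T else pts            # truncate to at most T points
    voxel[:kept.shape[0], :] = kept                        # zero-pad if fewer than T
    print(voxel)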
def get_good_pts_mask(voxel_counts, inv_ind, n_pts):
######################################################################
## REMOVE VOXELS WHICH CONTAIN LESS THAN A CERTAIN NUMBER OF POINTS ##
############## AND REMOVE ALSO THE CORRESPONDING POINTS ##############
## Get the indices of those bad voxels
bad_voxels_ind = np.where(voxel_counts < cfg.t)[0]
## Compute the indices of the points contained in those bad voxels
bad_pts_ind = np.concatenate([np.nonzero(inv_ind == bad)[0] for bad in bad_voxels_ind])
## Create a mask for the good points
good_pts_mask = np.ones(n_pts, dtype=bool)
good_pts_mask[bad_pts_ind] = False
return good_pts_mask
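# A minimal sketch of the masking logic above on toy inputs, with an explicit
# threshold t instead of the global cfg.t. Voxel 0 holds a single point, so that
# point is dropped while the points of the better-populated voxels are kept.
def _demo_good_pts_mask():
    voxel_counts = np.array([1, 3, 2])        # points per voxel
    inv_ind = np.array([1, 0, 1, 2, 1, 2])    # voxel index of every point
    t = 2                                     # stand-in for cfg.t
    bad_voxels_ind = np.where(voxel_counts < t)[0]
    bad_pts_ind = np.concatenate([np.nonzero(inv_ind == bad)[0] for bad in bad_voxels_ind])
    good_pts_mask = np.ones(len(inv_ind), dtype=bool)
    good_pts_mask[bad_pts_ind] = False
    print(good_pts_mask)   # [ True False  True  True  True  True]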
def centroid(pts):
length = pts.shape[0]
sum_x = np.sum(pts[:, 0])
sum_y = np.sum(pts[:, 1])
sum_z = np.sum(pts[:, 2])
return np.array([sum_x/length, sum_y/length, sum_z/length])
def compute_PCA(pts):
pca = PCA(n_components=3)
pca.fit(pts)
pca_score = pca.explained_variance_ratio_
V = pca.components_
# x_pca_axis, y_pca_axis, z_pca_axis = 0.2 * V
normal_vector = V[np.argmin(pca_score, axis=0)]
## VISUALIZE STUFF ##
# from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.pyplot as plt
# centr = centroid(pts)
# fig = plt.figure(1, figsize=(4, 3))
# ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=30, azim=20)
# ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], marker='+', alpha=.4)
# ax.quiver(centr[0], centr[1], centr[2], x_pca_axis[0], x_pca_axis[1], x_pca_axis[2], color='r')
# ax.quiver(centr[0], centr[1], centr[2], y_pca_axis[0], y_pca_axis[1], y_pca_axis[2], color='g')
# ax.quiver(centr[0], centr[1], centr[2], normal_vector[0], normal_vector[1], normal_vector[2], color='b')
# plt.show()
## VISUALIZE STUFF ##
return normal_vector
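# A minimal sketch of compute_PCA on points scattered (almost) in the z = 0 plane:
# the least-variance principal component is returned as the surface normal, so the
# result should be close to +/- [0, 0, 1].
def _demo_compute_PCA():
    rng = np.random.RandomState(0)
    pts = np.c_[rng.uniform(-1, 1, 200), rng.uniform(-1, 1, 200), 0.01 * rng.randn(200)]
    print(compute_PCA(pts))   # approximately +/- [0, 0, 1]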
def filter_pointcloud(points, normals, sceneflows=None):
pxs = points[:, 0]
pys = points[:, 1]
pzs = points[:, 2]
filter_x = np.where((pxs >= cfg.xrange[0]) & (pxs < cfg.xrange[1]))[0]
filter_y = np.where((pys >= cfg.yrange[0]) & (pys < cfg.yrange[1]))[0]
filter_z = np.where((pzs >= cfg.zrange[0]) & (pzs < cfg.zrange[1]))[0]
filter_xy = np.intersect1d(filter_x, filter_y)
filter_xyz = np.intersect1d(filter_xy, filter_z)
if sceneflows is not None:
sceneflows = sceneflows[filter_xyz]
return points[filter_xyz], normals[filter_xyz], sceneflows
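# A minimal sketch of the cropping idea above with explicit ranges instead of the
# global cfg.xrange/yrange/zrange: per-axis index sets are intersected to keep
# only the points inside the box.
def _demo_range_filter():
    points = np.array([[0.5, 0.5, 2.0],
                       [5.0, 0.5, 2.0],     # outside the x range
                       [0.5, 0.5, 50.0]])   # outside the z range
    xrange_, yrange_, zrange_ = (-3, 3), (-2, 2), (0, 35)
    fx = np.where((points[:, 0] >= xrange_[0]) & (points[:, 0] < xrange_[1]))[0]
    fy = np.where((points[:, 1] >= yrange_[0]) & (points[:, 1] < yrange_[1]))[0]
    fz = np.where((points[:, 2] >= zrange_[0]) & (points[:, 2] < zrange_[1]))[0]
    keep = np.intersect1d(np.intersect1d(fx, fy), fz)
    print(keep)   # [0] -> only the first point survives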
def randomize(points, normals, inv_ind, sceneflows=None):
if sceneflows is not None:
        assert points.shape == normals.shape == sceneflows.shape, "Inputs with different shapes in randomize 1"
else:
assert points.shape==normals.shape, "Inputs with different shapes in randomize 1"
assert points.shape[0] == len(inv_ind), "Inputs with different shapes in randomize 2"
# Generate the permutation index array.
permutation = np.random.permutation(points.shape[0])
# Shuffle the arrays by giving the permutation in the square brackets.
shuffled_points = points[permutation]
shuffled_normals = normals[permutation]
shuffled_inv_ind = inv_ind[permutation]
shuffled_sceneflows = sceneflows[permutation] if sceneflows is not None else None
return shuffled_points, shuffled_normals, shuffled_inv_ind, shuffled_sceneflows
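# A minimal sketch showing that randomize applies one shared permutation, so each
# point stays aligned with its normal, voxel index and scene flow after shuffling.
# The toy arrays are made up for illustration.
def _demo_randomize():
    pts = np.arange(12, dtype=np.float64).reshape(4, 3)
    normals = pts + 100.0
    sfs = pts + 200.0
    inv_ind = np.array([0, 0, 1, 1])
    s_pts, s_normals, s_inv, s_sfs = randomize(pts, normals, inv_ind, sfs)
    assert np.allclose(s_normals, s_pts + 100.0)
    assert np.allclose(s_sfs, s_pts + 200.0)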
#######################
## COLLATE FUNCTIONS ##
#######################
def detection_collate_baseline_train(batch):
voxel_coords_t0 = []
voxel_coords_t1 = []
voxel_sceneflows = []
sample_names = []
for i, sample in enumerate(batch):
# Pointcloud data t0
voxel_coords_t0.append(
np.pad(sample[0], ((0, 0), (1, 0)),
mode='constant', constant_values=i))
# Pointcloud data t1
voxel_coords_t1.append(
np.pad(sample[1], ((0, 0), (1, 0)),
mode='constant', constant_values=i))
voxel_sceneflows.append(sample[2])
sample_names.append(sample[3])
return np.concatenate(voxel_coords_t0), \
np.concatenate(voxel_coords_t1), \
|
np.concatenate(voxel_sceneflows)
|
numpy.concatenate
|
import matplotlib.pyplot as plt
import numpy
from numpy import linalg as linalg
import math
from amanzi_xml.observations.ObservationXMLv2 import ObservationXMLv2 as ObsXML
from amanzi_xml.observations.ObservationData import ObservationData as ObsDATA
import amanzi_xml.utils.search as search
import model_hantush_anisotropic_2d
import prettytable
# load input xml file
# -- create an ObservationXML object
def loadInputXML(filename):
Obs_xml = ObsXML(filename)
return Obs_xml
# load the data file
# -- use above xml object to get observation filename
# -- create an ObservationData object
# -- load the data using this object
def loadDataFile(Obs_xml):
output_file = Obs_xml.getObservationFilename()
Obs_data = ObsDATA("amanzi-output/"+output_file)
Obs_data.getObservationData()
coords = Obs_xml.getAllCoordinates()
for obs in Obs_data.observations.values():
region = obs.region
obs.coordinate = coords[region]
return Obs_data
def plotHantushObservations(Obs_xml, Obs_data, axes1):
colors = ['g','r','b']
i = 0
for obs in Obs_data.observations.values():
drawdown = numpy.array([obs.data])
axes1.scatter(obs.times, drawdown, marker='s', s=25, c=colors[i])
i = i + 1
return colors
def plotHantushAnalytic(filename, axes1, Obs_xml, Obs_data):
colors = ['g','r','b']
mymodel = model_hantush_anisotropic_2d.createFromXML(filename)
tindex =
|
numpy.arange(118)
|
numpy.arange
|
import warnings
import numpy as np
from scipy.stats import norm, lognorm, beta, betaprime, gamma, uniform, gumbel_r
from .util import maxfloat
from .fast_truncnorm import truncnorm
META_NOISE_MODELS = (
'beta', 'beta_std',
'norm', 'lognorm', 'lognorm_varstd', 'betaprime', 'gamma',
'censored_norm', 'censored_gumbel', 'censored_lognorm', 'censored_lognorm_varstd',
'censored_betaprime', 'censored_gamma',
'truncated_norm', 'truncated_norm_lookup', 'truncated_norm_fit',
'truncated_gumbel', 'truncated_gumbel_lookup',
'truncated_lognorm', 'truncated_lognorm_varstd'
)
META_NOISE_MODELS_REPORT = ('beta', 'beta_std')
META_NOISE_MODELS_READOUT = ('lognorm', 'lognorm_varstd', 'betaprime', 'gamma')
def _lognorm_params(mode, stddev):
"""
Compute scipy lognorm's shape and scale for a given mode and SD
The analytical formula is exact and was computed with WolframAlpha.
Parameters
----------
mode : float or array-like
Mode of the distribution.
stddev : float or array-like
Standard deviation of the distribution.
Returns
----------
shape : float
Scipy lognorm shape parameter.
scale : float
Scipy lognorm scale parameter.
"""
mode = np.maximum(1e-5, mode)
a = stddev ** 2 / mode ** 2
x = 1 / 4 * np.sqrt(np.maximum(1e-300, -(16 * (2 / 3) ** (1 / 3) * a) / (
np.sqrt(3) * np.sqrt(256 * a ** 3 + 27 * a ** 2) - 9 * a) ** (1 / 3) +
2 * (2 / 3) ** (2 / 3) * (
np.sqrt(3) * np.sqrt(256 * a ** 3 + 27 * a ** 2) - 9 * a) ** (
1 / 3) + 1)) + \
1 / 2 * np.sqrt(
(4 * (2 / 3) ** (1 / 3) * a) / (np.sqrt(3) * np.sqrt(256 * a ** 3 + 27 * a ** 2) - 9 * a) ** (1 / 3) -
(np.sqrt(3) * np.sqrt(256 * a ** 3 + 27 * a ** 2) - 9 * a) ** (1 / 3) / (2 ** (1 / 3) * 3 ** (2 / 3)) +
1 / (2 * np.sqrt(np.maximum(1e-300, -(16 * (2 / 3) ** (1 / 3) * a) / (
np.sqrt(3) * np.sqrt(256 * a ** 3 + 27 * a ** 2) - 9 * a) ** (1 / 3) +
2 * (2 / 3) ** (2 / 3) * (
np.sqrt(3) * np.sqrt(256 * a ** 3 + 27 * a ** 2) - 9 * a) ** (
1 / 3) + 1))) + 1 / 2) + \
1 / 4
shape = np.sqrt(np.log(x))
scale = mode * x
return shape, scale
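# A minimal numerical check (illustrative only) that the returned shape/scale
# reproduce the requested mode and standard deviation; the mode of a scipy
# lognorm(s, scale) distribution is scale * exp(-s**2).
def _check_lognorm_params(mode=0.5, stddev=0.2):
    shape, scale = _lognorm_params(mode, stddev)
    dist = lognorm(loc=0, scale=scale, s=shape)
    print(scale * np.exp(-shape ** 2), dist.std())   # expected to be ~mode and ~stddev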
class truncated_lognorm: # noqa
"""
Implementation of the truncated lognormal distribution.
Only the upper truncation bound is supported as the lognormal distribution is naturally lower-bounded at zero.
Parameters
----------
loc : float or array-like
Scipy lognorm's loc parameter.
scale : float or array-like
Scipy lognorm's scale parameter.
s : float or array-like
Scipy lognorm's s parameter.
b : float or array-like
Upper truncation bound.
"""
def __init__(self, loc, scale, s, b):
self.loc = loc
self.scale = scale
self.s = s
self.b = b
self.dist = lognorm(loc=loc, scale=scale, s=s)
self.lncdf_b = self.dist.cdf(self.b)
def pdf(self, x):
pdens = (x <= self.b) * self.dist.pdf(x) / self.lncdf_b
return pdens
def cdf(self, x):
cdens = (x > self.b) + (x <= self.b) * self.dist.cdf(x) / self.lncdf_b
return cdens
def rvs(self, size=None):
if size is None:
if hasattr(self.scale, '__len__'):
size = self.scale.shape
else:
size = 1
cdens = uniform(loc=0, scale=self.b).rvs(size)
x = self.cdf_inv(cdens)
return x
def cdf_inv(self, cdens):
x = (cdens >= 1) * self.b + (cdens < 1) * self.dist.ppf(cdens * self.lncdf_b)
return x
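# A minimal usage sketch with made-up parameters: the pdf is re-normalised by
# lncdf_b, so it integrates to ~1 on [0, b], and the cdf reaches 1 at the upper
# truncation bound b.
def _demo_truncated_lognorm():
    dist = truncated_lognorm(loc=0, scale=0.4, s=0.5, b=1.0)
    x = np.linspace(1e-6, 1.0, 10001)
    print(np.sum(dist.pdf(x)) * (x[1] - x[0]))   # ~1.0 (simple Riemann sum)
    print(dist.cdf(1.0))                         # 1.0 at the truncation bound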
class truncated_gumbel: # noqa
"""
Implementation of the truncated Gumbel distribution.
Parameters
----------
loc : float or array-like
Scipy gumbel_r's loc parameter.
scale : float or array-like
Scipy gumbel_r's scale parameter.
a : float or array-like
Lower truncation bound.
b : float or array-like
Upper truncation bound.
"""
def __init__(self, loc, scale, a=0, b=np.inf):
self.loc = loc
self.scale = scale
self.a = a
self.b = b
self.dist = gumbel_r(loc=loc, scale=scale)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
self.cdf_to_a = self.dist.cdf(self.a)
self.cdf_a_to_b = self.dist.cdf(self.b) - self.cdf_to_a
def pdf(self, x):
pdens = ((x > self.a) & (x < self.b)) * self.dist.pdf(maxfloat(x)).astype(np.float64) / self.cdf_a_to_b
return pdens
def cdf(self, x):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
cdf_to_x = self.dist.cdf(x)
cdens = (x >= self.b) + ((x > self.a) & (x < self.b)) * (cdf_to_x - self.cdf_to_a) / self.cdf_a_to_b
return cdens
def rvs(self, size=None):
if size is None:
if hasattr(self.scale, '__len__'):
size = self.loc.shape
else:
size = 1
cdens = uniform(loc=self.cdf_to_a, scale=self.cdf_a_to_b).rvs(size)
x = self.cdf_inv(cdens)
return x
def cdf_inv(self, cdens):
x = (cdens > self.cdf_to_a) * self.dist.ppf(cdens)
return x
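# A minimal usage sketch with made-up parameters: for a lower bound of a = 0, the
# inverse-cdf sampling in rvs keeps all draws inside the truncation interval.
def _demo_truncated_gumbel():
    dist = truncated_gumbel(loc=0.3, scale=0.1, a=0.0, b=1.0)
    samples = dist.rvs(size=1000)
    print(samples.min() >= 0.0, samples.max() <= 1.0)   # True True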
def get_dist(meta_noise_model, mode, scale, meta_noise_type='noisy_report', lookup_table=None):
"""
Helper function to select appropriately parameterized metacognitive noise distributions.
"""
if meta_noise_model not in META_NOISE_MODELS:
raise ValueError(f"Unkonwn distribution '{meta_noise_model}'.")
elif (meta_noise_type == 'noisy_report') and meta_noise_model in META_NOISE_MODELS_READOUT:
raise ValueError(f"Distribution '{meta_noise_model}' is only valid for noisy-readout models.")
elif (meta_noise_type == 'noisy_readout') and meta_noise_model in META_NOISE_MODELS_REPORT:
raise ValueError(f"Distribution '{meta_noise_model}' is only valid for noisy-report models.")
if meta_noise_model.startswith('censored_'):
distname = meta_noise_model[meta_noise_model.find('_')+1:]
else:
distname = meta_noise_model
if distname == 'norm':
dist = norm(loc=mode, scale=scale)
elif distname == 'gumbel':
dist = gumbel_r(loc=mode, scale=scale * np.sqrt(6) / np.pi)
elif distname == 'lognorm_varstd':
dist = lognorm(loc=0, scale=np.maximum(1e-5, mode) * np.exp(scale ** 2), s=scale)
elif distname == 'lognorm':
shape, scale = _lognorm_params(np.maximum(1e-5, mode), scale)
dist = lognorm(loc=0, scale=scale, s=shape)
elif distname == 'beta':
a = mode * (1 / scale - 2) + 1
b = (1 - mode) * (1 / scale - 2) + 1
dist = beta(a, b)
elif distname == 'beta_std':
mode = np.maximum(1e-5, np.minimum(1-1e-5, mode))
a = 1 + (1 - mode) * mode**2 / scale**2
b = (1/mode - 1) * a - 1/mode + 2
dist = beta(a, b)
elif distname == 'betaprime':
mode = np.minimum(1 / scale - 1 - 1e-3, mode)
a = (mode * (1 / scale + 1) + 1) / (mode + 1)
b = (1 / scale - mode - 1) / (mode + 1)
dist = betaprime(a, b)
elif distname == 'gamma':
a = (mode + np.sqrt(mode**2 + 4*scale**2))**2 / (4*scale**2)
b = (mode + np.sqrt(mode**2 + 4*scale**2)) / (2*scale**2)
dist = gamma(a=a, loc=0, scale=1/b)
elif distname.startswith('truncated_'):
if meta_noise_type == 'noisy_report':
if distname.endswith('_lookup') and (distname.startswith('truncated_norm_') or
distname.startswith('truncated_gumbel_')):
m_ind = np.searchsorted(lookup_table['mode'], mode)
scale = lookup_table['scale'][np.abs(lookup_table['truncscale'][m_ind] - scale).argmin(axis=-1)]
elif distname == 'truncated_norm_fit':
mode_ = mode.copy()
mode_[mode_ < 0.5] = 1 - mode_[mode_ < 0.5]
scale = np.minimum(scale, 1/np.sqrt(12) - 1e-3)
alpha1, beta1, alpha2, beta2, theta = -0.1512684, 4.15388204, -1.01723445, 2.47820677, 0.73799941
scale = scale / (beta1*mode_**alpha1*np.sqrt(1/12-scale**2)) * (mode_ < theta) + \
(np.arctanh(2*np.sqrt(3)*scale) / (beta2*mode_**alpha2)) * (mode_ >= theta)
if distname.startswith('truncated_norm'):
dist = truncnorm(-mode / scale, (1 - mode) / scale, loc=mode, scale=scale)
elif distname.startswith('truncated_gumbel'):
dist = truncated_gumbel(loc=mode, scale=scale * np.sqrt(6) / np.pi, b=1)
elif distname == 'truncated_lognorm':
shape, scale = _lognorm_params(np.maximum(1e-5, mode), scale)
dist = truncated_lognorm(loc=0, scale=scale, s=shape, b=1)
elif distname == 'truncated_lognorm_varstd':
dist = truncated_lognorm(loc=0, scale=np.maximum(1e-5, mode) * np.exp(scale ** 2), s=scale, b=1)
elif meta_noise_type == 'noisy_readout':
if distname.endswith('_lookup') and (distname.startswith('truncated_norm_') or
distname.startswith('truncated_gumbel_')):
m_ind = np.searchsorted(lookup_table['mode'], np.minimum(10, mode)) # atm 10 is the maximum mode
scale = lookup_table['scale'][np.abs(lookup_table['truncscale'][m_ind] - scale).argmin(axis=-1)]
elif distname == 'truncated_norm_fit':
a, b, c, d, e, f = 0.88632051, -1.45129289, 0.25329918, 2.09066054, -1.2262868, 1.47179606
scale = a*scale*(mode+1)**b + c*(mode+1)**f*(np.exp(np.minimum(100, d*scale*(mode+1)**e))-1)
if distname.startswith('truncated_norm'):
dist = truncnorm(-mode / scale, np.inf, loc=mode, scale=scale)
elif distname.startswith('truncated_gumbel'):
dist = truncated_gumbel(loc=mode, scale=scale * np.sqrt(6) / np.pi)
else:
raise ValueError(f"'{meta_noise_type}' is an unknown metacognitive type")
return dist # noqa
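# A minimal usage sketch with hypothetical mode/scale values: 'beta' is a valid
# noisy-report noise model, while 'gamma' is readout-only and therefore needs
# meta_noise_type='noisy_readout'.
def _demo_get_dist():
    report_dist = get_dist('beta', mode=0.7, scale=0.05)
    readout_dist = get_dist('gamma', mode=1.2, scale=0.4, meta_noise_type='noisy_readout')
    print(report_dist.mean(), readout_dist.mean())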
def get_dist_mean(distname, mode, scale):
"""
Helper function to get the distribution mean of certain distributions.
"""
if distname == 'gumbel':
mean = mode + np.euler_gamma * scale * np.sqrt(6) / np.pi
elif distname == 'norm':
mean = mode
else:
raise ValueError(f"Distribution {distname} not supported.")
return mean
def get_pdf(x, distname, mode, scale, lb=0, ub=1, meta_noise_type='noisy_report'):
"""
Helper function to get the probability density of a distribution.
"""
if distname.startswith('censored_'):
likelihood_dist = distname[distname.find('_') + 1:]
dist = get_dist(likelihood_dist, mode=mode, scale=scale)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
cdf_margin_low = dist.cdf(lb)
cdf_margin_high = (1 - dist.cdf(ub))
pdf = ((x > lb) & (x < ub)) * dist.pdf(x) + \
(x <= lb) * cdf_margin_low + \
(x >= ub) * cdf_margin_high
else:
pdf = get_dist(distname, mode, scale, meta_noise_type).pdf(x)
return pdf
def get_likelihood(x, distname, mode, scale, lb=1e-8, ub=1 - 1e-8, binsize_meta=1e-3, logarithm=False):
"""
Helper function to get the likelihood of a distribution.
"""
if distname.startswith('censored_'):
likelihood_dist = distname[distname.find('_') + 1:]
dist = get_dist(likelihood_dist, mode=mode, scale=scale)
if distname.endswith('gumbel'):
x = x.astype(maxfloat)
window = (dist.cdf(
|
np.minimum(1, x + binsize_meta)
|
numpy.minimum
|
#------------------------------------------------------------
# Programmer(s): <NAME> @ SMU
#------------------------------------------------------------
# Copyright (c) 2019, Southern Methodist University.
# All rights reserved.
# For details, see the LICENSE file.
#------------------------------------------------------------
# matplotlib-based plotting and analysis utilities for ARKode
# solver diagnostics
#### Data Structures ####
class ErrorTest:
"""
An ErrorTest object stores, for each error test performed:
index -- the time step index
h -- the time step size
estimate -- the estimate of the local error
errfail -- a flag denoting whether the error test failed
"""
def __init__(self, step, h, dsm):
self.index = step
self.h = h
if (dsm <= 0.0):
self.estimate = 1.e-10
else:
self.estimate = dsm
self.errfail = 0
if (dsm > 1.0):
self.errfail = 1
def Write(self):
print(' ErrorTest: index =',self.index,', h =',self.h,', estimate =',self.estimate,', errfail =',self.errfail)
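# A minimal construction sketch with made-up numbers: dsm > 1.0 flags the step as
# a failed error test (errfail = 1).
def _demo_error_test():
    et = ErrorTest(step=12, h=1.0e-3, dsm=1.7)
    et.Write()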
##########
class AdaptH:
"""
An AdaptH object stores, for each time step adaptivity calculation:
eh[0,1,2] -- the biased error history array
hh[0,1,2] -- the time step history array
h_accuracy[0,1] -- the accuracy step estimates, before
and after applying step size bounds
h_stability[0,1] -- the stability step estimates, before
and after applying cfl & stability bounds
stab_restrict -- flag whether step was stability-limited
eta -- the final time step growth factor
"""
def __init__(self, eh0, eh1, eh2, hh0, hh1, hh2, ha0, hs0, ha1, hs1, eta):
self.eh0 = eh0
self.eh1 = eh1
self.eh2 = eh2
self.hh0 = hh0
self.hh1 = hh1
self.hh2 = hh2
self.h_accuracy0 = ha0
self.h_accuracy1 = ha1
self.h_stability0 = hs0
self.h_stability1 = hs1
self.eta = eta
if (hs0 < ha0):
self.stab_restrict = 1
else:
self.stab_restrict = 0
def Write(self):
        print('  AdaptH: errhist =',self.eh0,self.eh1,self.eh2,', stephist =',self.hh0,self.hh1,self.hh2,', ha0 =',self.h_accuracy0,', hs0 =',self.h_stability0,', ha1 =',self.h_accuracy1,', hs1 =',self.h_stability1,', stabrestrict =',self.stab_restrict,', eta =',self.eta)
##########
class StageStep:
"""
A StageStep object stores, for each RK stage of every time step:
step -- the time step index
h -- the time step size
stage -- the stage index
tn -- the stage time
"""
def __init__(self, step, h, stage, tn):
self.step = step
self.h = h
self.stage = stage
self.tn = tn
def Write(self):
print(' StageStep: step =',self.step,', stage =',self.stage)
print(' h =',self.h,', tn =',self.tn)
##########
class TimeStep:
"""
A TimeStep object stores, for every time step:
StageSteps -- array of StageStep objects comprising the step
h_attempts -- array of step sizes attempted (typically only one,
unless convergence or error failures occur)
step -- the time step index
tn -- maximum stage time in step
h_final -- the final successful time step size
ErrTest -- an ErrorTest object for the step
err_fails -- total error test failures in step
"""
def __init__(self):
self.StageSteps = []
self.h_attempts = []
self.step = -1
self.tn = -1.0
self.h_final = -1.0
self.err_fails = 0
def AddStage(self, Stage):
self.step = Stage.step
if (self.h_final != Stage.h):
self.h_final = Stage.h
self.h_attempts.append(Stage.h)
self.StageSteps.append(Stage)
self.tn = max(self.tn,Stage.tn)
def AddErrorTest(self, ETest):
self.ErrTest = ETest
self.err_fails += ETest.errfail
def AddHAdapt(self, HAdapt):
self.HAdapt = HAdapt
def Write(self):
print('TimeStep: step =',self.step,', tn =',self.tn)
print(' h_attempts =',self.h_attempts)
print(' h_final =',self.h_final)
print(' err_fails =',self.err_fails)
for i in range(len(self.StageSteps)):
self.StageSteps[i].Write()
#### Utility functions ####
def load_line(line):
"""
This routine parses a line of the diagnostics output file to
determine what type of data it contains (an RK stage step,
an error test, or a time step adaptivity calculation), and
creates the relevant object for that data line. Each of
these output types are indexed by a specific linetype for
use by the calling routine.
The function returns [linetype, entry].
"""
import shlex
txt = shlex.split(line)
if ("step" in txt):
linetype = 0
step = int(txt[2])
h = float(txt[3])
stage = int(txt[4])
tn = float(txt[5])
entry = StageStep(step, h, stage, tn)
elif ("etest" in txt):
linetype = 3
step = int(txt[2])
h = float(txt[3])
dsm = float(txt[4])
entry = ErrorTest(step, h, dsm)
elif ("adapt" in txt):
linetype = 4
eh0 = float(txt[2])
eh1 = float(txt[3])
eh2 = float(txt[4])
hh0 = float(txt[5])
hh1 = float(txt[6])
hh2 = float(txt[7])
ha0 = float(txt[8])
hs0 = float(txt[9])
ha1 = float(txt[10])
hs1 = float(txt[11])
eta = float(txt[12])
entry = AdaptH(eh0, eh1, eh2, hh0, hh1, hh2, ha0, hs0, ha1, hs1, eta)
else:
linetype = -1
entry = 0
return [linetype, entry]
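# A minimal sketch of load_line on a line shaped to match the token positions the
# parser expects ("step" keyword, then index/h/stage/tn fields); this is an
# illustrative string, not necessarily the exact ARKode diagnostics format.
def _demo_load_line():
    linetype, entry = load_line('step attempt 3 1.0e-3 2 0.153')
    print(linetype)   # 0 -> an RK stage step
    entry.Write()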
##########
def load_diags(fname):
"""
This routine opens the diagnostics output file, loads all lines
to create an array of TimeSteps with all of the relevant data
contained therein.
"""
f = open(fname)
step = -1
stage = -1
TimeSteps = []
for line in f:
linetype, entry = load_line(line)
if (linetype == 0): # stage step
if (entry.step != step): # new step
step = entry.step
TimeSteps.append(TimeStep())
TimeSteps[step].AddStage(entry)
stage = entry.stage
elif (linetype == 3): # error test
TimeSteps[step].AddErrorTest(entry)
elif (linetype == 4): # h adaptivity
TimeSteps[step].AddHAdapt(entry)
f.close()
return TimeSteps
##########
def write_diags(TimeSteps):
"""
This routine takes in the array of TimeSteps (returned from
load_diags), and writes out the internal representation of
the time step history to stdout.
"""
for i in range(len(TimeSteps)):
print(' ')
TimeSteps[i].Write()
##########
def plot_h_vs_t(TimeSteps,fname):
"""
This routine takes in the array of TimeSteps (returned from
load_diags), and plots the time step sizes h as a function
of the simulation time t. Failed time steps are marked on
    the plot with a red X when an error test failure occurred.
The resulting plot is stored in the file <fname>, that
should include an extension appropriate for the matplotlib
'savefig' command.
"""
import pylab as plt
import numpy as np
hvals = []
tvals = []
EfailH = []
EfailT = []
CfailH = []
CfailT = []
for istep in range(len(TimeSteps)):
# store successful step size and time
hvals.append(TimeSteps[istep].h_final)
tvals.append(TimeSteps[istep].tn)
# account for convergence failures and error test failures
if (TimeSteps[istep].err_fails > 0):
EfailH.append(TimeSteps[istep].h_attempts[0])
EfailT.append(TimeSteps[istep].tn)
# convert data to numpy arrays
h = np.array(hvals)
t = np.array(tvals)
eh = np.array(EfailH)
et = np.array(EfailT)
# generate plot
plt.figure()
plt.semilogy(t,h,'b-')
if (len(eh) > 0):
plt.semilogy(et,eh,'rx')
plt.xlabel('time')
plt.ylabel('step size')
plt.title('Step size versus time')
if (len(eh) > 0):
plt.legend(('successful','error fails'), loc='lower right', shadow=True)
plt.grid()
plt.savefig(fname)
##########
def plot_h_vs_tstep(TimeSteps,fname):
"""
This routine takes in the array of TimeSteps (returned from
load_diags), and plots the time step sizes h as a function
of the time step iteration index. Failed time steps are
    marked on the plot with a red X if an error test failure occurred.
The resulting plot is stored in the file <fname>, that
should include an extension appropriate for the matplotlib
'savefig' command.
"""
import pylab as plt
import numpy as np
hvals = []
ivals = []
EfailH = []
EfailI = []
for istep in range(len(TimeSteps)):
# store successful step size and time
hvals.append(TimeSteps[istep].h_final)
ivals.append(istep)
# account for convergence failures and error test failures
if (TimeSteps[istep].err_fails > 0):
EfailH.append(TimeSteps[istep].h_attempts[0])
EfailI.append(istep)
# convert data to numpy arrays
h = np.array(hvals)
I = np.array(ivals)
eh = np.array(EfailH)
eI = np.array(EfailI)
# generate plot
plt.figure()
plt.semilogy(I,h,'b-')
if (len(eI) > 0):
plt.semilogy(eI,eh,'rx')
plt.xlabel('time step')
plt.ylabel('step size')
plt.title('Step size versus time step')
if (len(eI) > 0):
plt.legend(('successful','error fails'), loc='lower right', shadow=True)
plt.grid()
plt.savefig(fname)
##########
def plot_oversolve_vs_t(TimeSteps,fname):
"""
This routine takes in the array of TimeSteps (returned from
load_diags), and plots the oversolve as a function of the
simulation time t. We cap the computed oversolve value at
    1000 to get more intuitive plots, since the first few time steps
are purposefully too small.
The resulting plot is stored in the file <fname>, that
should include an extension appropriate for the matplotlib
'savefig' command.
"""
import pylab as plt
import numpy as np
Ovals = []
tvals = []
for istep in range(len(TimeSteps)):
# store oversolve and time
Ovals.append(min(1e3,1.0 / TimeSteps[istep].ErrTest.estimate))
tvals.append(TimeSteps[istep].tn)
# convert data to numpy arrays
O = np.array(Ovals)
t = np.array(tvals)
# generate plot
plt.figure()
plt.semilogy(t,O,'b-')
plt.xlabel('time')
plt.ylabel('oversolve')
plt.title('Oversolve versus time')
plt.grid()
plt.savefig(fname)
##########
def etest_stats(TimeSteps,fptr):
"""
This routine takes in the array of TimeSteps (returned from
load_diags), and computes statistics on how well the time
step adaptivity estimations predicted step values that met
the accuracy requirements.
Note: we ignore steps immediately following an
error test failure, since etamax is bounded above by 1.
The resulting data is appended to the stream corresponding
to fptr (either the result of 'open' or sys.stdout).
"""
import numpy as np
errfails = 0
oversolve10 = 0
oversolve100 = 0
oversolve_max = 0.0
oversolve_min = 1.0e200
oversolves = []
nsteps = len(TimeSteps)
hvals = []
for istep in range(nsteps):
if (TimeSteps[istep].err_fails > 0):
errfails += 1
hvals.append(TimeSteps[istep].h_final)
# if this or previous step had an error test failure, skip oversolve results
ignore = 0
if (istep > 0):
if( (TimeSteps[istep].err_fails > 0) or (TimeSteps[istep-1].err_fails > 0)):
ignore = 1
else:
if(TimeSteps[istep].err_fails > 0):
ignore = 1
if (ignore == 0):
over = 1.0 / TimeSteps[istep].ErrTest.estimate
oversolves.append(over)
if (over > 100.0):
oversolve100 += 1
elif (over > 10.0):
oversolve10 += 1
oversolve_max = max(oversolve_max, over)
oversolve_min = min(oversolve_min, over)
# generate means and percentages
ov = np.array(oversolves)
hv = np.array(hvals)
hval_mean = np.mean(hv)
hval_max = np.max(hv)
hval_min = np.min(hv)
oversolve_mean =
|
np.mean(ov)
|
numpy.mean
|
from .. import util
from ..probabilities import mass
from ..core.data import Observations, Model
import h5py
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import matplotlib.colors as mpl_clr
import matplotlib.ticker as mpl_tick
import astropy.visualization as astroviz
import logging
__all__ = ['ModelVisualizer', 'CIModelVisualizer', 'ObservationsVisualizer',
'ModelCollection']
def _get_model(theta, observations):
try:
return Model(theta, observations=observations)
except ValueError:
logging.warning(f"Model did not converge with {theta=}")
return None
# --------------------------------------------------------------------------
# Individual model visualizers
# --------------------------------------------------------------------------
class _ClusterVisualizer:
_MARKERS = ('o', '^', 'D', '+', 'x', '*', 's', 'p', 'h', 'v', '1', '2')
# Default xaxis limits for all profiles. Set by inits, can be reset by user
rlims = None
# -----------------------------------------------------------------------
# Artist setups
# -----------------------------------------------------------------------
def _setup_artist(self, fig, ax, *, use_name=True):
'''setup a plot (figure and ax) with one single ax'''
if ax is None:
if fig is None:
# no figure or ax provided, make one here
fig, ax = plt.subplots()
else:
# Figure provided, no ax provided. Try to grab it from the fig
                # if that doesn't work, create it
cur_axes = fig.axes
if len(cur_axes) > 1:
raise ValueError(f"figure {fig} already has too many axes")
elif len(cur_axes) == 1:
ax = cur_axes[0]
else:
ax = fig.add_subplot()
else:
if fig is None:
                # ax is provided, but no figure. Grab its figure from it
fig = ax.get_figure()
if hasattr(self, 'name') and use_name:
fig.suptitle(self.name)
return fig, ax
def _setup_multi_artist(self, fig, shape, *, allow_blank=True,
use_name=True, constrained_layout=True,
subfig_kw=None, **sub_kw):
'''setup a subplot with multiple axes'''
if subfig_kw is None:
subfig_kw = {}
def create_axes(base, shape):
'''create the axes of `shape` on this base (fig)'''
            # make sure shape is a tuple of at least 1d, at most 2d
if not isinstance(shape, tuple):
# TODO doesnt work on an int
shape = tuple(shape)
if len(shape) == 1:
shape = (shape, 1)
elif len(shape) > 2:
mssg = f"Invalid `shape` for subplots {shape}, must be 2D"
raise ValueError(mssg)
# split into dict of nrows, ncols
shape = dict(zip(("nrows", "ncols"), shape))
# if either of them is also a tuple, means we want columns or rows
# of varying sizes, switch to using subfigures
# TODO what are the chances stuff like `sharex` works correctly?
if isinstance(shape['nrows'], tuple):
subfigs = base.subfigures(ncols=shape['ncols'], nrows=1,
squeeze=False, **subfig_kw)
for ind, sf in enumerate(subfigs.flatten()):
try:
nr = shape['nrows'][ind]
except IndexError:
if allow_blank:
continue
mssg = (f"Number of row entries {shape['nrows']} must "
f"match number of columns ({shape['ncols']})")
raise ValueError(mssg)
sf.subplots(ncols=1, nrows=nr, **sub_kw)
elif isinstance(shape['ncols'], tuple):
subfigs = base.subfigures(nrows=shape['nrows'], ncols=1,
squeeze=False, **subfig_kw)
for ind, sf in enumerate(subfigs.flatten()):
try:
nc = shape['ncols'][ind]
except IndexError:
if allow_blank:
continue
mssg = (f"Number of col entries {shape['ncols']} must "
f"match number of rows ({shape['nrows']})")
raise ValueError(mssg)
sf.subplots(nrows=1, ncols=nc, **sub_kw)
# otherwise just make a simple subplots and return that
else:
base.subplots(**shape, **sub_kw)
return base, base.axes
# ------------------------------------------------------------------
# Create figure, if necessary
# ------------------------------------------------------------------
if fig is None:
fig = plt.figure(constrained_layout=constrained_layout)
# ------------------------------------------------------------------
# If no shape is provided, just return the figure, probably empty
# ------------------------------------------------------------------
if shape is None:
axarr = []
# ------------------------------------------------------------------
# Otherwise attempt to first grab this figures axes, or create them
# ------------------------------------------------------------------
else:
# this fig has axes, check that they match shape
if axarr := fig.axes:
# TODO this won't actually work, cause fig.axes is just a list
if axarr.shape != shape:
mssg = (f"figure {fig} already contains axes with "
f"mismatched shape ({axarr.shape} != {shape})")
raise ValueError(mssg)
else:
fig, axarr = create_axes(fig, shape)
# ------------------------------------------------------------------
        # If desired, default to titling the figure based on its "name"
# ------------------------------------------------------------------
if hasattr(self, 'name') and use_name:
fig.suptitle(self.name)
# ------------------------------------------------------------------
# Ensure the axes are always returned in an array
# ------------------------------------------------------------------
return fig, np.atleast_1d(axarr)
def _set_ylabel(self, ax, label, label_position='left', *,
residual_ax=None):
tick_prms = dict(which='both',
labelright=(label_position == 'right'),
labelleft=(label_position == 'left'))
if label_position == 'top':
ax.set_ylabel('')
ax.set_title(label)
else:
if (unit_label := ax.get_ylabel()) and unit_label != r'$\mathrm{}$':
label += f' [{unit_label}]'
ax.set_ylabel(label)
ax.yaxis.set_label_position(label_position)
ax.yaxis.set_tick_params(**tick_prms)
if residual_ax is not None:
residual_ax.yaxis.set_label_position(label_position)
residual_ax.yaxis.set_tick_params(**tick_prms)
residual_ax.yaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
def _set_xlabel(self, ax, label='Distance from centre', *,
residual_ax=None, remove_all=False):
bottom_ax = ax if residual_ax is None else residual_ax
if unit_label := bottom_ax.get_xlabel():
label += f' [{unit_label}]'
bottom_ax.set_xlabel(label)
# if has residual ax, remove the ticks/labels on the top ax
if residual_ax is not None:
ax.set_xlabel('')
ax.xaxis.set_tick_params(bottom=False, labelbottom=False)
# if desired, simply remove everything
if remove_all:
bottom_ax.set_xlabel('')
bottom_ax.xaxis.set_tick_params(bottom=False, labelbottom=False)
# -----------------------------------------------------------------------
# Unit support
# -----------------------------------------------------------------------
def _support_units(method):
import functools
@functools.wraps(method)
def _unit_decorator(self, *args, **kwargs):
# convert based on median distance parameter
eqvs = util.angular_width(self.d)
with astroviz.quantity_support(), u.set_enabled_equivalencies(eqvs):
return method(self, *args, **kwargs)
return _unit_decorator
# -----------------------------------------------------------------------
# Plotting functionality
# -----------------------------------------------------------------------
def _get_median(self, percs):
'''from an array of data percentiles, return the median array'''
return percs[percs.shape[0] // 2] if percs.ndim > 1 else percs
def _get_err(self, dataset, key):
'''gather the error variables corresponding to `key` from `dataset`'''
try:
return dataset[f'Δ{key}']
except KeyError:
try:
return np.c_[dataset[f'Δ{key},down'], dataset[f'Δ{key},up']].T
except KeyError:
return None
def _plot_model(self, ax, data, intervals=None, *,
x_data=None, x_unit='pc', y_unit=None,
CI_kwargs=None, **kwargs):
CI_kwargs = dict() if CI_kwargs is None else CI_kwargs
# ------------------------------------------------------------------
# Evaluate the shape of the data array to determine confidence
# intervals, if applicable
# ------------------------------------------------------------------
if data is None or data.ndim == 0:
return
elif data.ndim == 1:
data = data.reshape((1, data.size))
if not (data.shape[0] % 2):
mssg = 'Invalid `data`, must have odd-numbered zeroth axis shape'
raise ValueError(mssg)
midpoint = data.shape[0] // 2
if intervals is None:
intervals = midpoint
elif intervals > midpoint:
mssg = f'{intervals}σ is outside stored range of {midpoint}σ'
raise ValueError(mssg)
# ------------------------------------------------------------------
# Convert any units desired
# ------------------------------------------------------------------
x_domain = self.r if x_data is None else x_data
if x_unit:
x_domain = x_domain.to(x_unit)
if y_unit:
data = data.to(y_unit)
# ------------------------------------------------------------------
# Plot the median (assumed to be the middle axis of the intervals)
# ------------------------------------------------------------------
median = data[midpoint]
med_plot, = ax.plot(x_domain, median, **kwargs)
# ------------------------------------------------------------------
# Plot confidence intervals successively from the midpoint
# ------------------------------------------------------------------
output = [med_plot]
CI_kwargs.setdefault('color', med_plot.get_color())
alpha = 0.8 / (intervals + 1)
for sigma in range(1, intervals + 1):
CI = ax.fill_between(
x_domain, data[midpoint + sigma], data[midpoint - sigma],
alpha=(1 - alpha), **CI_kwargs
)
output.append(CI)
alpha += alpha
return output
def _plot_data(self, ax, dataset, y_key, *,
x_key='r', x_unit='pc', y_unit=None,
err_transform=None, **kwargs):
# TODO need to handle colours better
defaultcolour = None
# ------------------------------------------------------------------
# Get data and relevant errors for plotting
# ------------------------------------------------------------------
xdata = dataset[x_key]
ydata = dataset[y_key]
xerr = self._get_err(dataset, x_key)
yerr = self._get_err(dataset, y_key)
# ------------------------------------------------------------------
# Convert any units desired
# ------------------------------------------------------------------
if x_unit is not None:
xdata = xdata.to(x_unit)
if y_unit is not None:
ydata = ydata.to(y_unit)
# ------------------------------------------------------------------
# If given, transform errors based on `err_transform` function
# ------------------------------------------------------------------
if err_transform is not None:
yerr = err_transform(yerr)
# ------------------------------------------------------------------
# Setup default plotting details, style, labels
# ------------------------------------------------------------------
kwargs.setdefault('marker', '.')
kwargs.setdefault('linestyle', 'None')
kwargs.setdefault('color', defaultcolour)
label = dataset.cite()
if 'm' in dataset.mdata:
label += fr' ($m={dataset.mdata["m"]}\ M_\odot$)'
# ------------------------------------------------------------------
# Plot
# ------------------------------------------------------------------
# TODO not sure if I like the mfc=none style,
# mostly due to https://github.com/matplotlib/matplotlib/issues/3400
return ax.errorbar(xdata, ydata, xerr=xerr, yerr=yerr, mfc='none',
label=label, **kwargs)
def _plot_profile(self, ax, ds_pattern, y_key, model_data, *,
y_unit=None, residuals=False, err_transform=None,
res_kwargs=None, **kwargs):
'''figure out what needs to be plotted and call model/data plotters
all **kwargs passed to both _plot_model and _plot_data
model_data dimensions *must* be (mass bins, intervals, r axis)
'''
# TODO we might still want to allow for specific model/data kwargs?
ds_pattern = ds_pattern or ''
strict = kwargs.pop('strict', False)
# Restart marker styles each plotting call
markers = iter(self._MARKERS)
# TODO need to figure out how we handle passed kwargs better
default_clr = kwargs.pop('color', None)
if res_kwargs is None:
res_kwargs = {}
# ------------------------------------------------------------------
# Determine the relevant datasets to the given pattern
# ------------------------------------------------------------------
datasets = self.obs.filter_datasets(ds_pattern)
if strict and ds_pattern and not datasets:
mssg = (f"No datasets matching '{ds_pattern}' exist in {self.obs}."
f"To plot models without data, set `show_obs=False`")
# raise DataError
raise KeyError(mssg)
# ------------------------------------------------------------------
# Iterate over the datasets, keeping track of all relevant masses
# and calling `_plot_data`
# ------------------------------------------------------------------
masses = {}
for key, dset in datasets.items():
mrk = next(markers)
# get mass bin of this dataset, for later model plotting
if 'm' in dset.mdata:
m = dset.mdata['m'] * u.Msun
mass_bin = np.where(self.mj == m)[0][0]
else:
mass_bin = self.star_bin
if mass_bin in masses:
clr = masses[mass_bin][0][0].get_color()
else:
clr = default_clr
# plot the data
try:
line = self._plot_data(ax, dset, y_key, marker=mrk, color=clr,
err_transform=err_transform,
y_unit=y_unit, **kwargs)
except KeyError as err:
if strict:
raise err
else:
# warnings.warn(err.args[0])
continue
masses.setdefault(mass_bin, [])
masses[mass_bin].append(line)
# ------------------------------------------------------------------
# Based on the masses of data plotted, plot the corresponding axes of
# the model data, calling `_plot_model`
# ------------------------------------------------------------------
res_ax = None
if model_data is not None:
# ensure that the data is (mass bin, intervals, r domain)
if len(model_data.shape) != 3:
raise ValueError("invalid model data shape")
# No data plotted, use the star_bin
if not masses:
if model_data.shape[0] > 1:
masses = {self.star_bin: None}
else:
masses = {0: None}
for mbin, errbars in masses.items():
ymodel = model_data[mbin, :, :]
# TODO having model/data be same color is kinda hard to read
# this is why I added mfc=none, but I dont like that either
if errbars is not None:
clr = errbars[0][0].get_color()
else:
clr = default_clr
self._plot_model(ax, ymodel, color=clr, y_unit=y_unit, **kwargs)
if residuals:
res_ax = self._add_residuals(ax, ymodel, errbars,
res_ax=res_ax, y_unit=y_unit,
**res_kwargs)
# Adjust x limits
if self.rlims is not None:
ax.set_xlim(*self.rlims)
return ax, res_ax
# -----------------------------------------------------------------------
# Plot extras
# -----------------------------------------------------------------------
def _add_residuals(self, ax, ymodel, errorbars, percentage=False, *,
show_chi2=False, xmodel=None, y_unit=None, size="15%",
res_ax=None, divider_kwargs=None):
'''
errorbars : a list of outputs from calls to plt.errorbars
'''
from mpl_toolkits.axes_grid1 import make_axes_locatable
if errorbars is None:
errorbars = []
if divider_kwargs is None:
divider_kwargs = {}
# ------------------------------------------------------------------
# Get model data and spline
# ------------------------------------------------------------------
if xmodel is None:
xmodel = self.r
if y_unit is not None:
ymodel = ymodel.to(y_unit)
ymedian = self._get_median(ymodel)
yspline = util.QuantitySpline(xmodel, ymedian)
# ------------------------------------------------------------------
# Setup axes, adding a new smaller axe for the residual underneath,
# if it hasn't already been created (and passed to `res_ax`)
# ------------------------------------------------------------------
if res_ax is None:
divider = make_axes_locatable(ax)
res_ax = divider.append_axes('bottom', size=size, pad=0, sharex=ax)
res_ax.grid()
res_ax.set_xscale(ax.get_xscale())
res_ax.spines['top'].set(**divider_kwargs)
# ------------------------------------------------------------------
# Plot the model line, hopefully centred on zero
# ------------------------------------------------------------------
if percentage:
baseline = 100 * (ymodel - ymedian) / ymodel
else:
baseline = ymodel - ymedian
self._plot_model(res_ax, baseline, color='k')
# ------------------------------------------------------------------
# Get data from the plotted errorbars
# ------------------------------------------------------------------
chi2 = 0.
for errbar in errorbars:
# --------------------------------------------------------------
# Get the actual datapoints, and the hopefully correct units
# --------------------------------------------------------------
xdata, ydata = errbar[0].get_data()
ydata = ydata.to(ymedian.unit)
# --------------------------------------------------------------
# Grab relevant formatting (colours and markers)
# --------------------------------------------------------------
clr = errbar[0].get_color()
mrk = errbar[0].get_marker()
# --------------------------------------------------------------
# Parse the errors from the size of the errorbar lines (messy)
# --------------------------------------------------------------
xerr = yerr = None
if errbar.has_xerr:
xerr_lines = errbar[2][0]
yerr_lines = errbar[2][1] if errbar.has_yerr else None
elif errbar.has_yerr:
xerr_lines, yerr_lines = None, errbar[2][0]
else:
xerr_lines = yerr_lines = None
if xerr_lines:
xerr_segs = xerr_lines.get_segments() << xdata.unit
xerr = u.Quantity([np.abs(seg[:, 0] - xdata[i])
for i, seg in enumerate(xerr_segs)]).T
if yerr_lines:
yerr_segs = yerr_lines.get_segments() << ydata.unit
if percentage:
yerr = 100 * np.array([
np.abs(seg[:, 1] - ydata[i]) / ydata[i]
for i, seg in enumerate(yerr_segs)
]).T
else:
yerr = u.Quantity([np.abs(seg[:, 1] - ydata[i])
for i, seg in enumerate(yerr_segs)]).T
# --------------------------------------------------------------
# Compute the residuals and plot them
# --------------------------------------------------------------
if percentage:
res = 100 * (ydata - yspline(xdata)) / yspline(xdata)
else:
res = ydata - yspline(xdata)
res_ax.errorbar(xdata, res, xerr=xerr, yerr=yerr,
color=clr, marker=mrk, linestyle='none')
# --------------------------------------------------------------
# Optionally compute chi-squared statistic
# --------------------------------------------------------------
if show_chi2:
chi2 += np.sum((res / yerr)**2)
if show_chi2:
fake = plt.Line2D([], [], label=fr"$\chi^2={chi2:.2f}$")
res_ax.legend(handles=[fake], handlelength=0, handletextpad=0)
# ------------------------------------------------------------------
# Label y-axes
# ------------------------------------------------------------------
if percentage:
res_ax.set_ylabel(r'Residuals')
res_ax.yaxis.set_major_formatter(mpl_tick.PercentFormatter())
else:
res_ax.set_ylabel(f'Residuals [{res_ax.get_ylabel()}]')
# ------------------------------------------------------------------
# Set bounds at 100% or less
# ------------------------------------------------------------------
if percentage:
ylim = res_ax.get_ylim()
res_ax.set_ylim(max(ylim[0], -100), min(ylim[1], 100))
return res_ax
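# ------------------------------------------------------------------
# Hedged usage sketch (kept as comments so the module stays importable):
# `_add_residuals` expects the container objects returned by
# `ax.errorbar(...)`, e.g.
#   eb = ax.errorbar(xdata, ydata, yerr=yerr, fmt='o')
#   res_ax = self._add_residuals(ax, ymodel, [eb], percentage=True,
#                                show_chi2=True)
# Here `xdata`, `ydata`, `yerr` and `ymodel` are placeholders, not
# attributes defined elsewhere in this class.
# ------------------------------------------------------------------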
def _add_hyperparam(self, ax, ymodel, xdata, ydata, yerr):
# TODO this is still a complete mess
yspline = util.QuantitySpline(self.r, ymodel)
if hasattr(ax, 'aeff_text'):
aeff_str = ax.aeff_text.get_text()
aeff = float(aeff_str[aeff_str.rfind('$') + 1:])
else:
# TODO figure out best place to place this at
ax.aeff_text = ax.text(0.1, 0.3, '')
aeff = 0.
aeff += util.hyperparam_effective(ydata, yspline(xdata), yerr)
ax.aeff_text.set_text(fr'$\alpha_{{eff}}=${aeff:.4e}')
# -----------------------------------------------------------------------
# Observables plotting
# -----------------------------------------------------------------------
@_support_units
def plot_LOS(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='km/s',
label_position='top', blank_xaxis=False,
res_kwargs=None, **kwargs):
fig, ax = self._setup_artist(fig, ax)
ax.set_xscale("log")
if show_obs:
pattern, var = '*velocity_dispersion*', 'σ'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
ax, res_ax = self._plot_profile(ax, pattern, var, self.LOS,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit,
res_kwargs=res_kwargs, **kwargs)
label = 'LOS Velocity Dispersion'
self._set_ylabel(ax, label, label_position, residual_ax=res_ax)
self._set_xlabel(ax, residual_ax=res_ax, remove_all=blank_xaxis)
ax.legend()
return fig
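# Hedged usage sketch (comments only): given a visualizer instance `viz`,
# a LOS dispersion profile with a residuals panel could be drawn as
#   fig = viz.plot_LOS(show_obs=True, residuals=True,
#                      res_kwargs={'percentage': True, 'show_chi2': True})
#   fig.savefig('kinematics_LOS.png')
# The filename is illustrative only.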
@_support_units
def plot_pm_tot(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='mas/yr',
label_position='top', blank_xaxis=False,
res_kwargs=None, **kwargs):
fig, ax = self._setup_artist(fig, ax)
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_tot'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
ax, res_ax = self._plot_profile(ax, pattern, var, self.pm_tot,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit,
res_kwargs=res_kwargs, **kwargs)
label = "Total PM Dispersion"
self._set_ylabel(ax, label, label_position, residual_ax=res_ax)
self._set_xlabel(ax, residual_ax=res_ax, remove_all=blank_xaxis)
ax.legend()
return fig
@_support_units
def plot_pm_ratio(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', label_position='top', blank_xaxis=False,
res_kwargs=None, **kwargs):
fig, ax = self._setup_artist(fig, ax)
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_ratio'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
ax, res_ax = self._plot_profile(ax, pattern, var, self.pm_ratio,
strict=strict, residuals=residuals,
x_unit=x_unit,
res_kwargs=res_kwargs, **kwargs)
label = "PM Anisotropy"
self._set_ylabel(ax, label, label_position, residual_ax=res_ax)
self._set_xlabel(ax, residual_ax=res_ax, remove_all=blank_xaxis)
ax.legend()
return fig
@_support_units
def plot_pm_T(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='mas/yr',
label_position='top', blank_xaxis=False,
res_kwargs=None, **kwargs):
fig, ax = self._setup_artist(fig, ax)
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_T'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
ax, res_ax = self._plot_profile(ax, pattern, var, self.pm_T,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit,
res_kwargs=res_kwargs, **kwargs)
label = "Tangential PM Dispersion"
self._set_ylabel(ax, label, label_position, residual_ax=res_ax)
self._set_xlabel(ax, residual_ax=res_ax, remove_all=blank_xaxis)
ax.legend()
return fig
@_support_units
def plot_pm_R(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='mas/yr',
label_position='top', blank_xaxis=False,
res_kwargs=None, **kwargs):
fig, ax = self._setup_artist(fig, ax)
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_R'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
ax, res_ax = self._plot_profile(ax, pattern, var, self.pm_R,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit,
res_kwargs=res_kwargs, **kwargs)
label = "Radial PM Dispersion"
self._set_ylabel(ax, label, label_position, residual_ax=res_ax)
self._set_xlabel(ax, residual_ax=res_ax, remove_all=blank_xaxis)
ax.legend()
return fig
@_support_units
def plot_number_density(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', label_position='top',
blank_xaxis=False, res_kwargs=None, **kwargs):
def quad_nuisance(err):
return np.sqrt(err**2 + (self.s2 << err.unit**2))
fig, ax = self._setup_artist(fig, ax)
ax.loglog()
if show_obs:
pattern, var = '*number_density*', 'Σ'
strict = show_obs == 'strict'
kwargs.setdefault('err_transform', quad_nuisance)
else:
pattern = var = None
strict = False
ax, res_ax = self._plot_profile(ax, pattern, var, self.numdens,
strict=strict, residuals=residuals,
x_unit=x_unit,
res_kwargs=res_kwargs, **kwargs)
# bit arbitrary, but probably fine for the most part
ax.set_ylim(bottom=0.5e-4)
label = 'Number Density'
self._set_ylabel(ax, label, label_position, residual_ax=res_ax)
self._set_xlabel(ax, residual_ax=res_ax, remove_all=blank_xaxis)
ax.legend()
return fig
@_support_units
def plot_all(self, fig=None, sharex=True, **kwargs):
'''Plots all the primary profiles (numdens, LOS, PM)
but *not* the mass function, pulsars, or any secondary profiles
(cum-mass, remnants, etc)
'''
# TODO working with residuals here is hard because constrained_layout
# doesn't seem to account for them well
fig, axes = self._setup_multi_artist(fig, (3, 2), sharex=sharex)
axes = axes.reshape((3, 2))
res_kwargs = dict(size="25%", show_chi2=False, percentage=True)
kwargs.setdefault('res_kwargs', res_kwargs)
# left plots
self.plot_number_density(fig=fig, ax=axes[0, 0], label_position='left',
blank_xaxis=True, **kwargs)
self.plot_LOS(fig=fig, ax=axes[1, 0], label_position='left',
blank_xaxis=True, **kwargs)
self.plot_pm_ratio(fig=fig, ax=axes[2, 0], label_position='left',
**kwargs)
# right plots
self.plot_pm_tot(fig=fig, ax=axes[0, 1], label_position='right',
blank_xaxis=True, **kwargs)
self.plot_pm_T(fig=fig, ax=axes[1, 1], label_position='right',
blank_xaxis=True, **kwargs)
self.plot_pm_R(fig=fig, ax=axes[2, 1], label_position='right',
**kwargs)
# brute force clear out any "residuals" labels
for ax in fig.axes:
if 'Residual' in ax.get_ylabel():
ax.set_ylabel('')
return fig
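# Hedged usage sketch (comments only): the 3x2 summary figure is built
# directly from a visualizer instance, e.g.
#   fig = viz.plot_all(sharex=True)
# Extra keyword arguments are forwarded to the individual plot_* calls, so
# e.g. `viz.plot_all(residuals=True)` would add residual panels, subject to
# the constrained_layout caveat noted in the TODO above.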
# ----------------------------------------------------------------------
# Mass Function Plotting
# ----------------------------------------------------------------------
@_support_units
def plot_mass_func(self, fig=None, show_obs=True, show_fields=True, *,
colours=None, PI_legend=False, logscaled=False,
field_kw=None):
# ------------------------------------------------------------------
# Setup axes, splitting into two columns if necessary and adding the
# extra ax for the field plot if desired
# ------------------------------------------------------------------
N_rbins = sum([len(d) for d in self.mass_func.values()])
shape = ((int(np.ceil(N_rbins / 2)), int(np.floor(N_rbins / 2))), 2)
# If adding the fields, include an extra column on the left for it
if show_fields:
shape = ((1, *shape[0]), shape[1] + 1)
fig, axes = self._setup_multi_artist(fig, shape, sharex=True)
axes = axes.T.flatten()
ax_ind = 0
# ------------------------------------------------------------------
# If desired, use the `plot_MF_fields` method to show the fields
# ------------------------------------------------------------------
if show_fields:
ax = axes[ax_ind]
if field_kw is None:
field_kw = {}
field_kw.setdefault('radii', [])
self.plot_MF_fields(fig, ax, **field_kw)
ax_ind += 1
# ------------------------------------------------------------------
# Iterate over each PI, gathering data to plot
# ------------------------------------------------------------------
for PI in sorted(self.mass_func,
key=lambda k: self.mass_func[k][0]['r1']):
bins = self.mass_func[PI]
# Get data for this PI
mf = self.obs[PI]
mbin_mean = (mf['m1'] + mf['m2']) / 2.
mbin_width = mf['m2'] - mf['m1']
N = mf['N'] / mbin_width
ΔN = mf['ΔN'] / mbin_width
# --------------------------------------------------------------
# Iterate over radial bin dicts for this PI
# --------------------------------------------------------------
for rind, rbin in enumerate(bins):
ax = axes[ax_ind]
clr = rbin.get('colour', None)
# ----------------------------------------------------------
# Plot observations
# ----------------------------------------------------------
if show_obs:
r_mask = ((mf['r1'] == rbin['r1'])
& (mf['r2'] == rbin['r2']))
N_data = N[r_mask].value
err_data = ΔN[r_mask].value
err = self.F * err_data
pnts = ax.errorbar(mbin_mean[r_mask], N_data, yerr=err,
fmt='o', color=clr)
clr = pnts[0].get_color()
# ----------------------------------------------------------
# Plot model. Doesn't utilize the `_plot_profile` method, as
# this is *not* a profile, but does use similar, but simpler,
# logic
# ----------------------------------------------------------
# The mass domain is provided explicitly, to support visualizers
# which don't store the entire mass range (e.g. CImodels)
mj = rbin['mj']
dNdm = rbin['dNdm']
midpoint = dNdm.shape[0] // 2
median = dNdm[midpoint]
med_plot, = ax.plot(mj, median, '--', c=clr)
alpha = 0.8 / (midpoint + 1)
for sigma in range(1, midpoint + 1):
ax.fill_between(
mj,
dNdm[midpoint + sigma],
dNdm[midpoint - sigma],
alpha=1 - alpha, color=clr
)
alpha += alpha
if logscaled:
ax.set_xscale('log')
ax.set_xlabel(None)
# ----------------------------------------------------------
# "Label" each bin with its radial bounds.
# Uses fake text to allow for using loc='best' from `legend`.
# Really this should be a part of plt (see matplotlib#17946)
# ----------------------------------------------------------
r1 = rbin['r1'].to_value('arcmin')
r2 = rbin['r2'].to_value('arcmin')
fake = plt.Line2D([], [], label=f"r = {r1:.2f}'-{r2:.2f}'")
handles = [fake]
leg_kw = {'handlelength': 0, 'handletextpad': 0}
# If this is the first bin, also add a PI tag
if PI_legend and not rind and not show_fields:
pi_fake = plt.Line2D([], [], label=PI)
handles.append(pi_fake)
leg_kw['labelcolor'] = ['k', clr]
ax.legend(handles=handles, **leg_kw)
ax_ind += 1
# ------------------------------------------------------------------
# Put labels on subfigs
# ------------------------------------------------------------------
for sf in fig.subfigs[show_fields:]:
sf.supxlabel(r'Mass [$M_\odot$]')
fig.subfigs[show_fields].supylabel('dN/dm')
return fig
@_support_units
def plot_MF_fields(self, fig=None, ax=None, *, radii=("rh",),
cmap=None, grid=True):
'''plot all mass function fields in this observation
'''
import shapely.geometry as geom
fig, ax = self._setup_artist(fig, ax)
# Centre dot
ax.plot(0, 0, 'kx')
# ------------------------------------------------------------------
# Iterate over each PI and its radial bins
# ------------------------------------------------------------------
for PI, bins in self.mass_func.items():
for rbin in bins:
# ----------------------------------------------------------
# Plot the field using this `Field` slice's own plotting method
# ----------------------------------------------------------
clr = rbin.get("colour", None)
rbin['field'].plot(ax, fc=clr, alpha=0.7, ec='k', label=PI)
# make this label private so it's only added once to legend
PI = f'_{PI}'
# ------------------------------------------------------------------
# If desired, add a "pseudo" grid in the polar projection, at 2
# arcmin intervals, up to the rt
# ------------------------------------------------------------------
# Ensure the gridlines don't affect the axes scaling
ax.autoscale(False)
if grid:
# TODO this should probably use distance to furthest field
rt = self.rt if hasattr(self, 'rt') else (20 << u.arcmin)
ticks = np.arange(2, rt.to_value('arcmin'), 2)
# make sure this grid matches normal grids
grid_kw = {
'color': plt.rcParams.get('grid.color'),
'linestyle': plt.rcParams.get('grid.linestyle'),
'linewidth': plt.rcParams.get('grid.linewidth'),
'alpha': plt.rcParams.get('grid.alpha'),
'zorder': 0.5
}
for gr in ticks:
circle = np.array(geom.Point(0, 0).buffer(gr).exterior.coords).T
gr_line, = ax.plot(*circle, **grid_kw)
ax.annotate(f"{gr:.0f}'", xy=(circle[0].max(), 0),
color=grid_kw['color'])
# ------------------------------------------------------------------
# Try to plot the various radii quantities from this model, if desired
# ------------------------------------------------------------------
# TODO for CI this could be a CI of rh, ra, rt actually (60)
for r_type in radii:
# This is to explicitly avoid very ugly exceptions from geom
if r_type not in {'rh', 'ra', 'rt'}:
mssg = f'radii must be one of {{rh, ra, rt}}, not `{r_type}`'
raise TypeError(mssg)
radius = getattr(self, r_type).to_value('arcmin')
circle = np.array(geom.Point(0, 0).buffer(radius).exterior.coords).T
ax.plot(*circle, ls='--')
ax.text(0, circle[1].max(), r_type)
# ------------------------------------------------------------------
# Add plot labels and legends
# ------------------------------------------------------------------
ax.set_xlabel('RA [arcmin]')
ax.set_ylabel('DEC [arcmin]')
# TODO figure out a better way of handling this always using best? (75)
ax.legend(loc='upper left' if grid else 'best')
return fig
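# Hedged usage sketch (comments only): overplotting the half-mass and
# truncation radii on the field map (assuming both attributes exist on
# this visualizer):
#   fig = viz.plot_MF_fields(radii=('rh', 'rt'), grid=True)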
# -----------------------------------------------------------------------
# Model plotting
# -----------------------------------------------------------------------
@_support_units
def plot_density(self, fig=None, ax=None, kind='all', *,
x_unit='pc', label_position='left'):
if kind == 'all':
kind = {'MS', 'tot', 'BH', 'WD', 'NS'}
fig, ax = self._setup_artist(fig, ax)
# ax.set_title('Surface Mass Density')
# Total density
if 'tot' in kind:
kw = {"label": "Total", "color": "tab:cyan"}
self._plot_profile(ax, None, None, self.rho_tot,
x_unit=x_unit, **kw)
# Total Remnant density
if 'rem' in kind:
kw = {"label": "Remnants", "color": "tab:purple"}
self._plot_profile(ax, None, None, self.rho_rem,
x_unit=x_unit, **kw)
# Main sequence density
if 'MS' in kind:
kw = {"label": "Main-sequence stars", "color": "tab:orange"}
self._plot_profile(ax, None, None, self.rho_MS,
x_unit=x_unit, **kw)
if 'WD' in kind:
kw = {"label": "White Dwarfs", "color": "tab:green"}
self._plot_profile(ax, None, None, self.rho_WD,
x_unit=x_unit, **kw)
if 'NS' in kind:
kw = {"label": "Neutron Stars", "color": "tab:red"}
self._plot_profile(ax, None, None, self.rho_NS,
x_unit=x_unit, **kw)
# Black hole density
if 'BH' in kind:
kw = {"label": "Black Holes", "color": "tab:gray"}
self._plot_profile(ax, None, None, self.rho_BH,
x_unit=x_unit, **kw)
ax.set_yscale("log")
ax.set_xscale("log")
self._set_ylabel(ax, 'Mass Density', label_position)
self._set_xlabel(ax)
fig.legend(loc='upper center', ncol=6,
bbox_to_anchor=(0.5, 1.), fancybox=True)
return fig
@_support_units
def plot_surface_density(self, fig=None, ax=None, kind='all', *,
x_unit='pc', label_position='left'):
if kind == 'all':
kind = {'MS', 'tot', 'BH', 'WD', 'NS'}
fig, ax = self._setup_artist(fig, ax)
# ax.set_title('Surface Mass Density')
# Total density
if 'tot' in kind:
kw = {"label": "Total", "color": "tab:cyan"}
self._plot_profile(ax, None, None, self.Sigma_tot,
x_unit=x_unit, **kw)
# Total Remnant density
if 'rem' in kind:
kw = {"label": "Remnants", "color": "tab:purple"}
self._plot_profile(ax, None, None, self.Sigma_rem,
x_unit=x_unit, **kw)
# Main sequence density
if 'MS' in kind:
kw = {"label": "Main-sequence stars", "color": "tab:orange"}
self._plot_profile(ax, None, None, self.Sigma_MS,
x_unit=x_unit, **kw)
if 'WD' in kind:
kw = {"label": "White Dwarfs", "color": "tab:green"}
self._plot_profile(ax, None, None, self.Sigma_WD,
x_unit=x_unit, **kw)
if 'NS' in kind:
kw = {"label": "Neutron Stars", "color": "tab:red"}
self._plot_profile(ax, None, None, self.Sigma_NS,
x_unit=x_unit, **kw)
# Black hole density
if 'BH' in kind:
kw = {"label": "Black Holes", "color": "tab:gray"}
self._plot_profile(ax, None, None, self.Sigma_BH,
x_unit=x_unit, **kw)
ax.set_yscale("log")
ax.set_xscale("log")
self._set_ylabel(ax, 'Surface Mass Density', label_position)
self._set_xlabel(ax)
fig.legend(loc='upper center', ncol=6,
bbox_to_anchor=(0.5, 1.), fancybox=True)
return fig
@_support_units
def plot_cumulative_mass(self, fig=None, ax=None, kind='all', *,
x_unit='pc', label_position='left'):
if kind == 'all':
kind = {'MS', 'tot', 'BH', 'WD', 'NS'}
fig, ax = self._setup_artist(fig, ax)
# ax.set_title('Cumulative Mass')
# Total density
if 'tot' in kind:
kw = {"label": "Total", "color": "tab:cyan"}
self._plot_profile(ax, None, None, self.cum_M_tot,
x_unit=x_unit, **kw)
# Main sequence density
if 'MS' in kind:
kw = {"label": "Main-sequence stars", "color": "tab:orange"}
self._plot_profile(ax, None, None, self.cum_M_MS,
x_unit=x_unit, **kw)
if 'WD' in kind:
kw = {"label": "White Dwarfs", "color": "tab:green"}
self._plot_profile(ax, None, None, self.cum_M_WD,
x_unit=x_unit, **kw)
if 'NS' in kind:
kw = {"label": "Neutron Stars", "color": "tab:red"}
self._plot_profile(ax, None, None, self.cum_M_NS,
x_unit=x_unit, **kw)
# Black hole density
if 'BH' in kind:
kw = {"label": "Black Holes", "color": "tab:gray"}
self._plot_profile(ax, None, None, self.cum_M_BH,
x_unit=x_unit, **kw)
ax.set_yscale("log")
ax.set_xscale("log")
self._set_ylabel(ax, rf'$M_{{enc}}$', label_position)
self._set_xlabel(ax)
fig.legend(loc='upper center', ncol=5,
bbox_to_anchor=(0.5, 1.), fancybox=True)
return fig
@_support_units
def plot_remnant_fraction(self, fig=None, ax=None, *, show_total=True,
x_unit='pc', label_position='left'):
'''Fraction of mass in remnants vs MS stars, as in Baumgardt'''
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Remnant Fraction")
ax.set_xscale("log")
self._plot_profile(ax, None, None, self.frac_M_MS,
x_unit=x_unit, label="Main-sequence stars")
self._plot_profile(ax, None, None, self.frac_M_rem,
x_unit=x_unit, label="Remnants")
label = r"Mass fraction $M_{MS}/M_{tot}$, $M_{remn.}/M_{tot}$"
self._set_ylabel(ax, label, label_position)
self._set_xlabel(ax)
ax.set_ylim(0.0, 1.0)
if show_total:
from matplotlib.offsetbox import AnchoredText
tot = AnchoredText(fr'$f_{{\mathrm{{remn}}}}={self.f_rem:.2f}$',
frameon=True, loc='upper center')
tot.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(tot)
ax.legend()
return fig
# -----------------------------------------------------------------------
# Goodness of fit statistics
# -----------------------------------------------------------------------
@_support_units
def _compute_profile_chi2(self, ds_pattern, y_key, model_data, *,
x_key='r', err_transform=None, reduced=True):
'''compute chi2 for this dataset (pattern)'''
chi2 = 0.
# ensure that the data is (mass bin, intervals, r domain)
if len(model_data.shape) != 3:
raise ValueError("invalid model data shape")
# ------------------------------------------------------------------
# Determine the relevant datasets to the given pattern
# ------------------------------------------------------------------
ds_pattern = ds_pattern or ''
datasets = self.obs.filter_datasets(ds_pattern)
# ------------------------------------------------------------------
# Iterate over the datasets, computing chi2 for each
# ------------------------------------------------------------------
for dset in datasets.values():
# --------------------------------------------------------------
# get mass bin of this dataset
# --------------------------------------------------------------
if 'm' in dset.mdata:
m = dset.mdata['m'] * u.Msun
mass_bin = np.where(self.mj == m)[0][0]
else:
mass_bin = self.star_bin
# --------------------------------------------------------------
# get data values
# --------------------------------------------------------------
try:
xdata = dset[x_key] # x_key='r'
ydata = dset[y_key]
except KeyError:
continue
yerr = self._get_err(dset, y_key)
if err_transform is not None:
yerr = err_transform(yerr)
yerr = yerr.to(ydata.unit)
# --------------------------------------------------------------
# get model values
# --------------------------------------------------------------
xmodel = self.r.to(xdata.unit)
ymedian = self._get_median(model_data[mass_bin, :, :])
# TEMPORARY FIX FOR RATIO
# ymedian[np.isnan(ymedian)] = 1.0 << ymedian.unit
ymodel = util.QuantitySpline(xmodel, ymedian)(xdata).to(ydata.unit)
# --------------------------------------------------------------
# compute chi2
# --------------------------------------------------------------
denom = (ydata.size - 13) if reduced else 1.
chi2 += np.nansum(((ymodel - ydata) / yerr)**2) / denom
return chi2
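# Note (added for clarity): per dataset the statistic accumulated above is
# the (optionally reduced) chi-squared
#   chi2 = sum_i ((y_model(x_i) - y_i) / sigma_i)**2 / (N_i - 13)
# where the hard-coded 13 stands in for the number of fitted parameters
# (see the related TODO in `_compute_massfunc_chi2`).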
@_support_units
def _compute_massfunc_chi2(self, *, reduced=True):
chi2 = 0.
# ------------------------------------------------------------------
# Iterate over each PI, gathering data
# ------------------------------------------------------------------
for PI in sorted(self.mass_func,
key=lambda k: self.mass_func[k][0]['r1']):
bins = self.mass_func[PI]
# Get data for this PI
mf = self.obs[PI]
mbin_mean = (mf['m1'] + mf['m2']) / 2.
mbin_width = mf['m2'] - mf['m1']
N = mf['N'] / mbin_width
ΔN = mf['ΔN'] / mbin_width
# --------------------------------------------------------------
# Iterate over radial bin dicts for this PI
# --------------------------------------------------------------
for rind, rbin in enumerate(bins):
# ----------------------------------------------------------
# Get data
# ----------------------------------------------------------
r_mask = ((mf['r1'] == rbin['r1']) & (mf['r2'] == rbin['r2']))
xdata = mbin_mean[r_mask]
ydata = N[r_mask].value
yerr = self.F * ΔN[r_mask].value
# ----------------------------------------------------------
# Get model
# ----------------------------------------------------------
xmodel = rbin['mj']
ymedian = self._get_median(rbin['dNdm'])
ymodel = util.QuantitySpline(xmodel, ymedian)(xdata)
# TODO really should get this Nparam dynamically, in case some are fixed
denom = (ydata.size - 13) if reduced else 1.
chi2 += np.sum(((ymodel - ydata) / yerr)**2) / denom
return chi2
@property
def chi2(self):
'''compute chi2 between median model and all datasets
Be cognizant that this is only the median model chi2, and not
necessarily useful for actual statistics
'''
# TODO seems to produce a lot of infs?
def numdens_nuisance(err):
return np.sqrt(err**2 + (self.s2 << err.unit**2))
all_components = [
{'ds_pattern': '*velocity_dispersion*', 'y_key': 'σ',
'model_data': self.LOS},
{'ds_pattern': '*proper_motion*', 'y_key': 'PM_tot',
'model_data': self.pm_tot},
{'ds_pattern': '*proper_motion*', 'y_key': 'PM_ratio',
'model_data': self.pm_ratio},
{'ds_pattern': '*proper_motion*', 'y_key': 'PM_T',
'model_data': self.pm_T},
{'ds_pattern': '*proper_motion*', 'y_key': 'PM_R',
'model_data': self.pm_R},
{'ds_pattern': '*number_density*', 'y_key': 'Σ',
'model_data': self.numdens, 'err_transform': numdens_nuisance},
]
chi2 = 0.
for comp in all_components:
chi2 += self._compute_profile_chi2(**comp)
chi2 += self._compute_massfunc_chi2()
return chi2
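# Hedged usage sketch (comments only): the goodness-of-fit of the median
# model against all attached datasets can then be inspected simply as
#   print('median-model chi2 =', viz.chi2)
# bearing in mind the caveat in the property docstring above.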
class ModelVisualizer(_ClusterVisualizer):
'''
class for making, showing, saving all the plots related to a single model
'''
@classmethod
def from_chain(cls, chain, observations, method='median'):
'''
create a Visualizer instance based on a chain, by taking the median
(or mean) of the chain parameters
'''
reduc_methods = {'median': np.median, 'mean': np.mean}
# if 3d (Niters, Nwalkers, Nparams)
# if 2d (Nwalkers, Nparams)
# if 1d (Nparams)
chain = chain.reshape((-1, chain.shape[-1]))
theta = reduc_methods[method](chain, axis=0)
return cls(Model(theta, observations), observations)
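# Hedged usage sketch (comments only), assuming `chain` is an emcee-style
# array of shape (Niters, Nwalkers, Nparams) and `obs` is the corresponding
# observations object:
#   viz = ModelVisualizer.from_chain(chain, obs, method='median')
#   viz.plot_all()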
@classmethod
def from_theta(cls, theta, observations):
'''
create a Visualizer instance based on a theta, see `Model` for allowed
theta types
'''
return cls(Model(theta, observations), observations)
def __init__(self, model, observations=None):
self.model = model
self.obs = observations if observations else model.observations
self.name = observations.cluster
self.rh = model.rh
self.ra = model.ra
self.rt = model.rt
self.F = model.F
self.s2 = model.theta['s2']
self.d = model.d
self.r = model.r
self.rlims = (9e-3, self.r.max().value + 5) << self.r.unit
self._2πr = 2 * np.pi * model.r
self.star_bin = model.nms - 1
self.mj = model.mj
self.f_rem = np.sum(model.Mj[model._remnant_bins]) / model.M
self.LOS = np.sqrt(self.model.v2pj)[:, np.newaxis, :]
self.pm_T = np.sqrt(model.v2Tj)[:, np.newaxis, :]
self.pm_R = np.sqrt(model.v2Rj)[:, np.newaxis, :]
self.pm_tot = np.sqrt(0.5 * (self.pm_T**2 + self.pm_R**2))
self.pm_ratio = self.pm_T / self.pm_R
self._init_numdens(model, observations)
self._init_massfunc(model, observations)
self._init_surfdens(model, observations)
self._init_dens(model, observations)
self._init_mass_frac(model, observations)
self._init_cum_mass(model, observations)
# TODO a lot of these init functions could be more homogeneous
@_ClusterVisualizer._support_units
def _init_numdens(self, model, observations):
model_nd = model.Sigmaj / model.mj[:, np.newaxis]
nd = np.empty(model_nd.shape)[:, np.newaxis, :] << model_nd.unit
# Check for observational numdens profiles, to compute scaling factor K
if obs_nd := observations.filter_datasets('*number_density'):
if len(obs_nd) > 1:
mssg = ('Too many number density datasets, '
'computing scaling factor using only final dataset')
logging.warning(mssg)
obs_nd = list(obs_nd.values())[-1]
obs_r = obs_nd['r'].to(model.r.unit)
for mbin in range(model_nd.shape[0]):
nd_interp = util.QuantitySpline(model.r, model_nd[mbin, :])
K = (np.nansum(obs_nd['Σ'] * nd_interp(obs_r) / obs_nd['Σ']**2)
/ np.nansum(nd_interp(obs_r)**2 / obs_nd['Σ']**2))
nd[mbin, 0, :] = K * model_nd[mbin, :]
else:
mssg = 'No number density datasets found, setting K=1'
logging.info(mssg)
nd[:, 0, :] = model_nd[:, :]
self.numdens = nd
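# Note (added for clarity): the scaling factor K above is a weighted
# least-squares amplitude matching the model profile to the observed one,
#   K = sum(Sigma_obs * nd_model / w**2) / sum(nd_model**2 / w**2),
# where the weights w are taken here as the observed values Sigma_obs
# themselves rather than their uncertainties.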
@_ClusterVisualizer._support_units
def _init_massfunc(self, model, observations, *, cmap=None):
'''
sets self.mass_func as a dict of PIs, where each PI maps to a list of
subdicts. Each subdict represents a single radial slice (within that PI)
and contains the radii, the mass function values, and the field slice
'''
cmap = cmap or plt.cm.rainbow
self.mass_func = {}
cen = (observations.mdata['RA'], observations.mdata['DEC'])
PI_list = observations.filter_datasets('*mass_function*')
densityj = [util.QuantitySpline(model.r, model.Sigmaj[j])
for j in range(model.nms)]
for i, (key, mf) in enumerate(PI_list.items()):
self.mass_func[key] = []
clr = cmap(i / len(PI_list))
field = mass.Field.from_dataset(mf, cen=cen)
rbins = np.unique(np.c_[mf['r1'], mf['r2']], axis=0)
rbins.sort(axis=0)
for r_in, r_out in rbins:
this_slc = {'r1': r_in, 'r2': r_out}
field_slice = field.slice_radially(r_in, r_out)
this_slc['field'] = field_slice
this_slc['colour'] = clr
this_slc['dNdm'] = np.empty((1, model.nms))
this_slc['mj'] = model.mj[:model.nms]
sample_radii = field_slice.MC_sample(300).to(u.pc)
for j in range(model.nms):
Nj = field_slice.MC_integrate(densityj[j], sample_radii)
widthj = (model.mj[j] * model.mes_widths[j])
this_slc['dNdm'][0, j] = (Nj / widthj).value
self.mass_func[key].append(this_slc)
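# Note (added for clarity): each radial slice's dN/dm is built by
# Monte-Carlo integrating the surface-density spline Sigma_j over the field
# polygon (300 sample points, as used above) and dividing the resulting
# count N_j by the mass-bin width m_j * mes_widths_j.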
@_ClusterVisualizer._support_units
def _init_dens(self, model, observations):
shp = (np.newaxis, np.newaxis, slice(None))
self.rho_tot = np.sum(model.rhoj, axis=0)[shp]
self.rho_MS = np.sum(model.rhoj[model._star_bins], axis=0)[shp]
self.rho_rem = np.sum(model.rhoj[model._remnant_bins], axis=0)[shp]
self.rho_BH = np.sum(model.BH_rhoj, axis=0)[shp]
self.rho_WD = np.sum(model.WD_rhoj, axis=0)[shp]
self.rho_NS = np.sum(model.NS_rhoj, axis=0)[shp]
@_ClusterVisualizer._support_units
def _init_surfdens(self, model, observations):
shp = (np.newaxis, np.newaxis, slice(None))
self.Sigma_tot = np.sum(model.Sigmaj, axis=0)[shp]
self.Sigma_MS = np.sum(model.Sigmaj[model._star_bins], axis=0)[shp]
self.Sigma_rem = np.sum(model.Sigmaj[model._remnant_bins], axis=0)[shp]
self.Sigma_BH = np.sum(model.BH_Sigmaj, axis=0)[shp]
self.Sigma_WD = np.sum(model.WD_Sigmaj, axis=0)[shp]
self.Sigma_NS = np.sum(model.NS_Sigmaj, axis=0)[shp]
@_ClusterVisualizer._support_units
def _init_mass_frac(self, model, observations):
int_MS = util.QuantitySpline(self.r, self._2πr * self.Sigma_MS)
int_rem = util.QuantitySpline(self.r, self._2πr * self.Sigma_rem)
int_tot = util.QuantitySpline(self.r, self._2πr * self.Sigma_tot)
mass_MS = np.empty((1, 1, self.r.size))
mass_rem = np.empty((1, 1, self.r.size))
mass_tot = np.empty((1, 1, self.r.size))
# TODO the rbins at the end always mess up fractional stuff, drop to 0
mass_MS[0, 0, 0] = mass_rem[0, 0, 0] = mass_tot[0, 0, 0] = np.nan
for i in range(1, self.r.size - 2):
mass_MS[0, 0, i] = int_MS.integral(self.r[i], self.r[i + 1]).value
mass_rem[0, 0, i] = int_rem.integral(self.r[i], self.r[i + 1]).value
mass_tot[0, 0, i] = int_tot.integral(self.r[i], self.r[i + 1]).value
self.frac_M_MS = mass_MS / mass_tot
self.frac_M_rem = mass_rem / mass_tot
@_ClusterVisualizer._support_units
def _init_cum_mass(self, model, observations):
int_tot = util.QuantitySpline(self.r, self._2πr * self.Sigma_tot)
int_MS = util.QuantitySpline(self.r, self._2πr * self.Sigma_MS)
int_BH = util.QuantitySpline(self.r, self._2πr * self.Sigma_BH)
int_WD = util.QuantitySpline(self.r, self._2πr * self.Sigma_WD)
int_NS = util.QuantitySpline(self.r, self._2πr * self.Sigma_NS)
cum_tot = np.empty((1, 1, self.r.size)) << u.Msun
cum_MS = np.empty((1, 1, self.r.size)) << u.Msun
cum_BH = np.empty((1, 1, self.r.size)) << u.Msun
cum_WD = np.empty((1, 1, self.r.size)) << u.Msun
cum_NS = np.empty((1, 1, self.r.size)) << u.Msun
for i in range(0, self.r.size):
cum_tot[0, 0, i] = int_tot.integral(model.r[0], model.r[i])
cum_MS[0, 0, i] = int_MS.integral(model.r[0], model.r[i])
cum_BH[0, 0, i] = int_BH.integral(model.r[0], model.r[i])
cum_WD[0, 0, i] = int_WD.integral(model.r[0], model.r[i])
cum_NS[0, 0, i] = int_NS.integral(model.r[0], model.r[i])
self.cum_M_tot = cum_tot
self.cum_M_MS = cum_MS
self.cum_M_WD = cum_WD
self.cum_M_NS = cum_NS
self.cum_M_BH = cum_BH
class CIModelVisualizer(_ClusterVisualizer):
'''
class for making, showing, saving all the plots related to a bunch of models
in the form of confidence intervals
'''
@_ClusterVisualizer._support_units
def plot_f_rem(self, fig=None, ax=None, bins='auto', color='b'):
fig, ax = self._setup_artist(fig, ax)
color = mpl_clr.to_rgb(color)
facecolor = color + (0.33, )
ax.hist(self.f_rem, histtype='stepfilled',
bins=bins, ec=color, fc=facecolor, lw=2)
return fig
@_ClusterVisualizer._support_units
def plot_BH_mass(self, fig=None, ax=None, bins='auto', color='b'):
fig, ax = self._setup_artist(fig, ax)
color = mpl_clr.to_rgb(color)
facecolor = color + (0.33, )
ax.hist(self.BH_mass, histtype='stepfilled',
bins=bins, ec=color, fc=facecolor, lw=2)
return fig
@_ClusterVisualizer._support_units
def plot_BH_num(self, fig=None, ax=None, bins='auto', color='b'):
fig, ax = self._setup_artist(fig, ax)
color = mpl_clr.to_rgb(color)
facecolor = color + (0.33, )
ax.hist(self.BH_num, histtype='stepfilled',
bins=bins, ec=color, fc=facecolor, lw=2)
return fig
def __init__(self, observations):
self.obs = observations
self.name = observations.cluster
@classmethod
def from_chain(cls, chain, observations, N=100, *,
verbose=False, pool=None):
import functools
viz = cls(observations)
viz.N = N
# ------------------------------------------------------------------
# Get info about the chain and set of models
# ------------------------------------------------------------------
# Flatten walkers, if not already
chain = chain.reshape((-1, chain.shape[-1]))[-N:]
median_chain = np.median(chain, axis=0)
# TODO get these indices more dynamically
viz.F = median_chain[7]
viz.s2 = median_chain[6]
viz.d = median_chain[12] << u.kpc
# Setup the radial domain to interpolate everything onto
# We estimate the maximum radius needed will be given by the model with
# the largest value of the truncation parameter "g". This should be a
# valid enough assumption for our needs. While we have it, we'll also
# use this model to grab the other values we need, which shouldn't
# change much between models, so using this extreme model is okay.
# warning: in very large N samples, this g might be huge, and lead to a
# very large rt. I'm not really sure yet how that might affect the CIs
# or plots
# TODO https://github.com/nmdickson/GCfit/issues/100
huge_model = Model(chain[np.argmax(chain[:, 4])], viz.obs)
viz.rt = huge_model.rt
viz.r = np.r_[0, np.geomspace(1e-5, viz.rt.value, num=99)] << u.pc
viz.rlims = (9e-3, viz.r.max().value + 5) << viz.r.unit
# Assume that this example model has same nms bin as all models
# This approximation isn't exactly correct (especially when Ndot != 0),
# but close enough for plots
viz.star_bin = 0
# mj only contains nms and tracer bins (the only ones we plot anyways)
mj_MS = huge_model.mj[huge_model._star_bins][-1]
mj_tracer = huge_model.mj[huge_model._tracer_bins]
viz.mj = np.r_[mj_MS, mj_tracer]
# ------------------------------------------------------------------
# Setup the final full parameters arrays with dims of
# [mass bins, intervals (from percentile of models), radial bins] for
# all "profile" datasets
# ------------------------------------------------------------------
Nr = viz.r.size
# velocities
vel_unit = np.sqrt(huge_model.v2Tj).unit
Nm = 1 + len(mj_tracer)
vpj = np.full((Nm, N, Nr), np.nan) << vel_unit
vTj, vRj, vtotj = vpj.copy(), vpj.copy(), vpj.copy()
vaj = np.full((Nm, N, Nr), np.nan) << u.dimensionless_unscaled
# mass density
rho_unit = huge_model.rhoj.unit
rho_tot = np.full((1, N, Nr), np.nan) << rho_unit
rho_MS, rho_BH = rho_tot.copy(), rho_tot.copy()
rho_WD, rho_NS = rho_tot.copy(), rho_tot.copy()
# surface density
Sigma_unit = huge_model.Sigmaj.unit
Sigma_tot = np.full((1, N, Nr), np.nan) << Sigma_unit
Sigma_MS, Sigma_BH = Sigma_tot.copy(), Sigma_tot.copy()
Sigma_WD, Sigma_NS = Sigma_tot.copy(), Sigma_tot.copy()
# Cumulative mass
mass_unit = huge_model.M.unit
cum_M_tot = np.full((1, N, Nr), np.nan) << mass_unit
cum_M_MS, cum_M_BH = cum_M_tot.copy(), cum_M_tot.copy()
cum_M_WD, cum_M_NS = cum_M_tot.copy(), cum_M_tot.copy()
# Mass Fraction
frac_M_MS = np.full((1, N, Nr), np.nan) << u.dimensionless_unscaled
frac_M_rem = frac_M_MS.copy()
f_rem = np.full(N, np.nan) << u.dimensionless_unscaled
# number density
numdens = np.full((1, N, Nr), np.nan) << u.pc**-2
# mass function
massfunc = viz._prep_massfunc(viz.obs)
# massfunc = np.empty((N, N_rbins, huge_model.nms))
for rbins in massfunc.values():
for rslice in rbins:
rslice['mj'] = huge_model.mj[:huge_model.nms]
rslice['dNdm'] = np.full((N, huge_model.nms), np.nan)
# BH mass
BH_mass = np.full(N, np.nan) << u.Msun
BH_num = np.full(N, np.nan) << u.dimensionless_unscaled
# ------------------------------------------------------------------
# Setup iteration and pooling
# ------------------------------------------------------------------
get_model = functools.partial(_get_model, observations=viz.obs)
try:
_map = map if pool is None else pool.imap_unordered
except AttributeError:
mssg = ("Invalid pool, currently only support pools with an "
"`imap_unordered` method")
raise ValueError(mssg)
if verbose:
import tqdm
loader = tqdm.tqdm(enumerate(_map(get_model, chain)), total=N)
else:
loader = enumerate(_map(get_model, chain))
# ------------------------------------------------------------------
# iterate over all models in the sample and compute/store their
# relevant parameters
# ------------------------------------------------------------------
for model_ind, model in loader:
if model is None:
# TODO would be better to extend chain so N are still computed
# for now this ind will be filled with nan
continue
equivs = util.angular_width(model.d)
# Velocities
# convoluted way of going from a slice to a list of indices
tracers = list(range(len(model.mj))[model._tracer_bins])
for i, mass_bin in enumerate([model.nms - 1] + tracers):
slc = (i, model_ind, slice(None))
vTj[slc], vRj[slc], vtotj[slc], \
vaj[slc], vpj[slc] = viz._init_velocities(model, mass_bin)
slc = (0, model_ind, slice(None))
# Mass Densities
rho_MS[slc], rho_tot[slc], rho_BH[slc], \
rho_WD[slc], rho_NS[slc] = viz._init_dens(model)
# Surface Densities
Sigma_MS[slc], Sigma_tot[slc], Sigma_BH[slc], \
Sigma_WD[slc], Sigma_NS[slc] = viz._init_surfdens(model)
# Cumulative Mass distribution
cum_M_MS[slc], cum_M_tot[slc], cum_M_BH[slc], \
cum_M_WD[slc], cum_M_NS[slc] = viz._init_cum_mass(model)
# Number Densities
numdens[slc] = viz._init_numdens(model, equivs=equivs)
# Mass Functions
for rbins in massfunc.values():
for rslice in rbins:
mf = rslice['dNdm']
mf[model_ind, ...] = viz._init_dNdm(model, rslice, equivs)
# Mass Fractions
frac_M_MS[slc], frac_M_rem[slc] = viz._init_mass_frac(model)
f_rem[model_ind] = np.sum(model.Mj[model._remnant_bins]) / model.M
# Black holes
BH_mass[model_ind] = np.sum(model.BH_Mj)
BH_num[model_ind] = np.sum(model.BH_Nj)
# ------------------------------------------------------------------
# compute and store the percentiles and medians
# ------------------------------------------------------------------
q = [97.72, 84.13, 50., 15.87, 2.28]
axes = (1, 0, 2) # `np.percentile` messes up the dimensions
perc = np.nanpercentile
viz.pm_T = np.transpose(perc(vTj, q, axis=1), axes)
viz.pm_R = np.transpose(perc(vRj, q, axis=1), axes)
viz.pm_tot = np.transpose(perc(vtotj, q, axis=1), axes)
viz.pm_ratio = np.transpose(perc(vaj, q, axis=1), axes)
viz.LOS = np.transpose(perc(vpj, q, axis=1), axes)
viz.rho_MS = np.transpose(perc(rho_MS, q, axis=1), axes)
viz.rho_tot = np.transpose(perc(rho_tot, q, axis=1), axes)
viz.rho_BH = np.transpose(perc(rho_BH, q, axis=1), axes)
viz.rho_WD = np.transpose(perc(rho_WD, q, axis=1), axes)
viz.rho_NS = np.transpose(perc(rho_NS, q, axis=1), axes)
viz.Sigma_MS = np.transpose(perc(Sigma_MS, q, axis=1), axes)
viz.Sigma_tot = np.transpose(perc(Sigma_tot, q, axis=1), axes)
viz.Sigma_BH = np.transpose(perc(Sigma_BH, q, axis=1), axes)
viz.Sigma_WD = np.transpose(perc(Sigma_WD, q, axis=1), axes)
viz.Sigma_NS = np.transpose(perc(Sigma_NS, q, axis=1), axes)
viz.cum_M_MS = np.transpose(perc(cum_M_MS, q, axis=1), axes)
viz.cum_M_tot = np.transpose(perc(cum_M_tot, q, axis=1), axes)
viz.cum_M_BH = np.transpose(perc(cum_M_BH, q, axis=1), axes)
viz.cum_M_WD = np.transpose(perc(cum_M_WD, q, axis=1), axes)
viz.cum_M_NS = np.transpose(perc(cum_M_NS, q, axis=1), axes)
viz.numdens = np.transpose(perc(numdens, q, axis=1), axes)
viz.mass_func = massfunc
for rbins in viz.mass_func.values():
for rslice in rbins:
rslice['dNdm'] = perc(rslice['dNdm'], q, axis=0)
viz.frac_M_MS = perc(frac_M_MS, q, axis=1)
viz.frac_M_rem = perc(frac_M_rem, q, axis=1)
viz.f_rem = f_rem
viz.BH_mass = BH_mass
viz.BH_num = BH_num
return viz
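# Hedged usage sketch (comments only), assuming a multiprocessing-style
# pool exposing `imap_unordered`:
#   with multiprocessing.Pool(4) as pool:
#       ci_viz = CIModelVisualizer.from_chain(chain, obs, N=500,
#                                             verbose=True, pool=pool)
#   ci_viz.plot_BH_mass(bins=50)
# `multiprocessing.Pool` is only an example of a compatible pool.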
def _init_velocities(self, model, mass_bin):
vT = np.sqrt(model.v2Tj[mass_bin])
vT_interp = util.QuantitySpline(model.r, vT)
vT = vT_interp(self.r)
vR = np.sqrt(model.v2Rj[mass_bin])
vR_interp = util.QuantitySpline(model.r, vR)
vR = vR_interp(self.r)
vtot = np.sqrt(0.5 * (model.v2Tj[mass_bin] + model.v2Rj[mass_bin]))
vtot_interp = util.QuantitySpline(model.r, vtot)
vtot = vtot_interp(self.r)
va = np.sqrt(model.v2Tj[mass_bin] / model.v2Rj[mass_bin])
nan_mask = np.isnan(va)
va_interp = util.QuantitySpline(model.r[~nan_mask], va[~nan_mask], ext=3)
va = va_interp(self.r)
vp = np.sqrt(model.v2pj[mass_bin])
vp_interp = util.QuantitySpline(model.r, vp)
vp = vp_interp(self.r)
return vT, vR, vtot, va, vp
def _init_dens(self, model):
rho_MS = np.sum(model.rhoj[:model.nms], axis=0)
rho_MS_interp = util.QuantitySpline(model.r, rho_MS)
rho_MS = rho_MS_interp(self.r)
rho_tot = np.sum(model.rhoj, axis=0)
rho_tot_interp = util.QuantitySpline(model.r, rho_tot)
rho_tot = rho_tot_interp(self.r)
rho_BH = np.sum(model.BH_rhoj, axis=0)
rho_BH_interp = util.QuantitySpline(model.r, rho_BH)
rho_BH = rho_BH_interp(self.r)
rho_WD = np.sum(model.WD_rhoj, axis=0)
rho_WD_interp = util.QuantitySpline(model.r, rho_WD)
rho_WD = rho_WD_interp(self.r)
rho_NS = np.sum(model.NS_rhoj, axis=0)
rho_NS_interp = util.QuantitySpline(model.r, rho_NS)
rho_NS = rho_NS_interp(self.r)
return rho_MS, rho_tot, rho_BH, rho_WD, rho_NS
def _init_surfdens(self, model):
Sigma_MS = np.sum(model.Sigmaj[:model.nms], axis=0)
Sigma_MS_interp = util.QuantitySpline(model.r, Sigma_MS)
Sigma_MS = Sigma_MS_interp(self.r)
Sigma_tot = np.sum(model.Sigmaj, axis=0)
Sigma_tot_interp = util.QuantitySpline(model.r, Sigma_tot)
Sigma_tot = Sigma_tot_interp(self.r)
Sigma_BH = np.sum(model.BH_Sigmaj, axis=0)
Sigma_BH_interp = util.QuantitySpline(model.r, Sigma_BH)
Sigma_BH = Sigma_BH_interp(self.r)
Sigma_WD = np.sum(model.WD_Sigmaj, axis=0)
Sigma_WD_interp = util.QuantitySpline(model.r, Sigma_WD)
Sigma_WD = Sigma_WD_interp(self.r)
Sigma_NS = np.sum(model.NS_Sigmaj, axis=0)
Sigma_NS_interp = util.QuantitySpline(model.r, Sigma_NS)
Sigma_NS = Sigma_NS_interp(self.r)
return Sigma_MS, Sigma_tot, Sigma_BH, Sigma_WD, Sigma_NS
def _init_cum_mass(self, model):
# TODO it seems like the integrated mass is a bit less than total Mj?
_2πr = 2 * np.pi * model.r
cum_M_MS = _2πr * np.sum(model.Sigmaj[:model.nms], axis=0)
cum_M_MS_interp = util.QuantitySpline(model.r, cum_M_MS)
cum_M_MS = [cum_M_MS_interp.integral(self.r[0], ri) for ri in self.r]
cum_M_tot = _2πr * np.sum(model.Sigmaj, axis=0)
#!/usr/bin/env python
"""Implementation of dataset structure for dataloader from pytorch.
Two ways of training: with ground-truth labels, or with labels from the
current segmentation produced by the algorithm."""
__all__ = ['load_data']
__author__ = '<NAME>'
__date__ = 'August 2018'
from torch.utils.data import Dataset
import torch
import numpy as np
import re
import logging
from ute.utils.mapping import GroundTruth
from ute.utils.arg_pars import opt
from ute.utils.logging_setup import logger
from ute.utils.util_functions import join_data
class FeatureDataset(Dataset):
def __init__(self, root_dir, end, subaction='coffee', videos=None,
features=None):
"""
Fill out the _feature_list attribute: a list of [video name,
frame index in video, ground truth, feature vector] for each frame of each video
:param root_dir: root directory containing the folder 'ascii' with features, or
raw video files
:param end: file extension for the current video representation (e.g. 'gz',
'txt', 'avi')
"""
self._logger = logging.getLogger('basic')
self._logger.debug('FeatureDataset')
self._root_dir = root_dir
self._feature_list = None
self._end = end
self._old2new = {}
self._videoname2idx = {}
self._idx2videoname = {}
self._videos = videos
self._subaction = subaction
self._features = features
self.gt_map = GroundTruth()
self._with_predictions()
subactions = np.unique(self._feature_list[..., 2])
for idx, old in enumerate(subactions):
self._old2new[int(old)] = idx
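# Note (added for clarity): `_old2new` maps the original (possibly sparse)
# subaction labels found in column 2 of `_feature_list` onto consecutive
# indices, e.g. labels {3, 7, 12} become {3: 0, 7: 1, 12: 2}.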
def index2name(self):
return self._idx2videoname
def _with_predictions(self):
self._logger.debug('__init__')
for video_idx, video in enumerate(self._videos):
filename = re.match(r'[\.\/\w]*\/(\w+).\w+', video.path)
if filename is None:
self._logger.error('Check video paths: the template to extract the '
'video name does not match')
filename = filename.group(1)
self._videoname2idx[filename] = video_idx
self._idx2videoname[video_idx] = filename
names = np.asarray([video_idx] * video.n_frames).reshape((-1, 1))
idxs = np.asarray(list(range(0, video.n_frames))).reshape((-1, 1))
if opt.gt_training:
gt_file = np.asarray(video._gt)
import time
from collections import deque
import erdos
import numpy as np
import pylot.prediction.utils
from pylot.prediction.messages import PredictionMessage
from pylot.prediction.obstacle_prediction import ObstaclePrediction
from pylot.utils import Location, Transform, time_epoch_ms
import torch
try:
from pylot.prediction.prediction.r2p2.r2p2_model import R2P2
except ImportError:
raise Exception('Error importing R2P2.')
class R2P2PredictorOperator(erdos.Operator):
"""Wrapper operator for R2P2 ego-vehicle prediction module.
Args:
point_cloud_stream (:py:class:`erdos.ReadStream`, optional): Stream on
which point cloud messages are received.
tracking_stream (:py:class:`erdos.ReadStream`):
Stream on which
:py:class:`~pylot.perception.messages.ObstacleTrajectoriesMessage`
are received.
prediction_stream (:py:class:`erdos.WriteStream`): Stream on which
:py:class:`~pylot.prediction.messages.PredictionMessage`
messages are published.
lidar_setup (:py:class:`pylot.drivers.sensor_setup.LidarSetup`): Setup
of the lidar. This setup is used to get the maximum range of the
lidar.
"""
def __init__(self, point_cloud_stream, tracking_stream, prediction_stream,
flags, lidar_setup):
print("WARNING: R2P2 predicts only vehicle trajectories")
self._logger = erdos.utils.setup_logging(self.config.name,
self.config.log_file_name)
self._csv_logger = erdos.utils.setup_csv_logging(
self.config.name + '-csv', self.config.csv_log_file_name)
self._flags = flags
self._device = torch.device('cuda')
self._r2p2_model = R2P2().to(self._device)
state_dict = torch.load(flags.r2p2_model_path)
self._r2p2_model.load_state_dict(state_dict)
point_cloud_stream.add_callback(self.on_point_cloud_update)
tracking_stream.add_callback(self.on_trajectory_update)
erdos.add_watermark_callback([point_cloud_stream, tracking_stream],
[prediction_stream], self.on_watermark)
self._lidar_setup = lidar_setup
self._point_cloud_msgs = deque()
self._tracking_msgs = deque()
@staticmethod
def connect(point_cloud_stream, tracking_stream):
prediction_stream = erdos.WriteStream()
return [prediction_stream]
def destroy(self):
self._logger.warn('destroying {}'.format(self.config.name))
def on_watermark(self, timestamp, prediction_stream):
self._logger.debug('@{}: received watermark'.format(timestamp))
if timestamp.is_top:
return
point_cloud_msg = self._point_cloud_msgs.popleft()
tracking_msg = self._tracking_msgs.popleft()
start_time = time.time()
nearby_trajectories, nearby_vehicle_ego_transforms, \
nearby_trajectories_tensor, binned_lidars_tensor = \
self._preprocess_input(tracking_msg, point_cloud_msg)
num_predictions = len(nearby_trajectories)
self._logger.info(
'@{}: Getting R2P2 predictions for {} vehicles'.format(
timestamp, num_predictions))
if num_predictions == 0:
prediction_stream.send(PredictionMessage(timestamp, []))
return
# Run the forward pass.
z = torch.tensor(
np.random.normal(size=(num_predictions,
self._flags.prediction_num_future_steps,
2))).to(torch.float32).to(self._device)
model_start_time = time.time()
prediction_array, _ = self._r2p2_model.forward(
z, nearby_trajectories_tensor, binned_lidars_tensor)
model_runtime = (time.time() - model_start_time) * 1000
self._csv_logger.debug("{},{},{},{:.4f}".format(
time_epoch_ms(), timestamp.coordinates[0],
'r2p2-modelonly-runtime', model_runtime))
prediction_array = prediction_array.cpu().detach().numpy()
obstacle_predictions_list = self._postprocess_predictions(
prediction_array, nearby_trajectories,
nearby_vehicle_ego_transforms)
runtime = (time.time() - start_time) * 1000
self._csv_logger.debug("{},{},{},{:.4f}".format(
time_epoch_ms(), timestamp.coordinates[0], 'r2p2-runtime',
runtime))
prediction_stream.send(
PredictionMessage(timestamp, obstacle_predictions_list))
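# Note (added for clarity): the tensor `z` built above is the latent noise
# input to the R2P2 forward pass, drawn i.i.d. from a standard normal with
# shape (num_predictions, prediction_num_future_steps, 2), i.e. one 2D
# latent sample per predicted future step per nearby vehicle.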
def _preprocess_input(self, tracking_msg, point_cloud_msg):
nearby_vehicle_trajectories, nearby_vehicle_ego_transforms = \
tracking_msg.get_nearby_obstacles_info(
self._flags.prediction_radius,
lambda t: t.obstacle.is_vehicle())
point_cloud = point_cloud_msg.point_cloud.points
num_nearby_vehicles = len(nearby_vehicle_trajectories)
if num_nearby_vehicles == 0:
return [], [], [], []
# Pad and rotate the trajectory of each nearby vehicle to its
# coordinate frame. Also, remove the z-coordinate of the trajectory.
nearby_trajectories_tensor = [] # Pytorch tensor for network input.
for i in range(num_nearby_vehicles):
cur_trajectory = nearby_vehicle_trajectories[
i].get_last_n_transforms(self._flags.prediction_num_past_steps)
cur_trajectory = np.stack(
[[point.location.x, point.location.y, point.location.z]
for point in cur_trajectory])
rotated_trajectory = nearby_vehicle_ego_transforms[
i].inverse_transform_points(cur_trajectory)[:, :2]
nearby_trajectories_tensor.append(rotated_trajectory)
nearby_trajectories_tensor = np.stack(nearby_trajectories_tensor)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for spectral_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.signal import spectral_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.platform import test
class SpectralOpsTest(test.TestCase):
@staticmethod
def _np_hann_periodic_window(length):
if length == 1:
return np.ones(1)
odd = length % 2
if not odd:
length += 1
window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not odd:
window = window[:-1]
return window
@staticmethod
def _np_frame(data, window_length, hop_length):
num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
shape = (num_frames, window_length)
strides = (data.strides[0] * hop_length, data.strides[0])
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@staticmethod
def _np_stft(data, fft_length, hop_length, window_length):
frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return np.fft.rfft(frames * window, fft_length)
@staticmethod
def _np_inverse_stft(stft, fft_length, hop_length, window_length):
frames = np.fft.irfft(stft, fft_length)
# Pad or truncate the frames' inner dimension to window_length.
frames = frames[..., :window_length]
frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
[[0, max(0, window_length - frames.shape[-1])]], "constant")
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return SpectralOpsTest._np_overlap_add(frames * window, hop_length)
@staticmethod
def _np_overlap_add(stft, hop_length):
num_frames, window_length = np.shape(stft)
# Output length will be one complete window, plus another hop_length's
# worth of points for each additional window.
output_length = window_length + (num_frames - 1) * hop_length
output = np.zeros(output_length)
for i in range(num_frames):
output[i * hop_length:i * hop_length + window_length] += stft[i,]
return output
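# Worked example (added for clarity): with 4 frames of length 8 and a hop
# of 2, the overlap-add output has 8 + (4 - 1) * 2 = 14 samples.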
def _compare(self, signal, frame_length, frame_step, fft_length):
with self.cached_session(use_gpu=True) as sess:
actual_stft = spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
signal_ph = array_ops.placeholder(dtype=dtypes.as_dtype(signal.dtype))
actual_stft_from_ph = spectral_ops.stft(
signal_ph, frame_length, frame_step, fft_length, pad_end=False)
actual_inverse_stft = spectral_ops.inverse_stft(
actual_stft, frame_length, frame_step, fft_length)
actual_stft, actual_stft_from_ph, actual_inverse_stft = sess.run(
[actual_stft, actual_stft_from_ph, actual_inverse_stft],
feed_dict={signal_ph: signal})
actual_stft_ph = array_ops.placeholder(dtype=actual_stft.dtype)
actual_inverse_stft_from_ph = sess.run(
spectral_ops.inverse_stft(
actual_stft_ph, frame_length, frame_step, fft_length),
feed_dict={actual_stft_ph: actual_stft})
# Confirm that there is no difference in output when shape/rank is fully
# unknown or known.
self.assertAllClose(actual_stft, actual_stft_from_ph)
self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)
expected_stft = SpectralOpsTest._np_stft(
signal, fft_length, frame_step, frame_length)
self.assertAllClose(expected_stft, actual_stft, 1e-4, 1e-4)
expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
expected_stft, fft_length, frame_step, frame_length)
self.assertAllClose(
expected_inverse_stft, actual_inverse_stft, 1e-4, 1e-4)
def test_shapes(self):
with self.session(use_gpu=True):
signal = np.zeros((512,)).astype(np.float32)
# If fft_length is not provided, the smallest enclosing power of 2 of
# frame_length (8) is used.
stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
fft_length=16, pad_end=True)
self.assertAllEqual([64, 9], stft.shape.as_list())
self.assertAllEqual([64, 9], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
fft_length=8, pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = np.zeros((32, 9)).astype(np.complex64)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
fft_length=16, frame_step=8)
expected_length = (stft.shape[0] - 1) * 8 + 8
self.assertAllEqual([256], inverse_stft.shape.as_list())
self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)
def test_stft_and_inverse_stft(self):
"""Test that spectral_ops.stft/inverse_stft match a NumPy implementation."""
# Tuples of (signal_length, frame_length, frame_step, fft_length).
test_configs = [
(512, 64, 32, 64),
(512, 64, 64, 64),
(512, 72, 64, 64),
(512, 64, 25, 64),
(512, 25, 15, 36),
(123, 23, 5, 42),
]
for signal_length, frame_length, frame_step, fft_length in test_configs:
signal = np.random.random(signal_length).astype(np.float32)
self._compare(signal, frame_length, frame_step, fft_length)
def test_stft_round_trip(self):
# Tuples of (signal_length, frame_length, frame_step, fft_length,
# threshold, corrected_threshold).
test_configs = [
# 87.5% overlap.
(4096, 256, 32, 256, 1e-5, 1e-6),
# 75% overlap.
(4096, 256, 64, 256, 1e-5, 1e-6),
# Odd frame hop.
(4096, 128, 25, 128, 1e-3, 1e-6),
# Odd frame length.
(4096, 127, 32, 128, 1e-3, 1e-6),
# 50% overlap.
(4096, 128, 64, 128, 0.40, 1e-6),
]
for (signal_length, frame_length, frame_step, fft_length, threshold,
corrected_threshold) in test_configs:
# Generate a random white Gaussian signal.
signal = random_ops.random_normal([signal_length])
with self.cached_session(use_gpu=True) as sess:
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
inverse_stft_corrected = spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length,
window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
signal, inverse_stft, inverse_stft_corrected = sess.run(
[signal, inverse_stft, inverse_stft_corrected])
# Truncate signal to the size of inverse stft.
signal = signal[:inverse_stft.shape[0]]
# Ignore the frame_length samples at either edge.
signal = signal[frame_length:-frame_length]
inverse_stft = inverse_stft[frame_length:-frame_length]
inverse_stft_corrected = inverse_stft_corrected[
frame_length:-frame_length]
# Check that the inverse and original signal are close up to a scale
# factor.
inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
signal_scaled = signal / np.mean(np.abs(signal))
"""
Data getters:
δ Lyr Cluster:
get_deltalyr_kc19_gaia_data
get_deltalyr_kc19_comovers
get_deltalyr_kc19_cleansubset
get_autorotation_dataframe
Kepler 1627:
get_kep1627_kepler_lightcurve
get_kep1627_muscat_lightcuve
get_keplerfieldfootprint_dict
get_flare_df
get_becc_limits
get_koi7368_lightcurve
Get cluster datasets (useful for HR diagrams!):
get_gaia_catalog_of_nearby_stars
get_clustermembers_cg18_subset
get_mutau_members
get_ScoOB2_members
get_BPMG_members
Supplement a set of Gaia stars with extinctions and corrected photometry:
supplement_gaia_stars_extinctions_corrected_photometry
Supplement columns:
append_phot_binary_column
append_phot_membershipexclude_column
Clean Gaia sources based on photometry:
get_clean_gaia_photometric_sources
All-sky photometric surveys (GALEX/2MASS)
get_galex_data
get_2mass_data
Proposal/RM-related:
get_simulated_RM_data
One-offs to get the Stephenson-1 and RSG-5 information:
get_candidate_stephenson1_member_list
get_candidate_rsg5_member_list
supplement_sourcelist_with_gaiainfo
"""
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import os, collections, pickle
import numpy as np, pandas as pd
from glob import glob
from copy import deepcopy
from datetime import datetime
from collections import OrderedDict
from numpy import array as nparr
from astropy.io import fits
from astropy import units as u
from astropy.table import Table
from astroquery.vizier import Vizier
from astroquery.xmatch import XMatch
from astropy.coordinates import SkyCoord
import cdips.utils.lcutils as lcu
import cdips.lcproc.detrend as dtr
import cdips.lcproc.mask_orbit_edges as moe
from cdips.utils.catalogs import (
get_cdips_catalog, get_tic_star_information
)
from cdips.utils.gaiaqueries import (
query_neighborhood, given_source_ids_get_gaia_data,
given_dr2_sourceids_get_edr3_xmatch
)
from rudolf.paths import DATADIR, RESULTSDIR, PHOTDIR, LOCALDIR
def get_flare_df():
from rudolf.plotting import _get_detrended_flare_data
flaredir = os.path.join(RESULTSDIR, 'flares')
# read data
method = 'itergp'
cachepath = os.path.join(flaredir, f'flare_checker_cache_{method}.pkl')
c = _get_detrended_flare_data(cachepath, method)
flpath = os.path.join(flaredir, f'fldict_{method}.csv')
df = pd.read_csv(flpath)
FL_AMP_CUTOFF = 5e-3
sel = df.ampl_rec > FL_AMP_CUTOFF
sdf = df[sel]
return sdf
def get_kep1627_kepler_lightcurve(lctype='longcadence'):
"""
Collect and stitch available Kepler quarters, after median-normalizing in
    each quarter.
"""
assert lctype in ['longcadence', 'shortcadence',
'longcadence_byquarter', 'koi7368', 'koi7368_byquarter']
if lctype in ['longcadence', 'longcadence_byquarter']:
lcfiles = glob(os.path.join(DATADIR, 'phot', 'kplr*_llc.fits'))
elif lctype == 'shortcadence':
lcfiles = glob(os.path.join(DATADIR, 'phot', 'full_MAST_sc', 'MAST_*',
'Kepler', 'kplr006184894*', 'kplr*_slc.fits'))
elif lctype in ['koi7368', 'koi7368_byquarter']:
lcfiles = glob(os.path.join(DATADIR, 'KOI_7368', 'phot',
'kplr010736489*_llc.fits'))
else:
raise NotImplementedError('could do short cadence here too')
assert len(lcfiles) > 1
timelist,f_list,ferr_list,qual_list,texp_list = [],[],[],[],[]
for lcfile in lcfiles:
hdul = fits.open(lcfile)
d = hdul[1].data
yval = 'PDCSAP_FLUX'
time = d['TIME']
_f, _f_err = d[yval], d[yval+'_ERR']
flux = (_f/np.nanmedian(_f) - 1) # normalize around zero for GP regression
flux_err = _f_err/np.nanmedian(_f)
qual = d['SAP_QUALITY']
sel = np.isfinite(time) & np.isfinite(flux) & np.isfinite(flux_err)
texp = np.nanmedian(np.diff(time[sel]))
timelist.append(time[sel])
f_list.append(flux[sel])
ferr_list.append(flux_err[sel])
qual_list.append(qual[sel])
texp_list.append(texp)
if (
lctype == 'longcadence' or lctype == 'shortcadence' or
lctype == 'koi7368'
):
time = np.hstack(timelist)
flux = np.hstack(f_list)
flux_err = np.hstack(ferr_list)
qual = np.hstack(qual_list)
texp = np.nanmedian(texp_list)
elif lctype == 'longcadence_byquarter' or lctype == 'koi7368_byquarter':
return (
timelist,
f_list,
ferr_list,
qual_list,
texp_list
)
# require ascending time
s = np.argsort(time)
flux = flux[s]
flux_err = flux_err[s]
qual = qual[s]
time = time[s]
assert np.all(np.diff(time) > 0)
return (
time.astype(np.float64),
flux.astype(np.float64),
flux_err.astype(np.float64),
qual,
texp
)
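# Hedged usage sketch (not part of the original pipeline): phase-fold the
# stitched long-cadence light curve on the Kepler-1627 b ephemeris quoted in
# get_simulated_RM_data below (P = 7.20281 d, T0 = 2454953.790531). The
# function name and keyword defaults are illustrative assumptions.
def _example_phase_fold_kep1627(period=7.20281, t0=2454953.790531):
    time, flux, flux_err, qual, texp = get_kep1627_kepler_lightcurve('longcadence')
    # Kepler TIME is BKJD = BJD - 2454833, so shift the reference epoch.
    phase = ((time - (t0 - 2454833.0)) / period) % 1.0
    order = np.argsort(phase)
    return phase[order], flux[order]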
def get_kep1627_muscat_lightcuve():
"""
    Collect MuSCAT3 data. Return an OrderedDict keyed by 'muscat3_<band>' (band
    in g, r, i, z) with values [time, flux, flux_err, texp].
"""
datasets = OrderedDict()
bandpasses = 'g,r,i,z'.split(',')
    # NB: the files from the team also contain airmass, dx, dy, FWHM, and peak
    # ADU columns; these are not needed in our approach.
for bp in bandpasses:
lcpath = glob(
os.path.join(PHOTDIR, 'MUSCAT3', f'*muscat3_{bp}_*csv')
)[0]
df = pd.read_csv(lcpath)
# converting b/c theano only understands float64
_time, _flux, _fluxerr = (
nparr(df.BJD_TDB).astype(np.float64),
nparr(df.Flux).astype(np.float64),
nparr(df.Err).astype(np.float64)
)
_texp = np.nanmedian(np.diff(_time))
key = f'muscat3_{bp}'
datasets[key] = [_time, _flux, _fluxerr, _texp]
return datasets
def get_candidate_stephenson1_member_list():
outpath = os.path.join(DATADIR, 'gaia', 'stephenson1_kc19.csv')
if not os.path.exists(outpath):
# Kounkel & Covey 2019 Stephenson1 candidate member list.
csvpath = os.path.join(DATADIR, 'gaia', 'string_table1.csv')
df = pd.read_csv(csvpath)
sdf = df[np.array(df.group_id).astype(int) == 73]
sdf['source_id'].to_csv(outpath, index=False)
return pd.read_csv(outpath)
def get_candidate_rsg5_member_list():
outpath = os.path.join(DATADIR, 'gaia', 'rsg5_kc19.csv')
if not os.path.exists(outpath):
        # Kounkel & Covey 2019 RSG-5 (Theia 96) candidate member list.
csvpath = os.path.join(DATADIR, 'gaia', 'string_table1.csv')
df = pd.read_csv(csvpath)
sdf = df[np.array(df.group_id).astype(int) == 96]
sdf['source_id'].to_csv(outpath, index=False)
return pd.read_csv(outpath)
def supplement_sourcelist_with_gaiainfo(df, groupname='stephenson1'):
dr2_source_ids = np.array(df.source_id).astype(np.int64)
dr2_x_edr3_df = given_dr2_sourceids_get_edr3_xmatch(
dr2_source_ids, groupname, overwrite=True,
enforce_all_sourceids_viable=True
)
# Take the closest (proper motion and epoch-corrected) angular distance as
# THE single match.
get_edr3_xm = lambda _df: (
_df.sort_values(by='angular_distance').
drop_duplicates(subset='dr2_source_id', keep='first')
)
s_edr3 = get_edr3_xm(dr2_x_edr3_df)
edr3_source_ids = np.array(s_edr3.dr3_source_id).astype(np.int64)
# get gaia dr2 data
df_dr2 = given_source_ids_get_gaia_data(dr2_source_ids, groupname, n_max=10000,
overwrite=True,
enforce_all_sourceids_viable=True,
savstr='',
gaia_datarelease='gaiadr2')
df_edr3 = given_source_ids_get_gaia_data(edr3_source_ids, groupname,
n_max=10000, overwrite=True,
enforce_all_sourceids_viable=True,
savstr='',
gaia_datarelease='gaiaedr3')
outpath_dr2 = os.path.join(DATADIR, 'gaia', f'{groupname}_kc19_dr2.csv')
outpath_edr3 = os.path.join(DATADIR, 'gaia', f'{groupname}_kc19_edr3.csv')
outpath_dr2xedr3 = os.path.join(DATADIR, 'gaia', f'{groupname}_kc19_dr2xedr3.csv')
df_dr2.to_csv(outpath_dr2, index=False)
df_edr3.to_csv(outpath_edr3, index=False)
dr2_x_edr3_df.to_csv(outpath_dr2xedr3, index=False)
def get_deltalyr_kc19_gaia_data(return_all_targets=0):
"""
Get all Kounkel & Covey 2019 "Stephenson 1" members.
"""
outpath_dr2 = os.path.join(DATADIR, 'gaia', 'stephenson1_kc19_dr2.csv')
outpath_edr3 = os.path.join(DATADIR, 'gaia', 'stephenson1_kc19_edr3.csv')
if not os.path.exists(outpath_dr2):
df = get_candidate_stephenson1_member_list()
supplement_sourcelist_with_gaiainfo(df, groupname='stephenson1')
df_dr2 = pd.read_csv(outpath_dr2)
df_edr3 = pd.read_csv(outpath_edr3)
trgt_id = "2103737241426734336" # Kepler 1627
trgt_df = df_edr3[df_edr3.source_id.astype(str) == trgt_id]
if not return_all_targets:
return df_dr2, df_edr3, trgt_df
trgt_id_dict = {
'KOI-7368': "2128840912955018368",
'KOI-7913': "2106235301785454208",
'Kepler-1643': "2082142734285082368" # aka. KOI-6186
}
koi_df_dict = {}
for k,trgt_id in trgt_id_dict.items():
_df = df_edr3[df_edr3.source_id.astype(str) == trgt_id]
if len(_df) == 0:
print(f'Running single-object EDR3 search for {trgt_id}...')
_df = given_source_ids_get_gaia_data(
np.array([trgt_id]).astype(np.int64), str(trgt_id), n_max=2,
overwrite=False, enforce_all_sourceids_viable=True, savstr='',
gaia_datarelease='gaiaedr3')
assert len(_df) > 0
koi_df_dict[k] = _df
return df_dr2, df_edr3, trgt_df, koi_df_dict
def get_rsg5_kc19_gaia_data():
"""
Get all Kounkel & Covey 2019 "RSG_5" (Theia 96) members.
"""
outpath_dr2 = os.path.join(DATADIR, 'gaia', 'rsg5_kc19_dr2.csv')
outpath_edr3 = os.path.join(DATADIR, 'gaia', 'rsg5_kc19_edr3.csv')
if not os.path.exists(outpath_dr2):
df = get_candidate_rsg5_member_list()
supplement_sourcelist_with_gaiainfo(df, groupname='rsg5')
df_dr2 = pd.read_csv(outpath_dr2)
df_edr3 = pd.read_csv(outpath_edr3)
return df_dr2, df_edr3
def get_deltalyr_kc19_comovers():
"""
Get the kinematic neighbors of Kepler 1627. The contents of this file are
EDR3 properties.
made by plot_XYZvtang.py
sel = (
(df_edr3.delta_pmdec_prime_km_s > -5)
&
(df_edr3.delta_pmdec_prime_km_s < 2)
&
(df_edr3.delta_pmra_prime_km_s > -4)
&
(df_edr3.delta_pmra_prime_km_s < 2)
)
"""
raise NotImplementedError('this is deprecated. use get_deltalyr_kc19_cleansubset.')
csvpath = os.path.join(RESULTSDIR, 'tables',
'stephenson1_edr3_XYZvtang_candcomovers.csv')
return pd.read_csv(csvpath)
def get_deltalyr_kc19_cleansubset():
"""
    To make this subset, I took the KC19 members (EDR3-crossmatched).
Then, I ran them through plot_XYZvtang, which created
"stephenson1_edr3_XYZvtang_allphysical.csv"
Then, I opened it up in glue. I made a selection lasso in kinematic
velocity space, in XZ, and XY position space.
"""
csvpath = os.path.join(RESULTSDIR,
'glue_stephenson1_edr3_XYZvtang_allphysical',
'set0_select_kinematic_YX_ZX.csv')
return pd.read_csv(csvpath)
def get_set1_koi7368():
"""
As for get_deltalyr_kc19_cleansubset, but for the KOI 7368 neighbors.
"""
csvpath = os.path.join(RESULTSDIR,
'glue_stephenson1_edr3_XYZvtang_allphysical',
'set1_select_kinematic_YX_ZX.csv')
return pd.read_csv(csvpath)
def get_gaia_catalog_of_nearby_stars():
fitspath = os.path.join(
DATADIR, 'nearby_stars', 'GaiaCollaboration_2021_GCNS.fits'
)
hl = fits.open(fitspath)
d = hl[1].data
df = Table(d).to_pandas()
COLDICT = {
'GaiaEDR3': 'edr3_source_id',
'RA_ICRS': 'ra',
'DE_ICRS': 'dec',
'Plx': 'parallax',
'pmRA': 'pmra',
'pmDE': 'pmdec',
'Gmag': 'phot_g_mean_mag',
'BPmag': 'phot_bp_mean_mag',
'RPmag': 'phot_rp_mean_mag'
}
df = df.rename(columns=COLDICT)
return df
def get_clustermembers_cg18_subset(clustername):
"""
    e.g., IC_2602, or "Melotte_22" for the Pleiades.
"""
csvpath = '/Users/luke/local/cdips/catalogs/cdips_targets_v0.6_nomagcut_gaiasources.csv'
df = pd.read_csv(csvpath, sep=',')
sel0 = (
df.cluster.str.contains(clustername)
&
df.reference_id.str.contains('CantatGaudin2018a')
)
sdf = df[sel0]
sel = []
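    # `cluster` and `reference_id` are parallel comma-separated lists; keep a
    # row only if 'CantatGaudin2018a' appears at the same position(s) where the
    # requested cluster name appears.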
for ix, r in sdf.iterrows():
c = np.array(r['cluster'].split(','))
ref = np.array(r['reference_id'].split(','))
this = np.any(
np.in1d(
'CantatGaudin2018a',
ref[np.argwhere( c == clustername ).flatten()]
)
)
sel.append(this)
sel = np.array(sel).astype(bool)
csdf = sdf[sel]
if 'l' not in csdf:
_c = SkyCoord(ra=nparr(csdf.ra)*u.deg, dec=nparr(csdf.dec)*u.deg)
csdf['l'] = _c.galactic.l.value
csdf['b'] = _c.galactic.b.value
return csdf
def get_BPMG_members():
"""
BPMG = beta pic moving group
This retrieves BPMG members from Ujjwal+2020. I considered also requiring
Gagne+18 matches, but this yielded only 25 stars.
"""
csvpath = '/Users/luke/local/cdips/catalogs/cdips_targets_v0.6_nomagcut_gaiasources.csv'
df = pd.read_csv(csvpath, sep=',')
sel0 = (
df.cluster.str.contains('BPMG')
&
df.reference_id.str.contains('Ujjwal2020')
)
sdf = df[sel0]
if 'l' not in sdf:
_c = SkyCoord(ra=nparr(sdf.ra)*u.deg, dec=nparr(sdf.dec)*u.deg)
sdf['l'] = _c.galactic.l.value
sdf['b'] = _c.galactic.b.value
return sdf
def get_mutau_members():
tablepath = os.path.join(DATADIR, 'cluster',
'Gagne_2020_mutau_table12_apjabb77et12_mrt.txt')
df = Table.read(tablepath, format='ascii.cds').to_pandas()
sel = (
(df['Memb-Type'] == 'IM')
|
(df['Memb-Type'] == 'CM')
#|
#(df['Memb-Type'] == 'LM')
)
sdf = df[sel]
#Gagne's tables are pretty annoying
RAh, RAm, RAs = nparr(sdf['Hour']), nparr(sdf['Minute']), nparr(sdf['Second'])
RA_hms = [str(rah).zfill(2)+'h'+
str(ram).zfill(2)+'m'+
str(ras).zfill(2)+'s'
for rah,ram,ras in zip(RAh, RAm, RAs)]
DEd, DEm, DEs = nparr(sdf['Degree']),nparr(sdf['Arcminute']),nparr(sdf['Arcsecond'])
DEsign = nparr(sdf['Sign'])
DEsign[DEsign != '-'] = '+'
DE_dms = [str(desgn)+
str(ded).zfill(2)+'d'+
str(dem).zfill(2)+'m'+
str(des).zfill(2)+'s'
for desgn,ded,dem,des in zip(DEsign, DEd, DEm, DEs)]
coords = SkyCoord(ra=RA_hms, dec=DE_dms, frame='icrs')
RA = coords.ra.value
dec = coords.dec.value
sdf['ra'] = RA
sdf['dec'] = dec
_c = SkyCoord(ra=nparr(sdf.ra)*u.deg, dec=nparr(sdf.dec)*u.deg)
sdf['l'] = _c.galactic.l.value
sdf['b'] = _c.galactic.b.value
# columns
COLDICT = {
'plx': 'parallax',
'pmRA': 'pmra',
'pmDE': 'pmdec',
'Gmag': 'phot_g_mean_mag',
'BPmag': 'phot_bp_mean_mag',
'RPmag': 'phot_rp_mean_mag'
}
sdf = sdf.rename(columns=COLDICT)
return sdf
def get_ScoOB2_members():
from cdips.catalogbuild.vizier_xmatch_utils import get_vizier_table_as_dataframe
vizier_search_str = "Damiani J/A+A/623/A112"
whichcataloglist = "J/A+A/623/A112"
srccolumns = 'DR2Name|RAJ2000|DEJ2000|GLON|GLAT|Plx|pmGLON|pmGLAT|Sel|Pop'
dstcolumns = 'source_id|ra|dec|l|b|parallax|pm_l|pl_b|Sel|Pop'
# ScoOB2_PMS
df0 = get_vizier_table_as_dataframe(
vizier_search_str, srccolumns, dstcolumns,
whichcataloglist=whichcataloglist, table_num=0
)
# ScoOB2_UMS
df1 = get_vizier_table_as_dataframe(
vizier_search_str, srccolumns, dstcolumns,
whichcataloglist=whichcataloglist, table_num=1
)
gdf0 = given_source_ids_get_gaia_data(
np.array(df0.source_id).astype(np.int64), 'ScoOB2_PMS_Damiani19',
n_max=12000, overwrite=False
)
gdf1 = given_source_ids_get_gaia_data(
np.array(df1.source_id).astype(np.int64), 'ScoOB2_UMS_Damiani19',
n_max=12000, overwrite=False
)
selcols = ['source_id', 'Sel', 'Pop']
mdf0 = gdf0.merge(df0[selcols], on='source_id', how='inner')
mdf1 = gdf1.merge(df1[selcols], on='source_id', how='inner')
assert len(mdf0) == 10839
assert len(mdf1) == 3598
df = pd.concat((mdf0, mdf1)).reset_index()
# proper-motion and v_tang selected...
# require population to be UCL-1, since:
# Counter({'': 1401, 'D2b': 3510, 'IC2602': 260, 'D1': 2058, 'LCC-1': 84,
# 'UCL-2': 51, 'D2a': 750, 'UCL-3': 47, 'Lup III': 69, 'UCL-1':
# 551, 'USC-D2': 1210, 'USC-f': 347, 'USC-n': 501})
sel = (df.Sel == 'pv')
sel &= (df.Pop == 'UCL-1')
sdf = df[sel]
return sdf
ORIENTATIONTRUTHDICT = {
'prograde': 0,
'retrograde': -150,
'polar': 85
}
def get_simulated_RM_data(orientation, makeplot=1):
#
# https://github.com/gummiks/rmfit. Hirano+11,+12 implementation by
# <NAME>.
#
from rmfit import RMHirano
assert orientation in ['prograde', 'retrograde', 'polar']
lam = ORIENTATIONTRUTHDICT[orientation]
    t_cadence = 20/(24*60)  # 20 minutes, in days
T0 = 2454953.790531
P = 7.20281
aRs = 12.03
i = 86.138
vsini = 20
rprs = 0.0433
e = 0.
w = 90.
# lam = 0
u = [0.515, 0.23]
beta = 4
sigma = vsini / 1.31 # assume sigma is vsini/1.31 (see Hirano et al. 2010)
times = np.arange(-2.5/24+T0,2.5/24+t_cadence+T0,t_cadence)
R = RMHirano(lam,vsini,P,T0,aRs,i,rprs,e,w,u,beta,sigma,supersample_factor=7,exp_time=t_cadence,limb_dark='quadratic')
rm = R.evaluate(times)
return times, rm
def get_keplerfieldfootprint_dict():
kep = pd.read_csv(
os.path.join(DATADIR, 'skychart', 'kepler_field_footprint.csv')
)
# we want the corner points, not the mid-points
    is_midpoint = ((kep['row']==535) & (kep['column']==550))
    kep = kep[~is_midpoint]
kep_coord = SkyCoord(
np.array(kep['ra'])*u.deg, np.array(kep['dec'])*u.deg, frame='icrs'
)
kep_elon = kep_coord.barycentrictrueecliptic.lon.value
kep_elat = kep_coord.barycentrictrueecliptic.lat.value
kep['elon'] = kep_elon
kep['elat'] = kep_elat
kep_d = {}
    for module in np.unique(kep['module']):
        # Hedged sketch of the truncated remainder: collect this CCD module's
        # corner coordinates into the returned dictionary.
        sel = nparr(kep['module']) == module
        kep_d[module] = {
            'elon': nparr(kep.loc[sel, 'elon']),
            'elat': nparr(kep.loc[sel, 'elat']),
        }
    return kep_d
import math
import cv2
import numpy as np
from utils.colors import get_zenburn_colors
def length_scaling(depth):
return math.exp(-depth/5)
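# e.g. length_scaling(0) = 1.0, length_scaling(5) is about 0.37, and
# length_scaling(15) is about 0.05: branch segments shrink roughly
# exponentially with recursion depth.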
def recursive_branch(image, initial_length, depth, angle,
start_point, zen_colors=None,
MAX_DEPTH=15, SIGMA_ANGLE=0.1):
if zen_colors is None:
zen_colors = get_zenburn_colors()
length = initial_length * length_scaling(depth)
length = np.random.normal(length, length/9)
end_x = int(start_point[0] + length * math.cos(angle))
end_y = int(start_point[1] - length * math.sin(angle))
end_point = (end_x, end_y)
start_index = int(((depth - 1) / MAX_DEPTH) * len(zen_colors))
end_index = int((depth / MAX_DEPTH) * len(zen_colors))
    colors = zen_colors[min(np.random.randint(start_index, end_index),
                            len(zen_colors) - 1)]  # upper bound assumed: clamp to a valid palette index
""" impyute.deletion.complete_case """
import numpy as np
def complete_case(data):
""" Return only data rows with all columns
Parameters
----------
data: numpy.ndarray
Data to impute.
Returns
-------
numpy.ndarray
Imputed data.
"""
    # Keep only the rows that contain no NaN in any column.
    return data[~np.isnan(data).any(axis=1)]
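# Hedged usage sketch:
# >>> complete_case(np.array([[1.0, 2.0], [np.nan, 3.0], [4.0, 5.0]]))
# array([[1., 2.],
#        [4., 5.]])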
#!/usr/bin/env python
import numpy as np
import scipy.spatial
# Local modules
import raveutils as ru
def trimesh_from_point_cloud(cloud):
"""
Convert a point cloud into a convex hull trimesh
Parameters
----------
cloud: array_like
The input point cloud. It can be ``pcl.Cloud`` or :obj:`numpy.array`
Returns
-------
vertices: array_like
The trimesh vertices
faces: array_like
The trimesh faces
See Also
--------
:obj:`scipy.spatial.ConvexHull`: For more details about convex hulls
"""
  points = np.asarray(cloud)
  # Sketch of the hull construction implied by the docstring: QHull already
  # triangulates the facets, so the simplices can serve directly as faces.
  hull = scipy.spatial.ConvexHull(points)
  vertices = hull.points
  faces = hull.simplices
  return vertices, faces
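# Hedged usage sketch: the eight corners of a unit cube give 8 hull vertices
# and (with QHull's triangulated facets) 12 triangular faces.
# >>> import itertools
# >>> cube = np.array(list(itertools.product([0.0, 1.0], repeat=3)))
# >>> vertices, faces = trimesh_from_point_cloud(cube)
# >>> faces.shape
# (12, 3)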
from dnetworks.layers.recurrent import RNNCell
import numpy as np
import dnetworks
from dnetworks.layers import (
LinearLayer,
ConstantPad,
Conv2D,
MaxPooling2D,
AveragePooling2D,
RNNCell
)
from .test_utils import (
test_parameters_linearlayer,
test_parameters_convolution,
test_parameters_rnncell
)
def test_linearlayer():
"""
Tests the linear layer class.
"""
weights, bias, A, dZ = test_parameters_linearlayer()
layer = LinearLayer(3, 2)
layer.weights = weights
layer.bias = bias
expected_Z = np.array([[-13.], [50.5]])
expected_dA = np.array([[4.5],[3.], [7.6]])
obtained_Z = layer.forward(A)
obtained_dA = layer.backward(dZ)
np.testing.assert_almost_equal(expected_Z, obtained_Z)
np.testing.assert_almost_equal(expected_dA, obtained_dA)
def test_paddinglayer():
"""
Tests padding layer class.
"""
X = np.array([[1, 1, 1], [1, 1, 1]])
expected = np.array(
[[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
)
padding = 1
dimension = 2
constant = 0
padclass = ConstantPad(padding, dimension, constant)
obtained = padclass.pad(X)
np.testing.assert_almost_equal(expected, obtained)
def test_conv2d():
"""
Test Convolutional 2D layer class.
"""
image, weights, bias = test_parameters_convolution()
expected_Z = np.array(
[[[[17]], [[26]], [[11]], [[ 5]]],
[[[39]], [[32]], [[28]], [[ 8]]],
[[[29]], [[46]], [[24]], [[17]]],
[[[43]], [[49]], [[50]], [[29]]]]
)
expected_dA = np.array(
[[[[326.]], [[475.]], [[296.]], [[159.]]],
[[[488.]], [[555.]], [[491.]], [[201.]]],
[[[522.]], [[798.]], [[628.]], [[378.]]],
[[[316.]], [[392.]], [[370.]], [[190.]]]]
)
expected_dW = np.array(
[[[[771, 927, 626], [833, 1200, 739], [529, 749, 569]]]]
)
expected_db = np.array([[453]])
in_channels = 1
out_channels = 1
kernel_size = (3, 3)
stride = 1
padding = 1
convolution = Conv2D(
in_channels, out_channels, kernel_size, stride, padding
)
convolution.weights = weights
convolution.bias = bias
obtained_Z = convolution.forward(image)
obtained_dA = convolution.backward(obtained_Z)
obtained_dW = convolution.dW
obtained_db = convolution.db
np.testing.assert_almost_equal(expected_Z, obtained_Z)
np.testing.assert_almost_equal(expected_dA, obtained_dA)
np.testing.assert_almost_equal(expected_dW, obtained_dW)
np.testing.assert_almost_equal(expected_db, obtained_db)
def test_maxpooling2d():
"""
Tests Max pooling layer class.
"""
image, _, _ = test_parameters_convolution()
expected_Z = np.array(
[[[[4]], [[4]]], [[[4]], [[4]]]]
)
expected_dA = np.array(
[[[[ 0.]], [[ 8.]], [[ 0.]], [[ 0.]]],
[[[ 0.]], [[ 0.]], [[ 0.]], [[ 0.]]],
[[[ 8.]], [[16.]], [[ 0.]], [[ 0.]]],
[[[ 4.]], [[ 0.]], [[ 8.]], [[ 0.]]]]
)
kernel_size = (3, 3)
stride = 1
padding = 0
convolution = MaxPooling2D(kernel_size, stride, padding)
obtained_Z = convolution.forward(image)
obtained_dA = convolution.backward(obtained_Z)
    np.testing.assert_almost_equal(expected_Z, obtained_Z)
    np.testing.assert_almost_equal(expected_dA, obtained_dA)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
_DETECTRON_OPS_LIB = 'libcaffe2_detectron_ops_gpu.so'
_CMAKE_INSTALL_PREFIX = '/usr/local'
HIGHEST_BACKBONE_LVL = 5
LOWEST_BACKBONE_LVL = 2
import argparse
import cv2
# import glob
import copy
import logging
import os
import sys
import six
import time
import importlib
import pprint
import contextlib
import re
import scipy.sparse
import collections
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
# sys.path.append('/root/pmt/thirdparty/densepose/np') # path to where detectron
# bash this with model missing in 'filename' and add the path to sys
# find / -type f -iname "filename*"
# sys.path.append('path/to/where/cafffe2/is')
from caffe2.python import workspace
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import dyndep
from caffe2.python import scope
from caffe2.python import cnn
from caffe2.python import muji
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
from pycocotools import mask as COCOmask
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from .assets.config import assert_and_infer_cfg
from .assets.config import cfg
from .assets.config import merge_cfg_from_file
from .assets.config import load_cfg
import cython_bbox as cython_bbox
import cython_nms as cython_nms
from collections import defaultdict
from collections import OrderedDict
from six import string_types
from six.moves import cPickle as pickle
from six.moves import urllib
from glob import glob
from scipy.io import loadmat
from matplotlib.patches import Polygon
box_utils_bbox_overlaps = cython_bbox.bbox_overlaps
bbox_overlaps = cython_bbox.bbox_overlaps
logger = logging.getLogger(__name__)
FpnLevelInfo = collections.namedtuple(
'FpnLevelInfo',
['blobs', 'dims', 'spatial_scales']
)
def _progress_bar(count, total):
"""Report download progress.
Credit:
https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113
"""
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write(
' [{}] {}% of {:.1f}MB file \r'.
format(bar, percents, total / 1024 / 1024)
)
sys.stdout.flush()
if count >= total:
sys.stdout.write('\n')
def download_url(
url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar
):
"""Download url and write it to dst_file_path.
Credit:
https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
"""
response = urllib.request.urlopen(url)
if six.PY2:
total_size = response.info().getheader('Content-Length').strip()
else:
total_size = response.info().get('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
with open(dst_file_path, 'wb') as f:
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
if progress_hook:
progress_hook(bytes_so_far, total_size)
f.write(chunk)
return bytes_so_far
def get_class_string(class_index, score, dataset):
class_text = dataset.classes[class_index] if dataset is not None else \
'id{:d}'.format(class_index)
return class_text + ' {:0.2f}'.format(score).lstrip('0')
def kp_connections(keypoints):
kp_lines = [
[keypoints.index('left_eye'), keypoints.index('right_eye')],
[keypoints.index('left_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('right_ear')],
[keypoints.index('left_eye'), keypoints.index('left_ear')],
[keypoints.index('right_shoulder'), keypoints.index('right_elbow')],
[keypoints.index('right_elbow'), keypoints.index('right_wrist')],
[keypoints.index('left_shoulder'), keypoints.index('left_elbow')],
[keypoints.index('left_elbow'), keypoints.index('left_wrist')],
[keypoints.index('right_hip'), keypoints.index('right_knee')],
[keypoints.index('right_knee'), keypoints.index('right_ankle')],
[keypoints.index('left_hip'), keypoints.index('left_knee')],
[keypoints.index('left_knee'), keypoints.index('left_ankle')],
[keypoints.index('right_shoulder'), keypoints.index('left_shoulder')],
[keypoints.index('right_hip'), keypoints.index('left_hip')],
]
return kp_lines
def colormap(rgb=False):
color_list = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.167, 0.000, 0.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.286, 0.286, 0.286,
0.429, 0.429, 0.429,
0.571, 0.571, 0.571,
0.714, 0.714, 0.714,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
if not rgb:
color_list = color_list[:, ::-1]
return color_list
def keypoint_utils_get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
return keypoints, keypoint_flip_map
def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):
"""Convert from the class boxes/segms/keyps format generated by the testing
code.
"""
box_list = [b for b in cls_boxes if len(b) > 0]
if len(box_list) > 0:
boxes = np.concatenate(box_list)
else:
boxes = None
if cls_segms is not None:
segms = [s for slist in cls_segms for s in slist]
else:
segms = None
if cls_keyps is not None:
keyps = [k for klist in cls_keyps for k in klist]
else:
keyps = None
classes = []
for j in range(len(cls_boxes)):
classes += [j] * len(cls_boxes[j])
return boxes, segms, keyps, classes
def vis_utils_vis_one_image(
im, boxes, segms=None, keypoints=None, body_uv=None, thresh=0.9,
kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
ext='pdf'):
"""Visual debugging of detections."""
if isinstance(boxes, list):
boxes, segms, keypoints, classes = convert_from_cls_format(
boxes, segms, keypoints)
if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
return
dataset_keypoints, _ = keypoint_utils_get_keypoints()
if segms is not None and len(segms) > 0:
masks = mask_util.decode(segms)
color_list = colormap(rgb=True) / 255
kp_lines = kp_connections(dataset_keypoints)
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
# Display in largest to smallest order to reduce occlusion
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
sorted_inds = np.argsort(-areas)
IUV_fields = body_uv[1]
#
All_Coords = np.zeros(im.shape)
All_inds = np.zeros([im.shape[0],im.shape[1]])
K = 26
##
inds = np.argsort(boxes[:,4])
##
for i, ind in enumerate(inds):
entry = boxes[ind,:]
if entry[4] > 0.65:
entry=entry[0:4].astype(int)
####
output = IUV_fields[ind]
####
All_Coords_Old = All_Coords[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2],:]
All_Coords_Old[All_Coords_Old==0]=output.transpose([1,2,0])[All_Coords_Old==0]
All_Coords[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2],:]= All_Coords_Old
###
CurrentMask = (output[0,:,:]>0).astype(np.float32)
All_inds_old = All_inds[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2]]
All_inds_old[All_inds_old==0] = CurrentMask[All_inds_old==0]*i
All_inds[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2]] = All_inds_old
#
All_Coords[:,:,1:3] = 255. * All_Coords[:,:,1:3]
All_Coords[All_Coords>255] = 255.
All_Coords = All_Coords.astype(np.uint8)
return All_Coords
def envu_get_detectron_ops_lib():
"""Retrieve Detectron ops library."""
# Candidate prefixes for detectron ops lib path
prefixes = [_CMAKE_INSTALL_PREFIX, sys.prefix, sys.exec_prefix] + sys.path
# Candidate subdirs for detectron ops lib
subdirs = ['lib', 'torch/lib']
# Try to find detectron ops lib
for prefix in prefixes:
for subdir in subdirs:
ops_path = os.path.join(prefix, subdir, _DETECTRON_OPS_LIB)
if os.path.exists(ops_path):
#print('Found Detectron ops lib: {}'.format(ops_path))
return ops_path
raise Exception('Detectron ops lib not found')
def c2_utils_import_detectron_ops():
"""Import Detectron ops."""
detectron_ops_lib = envu_get_detectron_ops_lib()
dyndep.InitOpsLibrary(detectron_ops_lib)
def dummy_datasets_get_coco_dataset():
"""A dummy COCO dataset that includes only the 'classes' field."""
ds = AttrDict()
classes = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
ds.classes = {i: name for i, name in enumerate(classes)}
return ds
def im_detect_body_uv(model, im_scale, boxes):
"""Compute body uv predictions."""
M = cfg.BODY_UV_RCNN.HEATMAP_SIZE
P = cfg.BODY_UV_RCNN.NUM_PATCHES
if boxes.shape[0] == 0:
pred_body_uvs = np.zeros((0, P, M, M), np.float32)
return pred_body_uvs
inputs = {'body_uv_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'body_uv_rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.body_uv_net.Proto().name)
AnnIndex = workspace.FetchBlob(core.ScopedName('AnnIndex')).squeeze()
Index_UV = workspace.FetchBlob(core.ScopedName('Index_UV')).squeeze()
U_uv = workspace.FetchBlob(core.ScopedName('U_estimated')).squeeze()
V_uv = workspace.FetchBlob(core.ScopedName('V_estimated')).squeeze()
    # In case of a single box, restore the leading batch dimension.
if AnnIndex.ndim == 3:
AnnIndex = np.expand_dims(AnnIndex, axis=0)
if Index_UV.ndim == 3:
Index_UV = np.expand_dims(Index_UV, axis=0)
if U_uv.ndim == 3:
U_uv = np.expand_dims(U_uv, axis=0)
if V_uv.ndim == 3:
V_uv = np.expand_dims(V_uv, axis=0)
K = cfg.BODY_UV_RCNN.NUM_PATCHES + 1
outputs = []
for ind, entry in enumerate(boxes):
# Compute ref box width and height
bx = int(max(entry[2] - entry[0], 1))
by = int(max(entry[3] - entry[1], 1))
# preds[ind] axes are CHW; bring p axes to WHC
CurAnnIndex = np.swapaxes(AnnIndex[ind], 0, 2)
CurIndex_UV = np.swapaxes(Index_UV[ind], 0, 2)
CurU_uv = np.swapaxes(U_uv[ind], 0, 2)
CurV_uv = np.swapaxes(V_uv[ind], 0, 2)
# Resize p from (HEATMAP_SIZE, HEATMAP_SIZE, c) to (int(bx), int(by), c)
CurAnnIndex = cv2.resize(CurAnnIndex, (by, bx))
CurIndex_UV = cv2.resize(CurIndex_UV, (by, bx))
CurU_uv = cv2.resize(CurU_uv, (by, bx))
CurV_uv = cv2.resize(CurV_uv, (by, bx))
# Bring Cur_Preds axes back to CHW
CurAnnIndex = np.swapaxes(CurAnnIndex, 0, 2)
CurIndex_UV = np.swapaxes(CurIndex_UV, 0, 2)
CurU_uv = np.swapaxes(CurU_uv, 0, 2)
CurV_uv = np.swapaxes(CurV_uv, 0, 2)
# Removed squeeze calls due to singleton dimension issues
CurAnnIndex = np.argmax(CurAnnIndex, axis=0)
CurIndex_UV = np.argmax(CurIndex_UV, axis=0)
CurIndex_UV = CurIndex_UV * (CurAnnIndex>0).astype(np.float32)
output = np.zeros([3, int(by), int(bx)], dtype=np.float32)
output[0] = CurIndex_UV
for part_id in range(1, K):
CurrentU = CurU_uv[part_id]
CurrentV = CurV_uv[part_id]
output[1, CurIndex_UV==part_id] = CurrentU[CurIndex_UV==part_id]
output[2, CurIndex_UV==part_id] = CurrentV[CurIndex_UV==part_id]
outputs.append(output)
num_classes = cfg.MODEL.NUM_CLASSES
cls_bodys = [[] for _ in range(num_classes)]
person_idx = keypoint_utils_get_person_class_index()
cls_bodys[person_idx] = outputs
return cls_bodys
def compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi):
"""Compute OKS for predicted keypoints wrt gt_keypoints.
src_keypoints: 4xK
src_roi: 4x1
dst_keypoints: Nx4xK
dst_roi: Nx4
"""
sigmas = np.array([
.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87,
.87, .89, .89]) / 10.0
vars = (sigmas * 2)**2
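    # OKS per keypoint is exp(-d_i^2 / (2 * s^2 * k_i^2)), with d_i the
    # prediction-to-target distance, s^2 the source box area, and k_i = 2 * sigma_i;
    # the value returned below is its mean over all keypoints.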
# area
src_area = (src_roi[2] - src_roi[0] + 1) * (src_roi[3] - src_roi[1] + 1)
# measure the per-keypoint distance if keypoints visible
dx = dst_keypoints[:, 0, :] - src_keypoints[0, :]
dy = dst_keypoints[:, 1, :] - src_keypoints[1, :]
e = (dx**2 + dy**2) / vars / (src_area + np.spacing(1)) / 2
e = np.sum(np.exp(-e), axis=1) / e.shape[1]
return e
def keypoint_utils_nms_oks(kp_predictions, rois, thresh):
"""Nms based on kp predictions."""
scores = np.mean(kp_predictions[:, 2, :], axis=1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ovr = compute_oks(
kp_predictions[i], rois[i], kp_predictions[order[1:]],
rois[order[1:]])
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def scores_to_probs(scores):
"""Transforms CxHxW of scores to probabilities spatially."""
channels = scores.shape[0]
for c in range(channels):
temp = scores[c, :, :]
max_score = temp.max()
temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))
scores[c, :, :] = temp
return scores
def keypoint_utils_heatmaps_to_keypoints(maps, rois):
"""Extract predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = cfg.KRCNN.INFERENCE_MIN_SIZE
xy_preds = np.zeros(
(len(rois), 4, cfg.KRCNN.NUM_KEYPOINTS), dtype=np.float32)
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
roi_map = cv2.resize(
maps[i], (roi_map_width, roi_map_height),
interpolation=cv2.INTER_CUBIC)
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
for k in range(cfg.KRCNN.NUM_KEYPOINTS):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, k] = x + offset_x[i]
xy_preds[i, 1, k] = y + offset_y[i]
xy_preds[i, 2, k] = roi_map[k, y_int, x_int]
xy_preds[i, 3, k] = roi_map_probs[k, y_int, x_int]
return xy_preds
def keypoint_utils_get_person_class_index():
"""Index of the person class in COCO."""
return 1
def keypoint_results(cls_boxes, pred_heatmaps, ref_boxes):
num_classes = cfg.MODEL.NUM_CLASSES
cls_keyps = [[] for _ in range(num_classes)]
person_idx = keypoint_utils_get_person_class_index()
xy_preds = keypoint_utils_heatmaps_to_keypoints(pred_heatmaps, ref_boxes)
# NMS OKS
if cfg.KRCNN.NMS_OKS:
keep = keypoint_utils_nms_oks(xy_preds, ref_boxes, 0.3)
xy_preds = xy_preds[keep, :, :]
ref_boxes = ref_boxes[keep, :]
pred_heatmaps = pred_heatmaps[keep, :, :, :]
cls_boxes[person_idx] = cls_boxes[person_idx][keep, :]
kps = [xy_preds[i] for i in range(xy_preds.shape[0])]
cls_keyps[person_idx] = kps
return cls_keyps
def im_detect_keypoints(model, im_scale, boxes):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
        im_scale (float): image blob scale as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return pred_heatmaps
inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'keypoint_rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.keypoint_net.Proto().name)
pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
    # In case of a single RoI, restore the leading batch dimension.
if pred_heatmaps.ndim == 3:
pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
return pred_heatmaps
def combine_heatmaps_size_dep(hms_ts, ds_ts, us_ts, boxes, heur_f):
"""Combines heatmaps while taking object sizes into account."""
assert len(hms_ts) == len(ds_ts) and len(ds_ts) == len(us_ts), \
'All sets of hms must be tagged with downscaling and upscaling flags'
# Classify objects into small+medium and large based on their box areas
areas = box_utils_boxes_area(boxes)
sm_objs = areas < cfg.TEST.KPS_AUG.AREA_TH
l_objs = areas >= cfg.TEST.KPS_AUG.AREA_TH
# Combine heatmaps computed under different transformations for each object
hms_c = np.zeros_like(hms_ts[0])
for i in range(hms_c.shape[0]):
hms_to_combine = []
for hms_t, ds_t, us_t in zip(hms_ts, ds_ts, us_ts):
# Discard downscaling predictions for small and medium objects
if sm_objs[i] and ds_t:
continue
# Discard upscaling predictions for large objects
if l_objs[i] and us_t:
continue
hms_to_combine.append(hms_t[i])
hms_c[i] = heur_f(hms_to_combine)
return hms_c
def im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=False
):
"""Detects keypoints at the given width-relative aspect ratio."""
    # Perform keypoint detection on the transformed image
im_ar = image_utils_aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils_aspect_ratio(boxes, aspect_ratio)
if hflip:
heatmaps_ar = im_detect_keypoints_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
heatmaps_ar = im_detect_keypoints(model, im_scale, boxes_ar)
return heatmaps_ar
def im_detect_keypoints_scale(
model, im, target_scale, target_max_size, boxes, hflip=False
):
"""Computes keypoint predictions at the given scale."""
if hflip:
heatmaps_scl = im_detect_keypoints_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
heatmaps_scl = im_detect_keypoints(model, im_scale, boxes)
return heatmaps_scl
def get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
return keypoints, keypoint_flip_map
def keypoint_utils_flip_heatmaps(heatmaps):
"""Flip heatmaps horizontally."""
keypoints, flip_map = get_keypoints()
heatmaps_flipped = heatmaps.copy()
for lkp, rkp in flip_map.items():
lid = keypoints.index(lkp)
rid = keypoints.index(rkp)
heatmaps_flipped[:, rid, :, :] = heatmaps[:, lid, :, :]
heatmaps_flipped[:, lid, :, :] = heatmaps[:, rid, :, :]
heatmaps_flipped = heatmaps_flipped[:, :, :, ::-1]
return heatmaps_flipped
def im_detect_keypoints_hflip(model, im, target_scale, target_max_size, boxes):
"""Computes keypoint predictions on the horizontally flipped image.
Function signature is the same as for im_detect_keypoints_aug.
"""
# Compute keypoints for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils_flip_boxes(boxes, im.shape[1])
im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
heatmaps_hf = im_detect_keypoints(model, im_scale, boxes_hf)
# Invert the predicted keypoints
heatmaps_inv = keypoint_utils_flip_heatmaps(heatmaps_hf)
return heatmaps_inv
def im_detect_keypoints_aug(model, im, boxes):
"""Computes keypoint predictions with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
Returns:
heatmaps (ndarray): R x J x M x M array of keypoint location logits
"""
# Collect heatmaps predicted under different transformations
heatmaps_ts = []
# Tag predictions computed under downscaling and upscaling transformations
ds_ts = []
us_ts = []
def add_heatmaps_t(heatmaps_t, ds_t=False, us_t=False):
heatmaps_ts.append(heatmaps_t)
ds_ts.append(ds_t)
us_ts.append(us_t)
# Compute the heatmaps for the original image (identity transform)
im_scale = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
heatmaps_i = im_detect_keypoints(model, im_scale, boxes)
add_heatmaps_t(heatmaps_i)
    # Perform keypoint detection on the horizontally flipped image
if cfg.TEST.KPS_AUG.H_FLIP:
heatmaps_hf = im_detect_keypoints_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_hf)
# Compute detections at different scales
for scale in cfg.TEST.KPS_AUG.SCALES:
ds_scl = scale < cfg.TEST.SCALE
us_scl = scale > cfg.TEST.SCALE
heatmaps_scl = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_scl, ds_scl, us_scl)
if cfg.TEST.KPS_AUG.SCALE_H_FLIP:
heatmaps_scl_hf = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes, hflip=True
)
add_heatmaps_t(heatmaps_scl_hf, ds_scl, us_scl)
# Compute keypoints at different aspect ratios
for aspect_ratio in cfg.TEST.KPS_AUG.ASPECT_RATIOS:
heatmaps_ar = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes
)
add_heatmaps_t(heatmaps_ar)
if cfg.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP:
heatmaps_ar_hf = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
add_heatmaps_t(heatmaps_ar_hf)
# Select the heuristic function for combining the heatmaps
if cfg.TEST.KPS_AUG.HEUR == 'HM_AVG':
np_f = np.mean
elif cfg.TEST.KPS_AUG.HEUR == 'HM_MAX':
np_f = np.amax
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.KPS_AUG.HEUR)
)
def heur_f(hms_ts):
return np_f(hms_ts, axis=0)
# Combine the heatmaps
if cfg.TEST.KPS_AUG.SCALE_SIZE_DEP:
heatmaps_c = combine_heatmaps_size_dep(
heatmaps_ts, ds_ts, us_ts, boxes, heur_f
)
else:
heatmaps_c = heur_f(heatmaps_ts)
return heatmaps_c
def box_utils_expand_boxes(boxes, scale):
"""Expand an array of boxes by a given scale."""
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
def segm_results(cls_boxes, masks, ref_boxes, im_h, im_w):
num_classes = cfg.MODEL.NUM_CLASSES
cls_segms = [[] for _ in range(num_classes)]
mask_ind = 0
# To work around an issue with cv2.resize (it seems to automatically pad
# with repeated border values), we manually zero-pad the masks by 1 pixel
# prior to resizing back to the original image resolution. This prevents
# "top hat" artifacts. We therefore need to expand the reference boxes by an
# appropriate factor.
M = cfg.MRCNN.RESOLUTION
scale = (M + 2.0) / M
ref_boxes = box_utils_expand_boxes(ref_boxes, scale)
ref_boxes = ref_boxes.astype(np.int32)
padded_mask = np.zeros((M + 2, M + 2), dtype=np.float32)
# skip j = 0, because it's the background class
for j in range(1, num_classes):
segms = []
for _ in range(cls_boxes[j].shape[0]):
if cfg.MRCNN.CLS_SPECIFIC_MASK:
padded_mask[1:-1, 1:-1] = masks[mask_ind, j, :, :]
else:
padded_mask[1:-1, 1:-1] = masks[mask_ind, 0, :, :]
ref_box = ref_boxes[mask_ind, :]
w = ref_box[2] - ref_box[0] + 1
h = ref_box[3] - ref_box[1] + 1
w = np.maximum(w, 1)
h = np.maximum(h, 1)
mask = cv2.resize(padded_mask, (w, h))
mask = np.array(mask > cfg.MRCNN.THRESH_BINARIZE, dtype=np.uint8)
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
x_0 = max(ref_box[0], 0)
x_1 = min(ref_box[2] + 1, im_w)
y_0 = max(ref_box[1], 0)
y_1 = min(ref_box[3] + 1, im_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - ref_box[1]):(y_1 - ref_box[1]),
(x_0 - ref_box[0]):(x_1 - ref_box[0])
]
# Get RLE encoding used by the COCO evaluation API
rle = mask_util.encode(
np.array(im_mask[:, :, np.newaxis], order='F')
)[0]
segms.append(rle)
mask_ind += 1
cls_segms[j] = segms
assert mask_ind == masks.shape[0]
return cls_segms
def im_detect_mask(model, im_scale, boxes):
"""Infer instance segmentation masks. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
        im_scale (float): image blob scale as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_masks (ndarray): R x K x M x M array of class specific soft masks
output by the network (must be processed by segm_results to convert
into hard masks in the original image coordinate space)
"""
M = cfg.MRCNN.RESOLUTION
if boxes.shape[0] == 0:
pred_masks = np.zeros((0, M, M), np.float32)
return pred_masks
inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'mask_rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.mask_net.Proto().name)
# Fetch masks
pred_masks = workspace.FetchBlob(
core.ScopedName('mask_fcn_probs')
).squeeze()
if cfg.MRCNN.CLS_SPECIFIC_MASK:
pred_masks = pred_masks.reshape([-1, cfg.MODEL.NUM_CLASSES, M, M])
else:
pred_masks = pred_masks.reshape([-1, 1, M, M])
return pred_masks
def im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes, hflip=False):
"""Computes mask detections at the given width-relative aspect ratio."""
# Perform mask detection on the transformed image
im_ar = image_utils_aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils_aspect_ratio(boxes, aspect_ratio)
if hflip:
masks_ar = im_detect_mask_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
masks_ar = im_detect_mask(model, im_scale, boxes_ar)
return masks_ar
def im_detect_mask_scale(
model, im, target_scale, target_max_size, boxes, hflip=False
):
"""Computes masks at the given scale."""
if hflip:
masks_scl = im_detect_mask_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
masks_scl = im_detect_mask(model, im_scale, boxes)
return masks_scl
def im_detect_mask_hflip(model, im, target_scale, target_max_size, boxes):
"""Performs mask detection on the horizontally flipped image.
Function signature is the same as for im_detect_mask_aug.
"""
# Compute the masks for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils_flip_boxes(boxes, im.shape[1])
im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
masks_hf = im_detect_mask(model, im_scale, boxes_hf)
# Invert the predicted soft masks
masks_inv = masks_hf[:, :, :, ::-1]
return masks_inv
def im_conv_body_only(model, im, target_scale, target_max_size):
"""Runs `model.conv_body_net` on the given image `im`."""
im_blob, im_scale, _im_info = blob_utils_get_image_blob(
im, target_scale, target_max_size
)
workspace.FeedBlob(core.ScopedName('data'), im_blob)
workspace.RunNet(model.conv_body_net.Proto().name)
return im_scale
def im_detect_mask_aug(model, im, boxes):
"""Performs mask detection with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
Returns:
masks (ndarray): R x K x M x M array of class specific soft masks
"""
assert not cfg.TEST.MASK_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
# Collect masks computed under different transformations
masks_ts = []
# Compute masks for the original image (identity transform)
im_scale_i = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
masks_i = im_detect_mask(model, im_scale_i, boxes)
masks_ts.append(masks_i)
# Perform mask detection on the horizontally flipped image
if cfg.TEST.MASK_AUG.H_FLIP:
masks_hf = im_detect_mask_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
masks_ts.append(masks_hf)
# Compute detections at different scales
for scale in cfg.TEST.MASK_AUG.SCALES:
max_size = cfg.TEST.MASK_AUG.MAX_SIZE
masks_scl = im_detect_mask_scale(model, im, scale, max_size, boxes)
masks_ts.append(masks_scl)
if cfg.TEST.MASK_AUG.SCALE_H_FLIP:
masks_scl_hf = im_detect_mask_scale(
model, im, scale, max_size, boxes, hflip=True
)
masks_ts.append(masks_scl_hf)
# Compute masks at different aspect ratios
for aspect_ratio in cfg.TEST.MASK_AUG.ASPECT_RATIOS:
masks_ar = im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes)
masks_ts.append(masks_ar)
if cfg.TEST.MASK_AUG.ASPECT_RATIO_H_FLIP:
masks_ar_hf = im_detect_mask_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
masks_ts.append(masks_ar_hf)
# Combine the predicted soft masks
if cfg.TEST.MASK_AUG.HEUR == 'SOFT_AVG':
masks_c = np.mean(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'SOFT_MAX':
masks_c = np.amax(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'LOGIT_AVG':
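        # Average the soft masks in logit space, then map the mean back through
        # the sigmoid; this gives more weight to confident predictions than a
        # plain probability average would.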
def logit(y):
return -1.0 * np.log((1.0 - y) / np.maximum(y, 1e-20))
logit_masks = [logit(y) for y in masks_ts]
logit_masks = np.mean(logit_masks, axis=0)
masks_c = 1.0 / (1.0 + np.exp(-logit_masks))
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.MASK_AUG.HEUR)
)
return masks_c
def box_utils_box_voting(top_dets, all_dets, thresh, scoring_method='ID', beta=1.0):
"""Apply bounding-box voting to refine `top_dets` by voting with `all_dets`.
See: https://arxiv.org/abs/1505.01749. Optional score averaging (not in the
referenced paper) can be applied by setting `scoring_method` appropriately.
"""
    # top_dets is [N, 5]; each row is [x1, y1, x2, y2, score]
    # all_dets is [N, 5]; each row is [x1, y1, x2, y2, score]
top_dets_out = top_dets.copy()
top_boxes = top_dets[:, :4]
all_boxes = all_dets[:, :4]
all_scores = all_dets[:, 4]
top_to_all_overlaps = bbox_overlaps(top_boxes, all_boxes)
for k in range(top_dets_out.shape[0]):
inds_to_vote = np.where(top_to_all_overlaps[k] >= thresh)[0]
boxes_to_vote = all_boxes[inds_to_vote, :]
ws = all_scores[inds_to_vote]
top_dets_out[k, :4] = np.average(boxes_to_vote, axis=0, weights=ws)
if scoring_method == 'ID':
# Identity, nothing to do
pass
elif scoring_method == 'TEMP_AVG':
# Average probabilities (considered as P(detected class) vs.
# P(not the detected class)) after smoothing with a temperature
# hyperparameter.
P = np.vstack((ws, 1.0 - ws))
P_max = np.max(P, axis=0)
X = np.log(P / P_max)
X_exp = np.exp(X / beta)
P_temp = X_exp / np.sum(X_exp, axis=0)
P_avg = P_temp[0].mean()
top_dets_out[k, 4] = P_avg
elif scoring_method == 'AVG':
# Combine new probs from overlapping boxes
top_dets_out[k, 4] = ws.mean()
elif scoring_method == 'IOU_AVG':
P = ws
ws = top_to_all_overlaps[k, inds_to_vote]
P_avg = np.average(P, weights=ws)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'GENERALIZED_AVG':
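            # Power (generalized) mean of the voting scores:
            # ((1/n) * sum(w_i**beta))**(1/beta); beta=1 reduces to the plain average.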
P_avg = np.mean(ws**beta)**(1.0 / beta)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'QUASI_SUM':
top_dets_out[k, 4] = ws.sum() / float(len(ws))**beta
else:
raise NotImplementedError(
'Unknown scoring method {}'.format(scoring_method)
)
return top_dets_out
def box_utils_soft_nms(
dets, sigma=0.5, overlap_thresh=0.3, score_thresh=0.001, method='linear'
):
"""Apply the soft NMS algorithm from https://arxiv.org/abs/1704.04503."""
if dets.shape[0] == 0:
return dets, []
methods = {'hard': 0, 'linear': 1, 'gaussian': 2}
assert method in methods, 'Unknown soft_nms method: {}'.format(method)
dets, keep = cython_nms.soft_nms(
np.ascontiguousarray(dets, dtype=np.float32),
np.float32(sigma),
np.float32(overlap_thresh),
np.float32(score_thresh),
np.uint8(methods[method])
)
return dets, keep
def box_results_with_nms_and_limit(scores, boxes):
"""Returns bounding-box detection results by thresholding on scores and
applying non-maximum suppression (NMS).
`boxes` has shape (#detections, 4 * #classes), where each row represents
a list of predicted bounding boxes for each of the object classes in the
dataset (including the background class). The detections in each row
originate from the same object proposal.
`scores` has shape (#detection, #classes), where each row represents a list
of object detection confidence scores for each of the object classes in the
    dataset (including the background class). `scores[i, j]` corresponds to the
box at `boxes[i, j * 4:(j + 1) * 4]`.
"""
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(num_classes)]
# Apply threshold on detection probabilities and apply NMS
# Skip j = 0, because it's the background class
for j in range(1, num_classes):
inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4:(j + 1) * 4]
dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(
np.float32, copy=False
)
if cfg.TEST.SOFT_NMS.ENABLED:
nms_dets, _ = box_utils_soft_nms(
dets_j,
sigma=cfg.TEST.SOFT_NMS.SIGMA,
overlap_thresh=cfg.TEST.NMS,
score_thresh=0.0001,
method=cfg.TEST.SOFT_NMS.METHOD
)
else:
keep = box_utils_nms(dets_j, cfg.TEST.NMS)
nms_dets = dets_j[keep, :]
# Refine the post-NMS boxes using bounding-box voting
if cfg.TEST.BBOX_VOTE.ENABLED:
nms_dets = box_utils_box_voting(
nms_dets,
dets_j,
cfg.TEST.BBOX_VOTE.VOTE_TH,
scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
)
cls_boxes[j] = nms_dets
# Limit to max_per_image detections **over all classes**
if cfg.TEST.DETECTIONS_PER_IM > 0:
image_scores = np.hstack(
[cls_boxes[j][:, -1] for j in range(1, num_classes)]
)
if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
for j in range(1, num_classes):
keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
cls_boxes[j] = cls_boxes[j][keep, :]
im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
boxes = im_results[:, :-1]
scores = im_results[:, -1]
return scores, boxes, cls_boxes
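# Toy illustration (made-up scores, not taken from the pipeline above) of the
# per-image detection cap: only the DETECTIONS_PER_IM highest scores across all
# foreground classes survive the final filtering step.
import numpy as np
_cls_scores = [np.array([0.9, 0.4]), np.array([0.8, 0.7, 0.2])]
_limit = 3
_image_scores = np.hstack(_cls_scores)
_thresh = np.sort(_image_scores)[-_limit]           # 0.7
_kept = [s[s >= _thresh] for s in _cls_scores]      # [[0.9], [0.8, 0.7]]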
def _add_multilevel_rois_for_test(blobs, name):
"""Distributes a set of RoIs across FPN pyramid levels by creating new level
specific RoI blobs.
Arguments:
blobs (dict): dictionary of blobs
name (str): a key in 'blobs' identifying the source RoI blob
Returns:
[by ref] blobs (dict): new keys named by `name + 'fpn' + level`
are added to dict each with a value that's an R_level x 5 ndarray of
RoIs (see _get_rois_blob for format)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_map_rois_to_fpn_levels(blobs[name][:, 1:5], lvl_min, lvl_max)
fpn_add_multilevel_roi_blobs(
blobs, name, blobs[name], lvls, lvl_min, lvl_max
)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (ndarray): image pyramid levels used by each projected RoI
"""
rois = im_rois.astype(np.float, copy=False) * scales
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
return rois, levels
def _get_rois_blob(im_rois, im_scale):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale (float): image scale factor as returned by blob_utils_get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns
[level, x1, y1, x2, y2]
"""
rois, levels = _project_im_rois(im_rois, im_scale)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _get_blobs(im, rois, target_scale, target_max_size):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale, blobs['im_info'] = \
blob_utils_get_image_blob(im, target_scale, target_max_size)
if rois is not None:
blobs['rois'] = _get_rois_blob(rois, im_scale)
return blobs, im_scale
def im_detect_bbox(model, im, target_scale, target_max_size, boxes=None):
"""Bounding box object detection for an image with given box proposals.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals in 0-indexed
[x1, y1, x2, y2] format, or None if using RPN
Returns:
scores (ndarray): R x K array of object class scores for K classes
(K includes background as object category 0)
boxes (ndarray): R x 4*K array of predicted bounding boxes
im_scales (list): list of image scales used in the input blob (as
returned by _get_blobs and for use with im_detect_mask, etc.)
"""
inputs, im_scale = _get_blobs(im, boxes, target_scale, target_max_size)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(
hashes, return_index=True, return_inverse=True
)
inputs['rois'] = inputs['rois'][index, :]
boxes = boxes[index, :]
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS and not cfg.MODEL.FASTER_RCNN:
_add_multilevel_rois_for_test(inputs, 'rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.net.Proto().name)
# Read out blobs
if cfg.MODEL.FASTER_RCNN:
rois = workspace.FetchBlob(core.ScopedName('rois'))
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scale
# Softmax class probabilities
scores = workspace.FetchBlob(core.ScopedName('cls_prob')).squeeze()
# In case there is 1 proposal
scores = scores.reshape([-1, scores.shape[-1]])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = workspace.FetchBlob(core.ScopedName('bbox_pred')).squeeze()
# In case there is 1 proposal
box_deltas = box_deltas.reshape([-1, box_deltas.shape[-1]])
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
# Remove predictions for bg class (compat with MSRA code)
box_deltas = box_deltas[:, -4:]
pred_boxes = box_utils_bbox_transform(
boxes, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS
)
pred_boxes = box_utils_clip_tiled_boxes(pred_boxes, im.shape)
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
pred_boxes = np.tile(pred_boxes, (1, scores.shape[1]))
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes, im_scale
def box_utils_aspect_ratio(boxes, aspect_ratio):
"""Perform width-relative aspect ratio transformation."""
boxes_ar = boxes.copy()
boxes_ar[:, 0::4] = aspect_ratio * boxes[:, 0::4]
boxes_ar[:, 2::4] = aspect_ratio * boxes[:, 2::4]
return boxes_ar
def image_utils_aspect_ratio_rel(im, aspect_ratio):
"""Performs width-relative aspect ratio transformation."""
im_h, im_w = im.shape[:2]
im_ar_w = int(round(aspect_ratio * im_w))
im_ar = cv2.resize(im, dsize=(im_ar_w, im_h))
return im_ar
def im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals=None, hflip=False
):
"""Computes bbox detections at the given width-relative aspect ratio.
Returns predictions in the original image space.
"""
# Compute predictions on the transformed image
im_ar = image_utils_aspect_ratio_rel(im, aspect_ratio)
if not cfg.MODEL.FASTER_RCNN:
box_proposals_ar = box_utils_aspect_ratio(box_proposals, aspect_ratio)
else:
box_proposals_ar = None
if hflip:
scores_ar, boxes_ar, _ = im_detect_bbox_hflip(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals_ar
)
else:
scores_ar, boxes_ar, _ = im_detect_bbox(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=box_proposals_ar
)
# Invert the detected boxes
boxes_inv = box_utils_aspect_ratio(boxes_ar, 1.0 / aspect_ratio)
return scores_ar, boxes_inv
def im_detect_bbox_scale(
model, im, target_scale, target_max_size, box_proposals=None, hflip=False
):
"""Computes bbox detections at the given scale.
Returns predictions in the original image space.
"""
if hflip:
scores_scl, boxes_scl, _ = im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=box_proposals
)
else:
scores_scl, boxes_scl, _ = im_detect_bbox(
model, im, target_scale, target_max_size, boxes=box_proposals
)
return scores_scl, boxes_scl
def box_utils_flip_boxes(boxes, im_width):
"""Flip boxes horizontally."""
boxes_flipped = boxes.copy()
boxes_flipped[:, 0::4] = im_width - boxes[:, 2::4] - 1
boxes_flipped[:, 2::4] = im_width - boxes[:, 0::4] - 1
return boxes_flipped
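# Quick sanity check with hypothetical values: horizontal flipping is an
# involution, so flipping twice recovers the original boxes.
import numpy as np
_boxes = np.array([[10., 20., 30., 40.]])
_flipped = box_utils_flip_boxes(_boxes, 100)        # [[69., 20., 89., 40.]]
assert np.allclose(box_utils_flip_boxes(_flipped, 100), _boxes)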
def im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=None
):
"""Performs bbox detection on the horizontally flipped image.
Function signature is the same as for im_detect_bbox.
"""
# Compute predictions on the flipped image
im_hf = im[:, ::-1, :]
im_width = im.shape[1]
if not cfg.MODEL.FASTER_RCNN:
box_proposals_hf = box_utils_flip_boxes(box_proposals, im_width)
else:
box_proposals_hf = None
scores_hf, boxes_hf, im_scale = im_detect_bbox(
model, im_hf, target_scale, target_max_size, boxes=box_proposals_hf
)
# Invert the detections computed on the flipped image
boxes_inv = box_utils_flip_boxes(boxes_hf, im_width)
return scores_hf, boxes_inv, im_scale
def im_detect_bbox_aug(model, im, box_proposals=None):
"""Performs bbox detection with test-time augmentations.
Function signature is the same as for im_detect_bbox.
"""
assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', \
'Coord heuristic must be union whenever score heuristic is union'
assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Score heuristic must be union whenever coord heuristic is union'
assert not cfg.MODEL.FASTER_RCNN or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Union heuristic must be used to combine Faster RCNN predictions'
# Collect detections computed under different transformations
scores_ts = []
boxes_ts = []
def add_preds_t(scores_t, boxes_t):
scores_ts.append(scores_t)
boxes_ts.append(boxes_t)
# Perform detection on the horizontally flipped image
if cfg.TEST.BBOX_AUG.H_FLIP:
scores_hf, boxes_hf, _ = im_detect_bbox_hflip(
model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals
)
add_preds_t(scores_hf, boxes_hf)
# Compute detections at different scales
for scale in cfg.TEST.BBOX_AUG.SCALES:
max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
scores_scl, boxes_scl = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals
)
add_preds_t(scores_scl, boxes_scl)
if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
scores_scl_hf, boxes_scl_hf = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals, hflip=True
)
add_preds_t(scores_scl_hf, boxes_scl_hf)
# Perform detection at different aspect ratios
for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS:
scores_ar, boxes_ar = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals
)
add_preds_t(scores_ar, boxes_ar)
if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP:
scores_ar_hf, boxes_ar_hf = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals, hflip=True
)
add_preds_t(scores_ar_hf, boxes_ar_hf)
# Compute detections for the original image (identity transform) last to
# ensure that the Caffe2 workspace is populated with blobs corresponding
# to the original image on return (postcondition of im_detect_bbox)
scores_i, boxes_i, im_scale_i = im_detect_bbox(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
)
add_preds_t(scores_i, boxes_i)
# Combine the predicted scores
if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID':
scores_c = scores_i
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG':
scores_c = np.mean(scores_ts, axis=0)
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION':
scores_c = np.vstack(scores_ts)
else:
raise NotImplementedError(
'Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)
)
# Combine the predicted boxes
if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID':
boxes_c = boxes_i
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG':
boxes_c = np.mean(boxes_ts, axis=0)
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION':
boxes_c = np.vstack(boxes_ts)
else:
raise NotImplementedError(
'Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)
)
return scores_c, boxes_c, im_scale_i
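# Score-combination heuristics in miniature (made-up numbers, no cfg required):
# 'AVG' averages per-box scores across transforms, while 'UNION' simply stacks
# all of them.
import numpy as np
_scores_ts = [np.array([[0.8, 0.2]]), np.array([[0.6, 0.4]])]
_avg = np.mean(_scores_ts, axis=0)                  # [[0.7, 0.3]]
_union = np.vstack(_scores_ts)                      # shape (2, 2)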
def im_list_to_blob(ims):
"""Convert a list of images into a network input. Assumes images were
prepared using prep_im_for_blob or equivalent: i.e.
- BGR channel order
- pixel means subtracted
- resized to the desired input size
- float32 numpy ndarray format
    Output is a 4D NCHW tensor of the images concatenated along axis 0 (the
    batch dimension).
"""
if not isinstance(ims, list):
ims = [ims]
max_shape = np.array([im.shape for im in ims]).max(axis=0)
    # Pad the images so their dimensions are divisible by the FPN stride
if cfg.FPN.FPN_ON:
stride = float(cfg.FPN.COARSEST_STRIDE)
max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)
max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)
num_images = len(ims)
blob = np.zeros(
(num_images, max_shape[0], max_shape[1], 3), dtype=np.float32
)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
# Move channels (axis 3) to axis 1
# Axis order will become: (batch elem, channel, height, width)
channel_swap = (0, 3, 1, 2)
blob = blob.transpose(channel_swap)
return blob
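# Shape-only sketch of the padding/stacking performed above, using invented
# image sizes and omitting the cfg-dependent FPN stride rounding: images are
# zero-padded to a common height/width and returned in NCHW order.
import numpy as np
_ims = [np.zeros((480, 640, 3), np.float32), np.zeros((600, 500, 3), np.float32)]
_max_h, _max_w = np.array([im.shape[:2] for im in _ims]).max(axis=0)
_blob = np.zeros((len(_ims), _max_h, _max_w, 3), np.float32)
for _i, _im in enumerate(_ims):
    _blob[_i, :_im.shape[0], :_im.shape[1], :] = _im
_blob = _blob.transpose(0, 3, 1, 2)                 # -> (2, 3, 600, 640)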
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""Prepare an image for use as a network input blob. Specially:
- Subtract per-channel pixel mean
- Convert to float32
- Rescale to each of the specified target size (capped at max_size)
Returns a list of transformed images, one for each target size. Also returns
the scale factors that were used to compute each returned image.
"""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than max_size
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(
im,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR
)
return im, im_scale
def blob_utils_get_image_blob(im, target_scale, target_max_size):
"""Convert an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale (float): image scale (target size) / (original size)
im_info (ndarray)
"""
processed_im, im_scale = prep_im_for_blob(
im, cfg.PIXEL_MEANS, target_scale, target_max_size
)
blob = im_list_to_blob(processed_im)
# NOTE: this height and width may be larger than actual scaled input image
# due to the FPN.COARSEST_STRIDE related padding in im_list_to_blob. We are
# maintaining this behavior for now to make existing results exactly
# reproducible (in practice using the true input image height and width
# yields nearly the same results, but they are sometimes slightly different
# because predictions near the edge of the image will be pruned more
# aggressively).
height, width = blob.shape[2], blob.shape[3]
im_info = np.hstack((height, width, im_scale))[np.newaxis, :]
return blob, im_scale, im_info.astype(np.float32)
def _scale_enum(anchor, scales):
"""Enumerate a set of anchors for each scale wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack(
(
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)
)
)
return anchors
def _whctrs(anchor):
"""Return width, height, x center, and y center for an anchor (window)."""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _ratio_enum(anchor, ratios):
"""Enumerate a set of anchors for each aspect ratio wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _generate_anchors(base_size, scales, aspect_ratios):
"""Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, base_size - 1, base_size - 1) window.
"""
anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 1
anchors = _ratio_enum(anchor, aspect_ratios)
anchors = np.vstack(
[_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]
)
return anchors
def generate_anchors(
stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)
):
"""Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
are centered on stride / 2, have (approximate) sqrt areas of the specified
sizes, and aspect ratios as given.
"""
return _generate_anchors(
stride,
np.array(sizes, dtype=np.float) / stride,
np.array(aspect_ratios, dtype=np.float)
)
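# Example call with hypothetical settings: one size and three aspect ratios give
# a (3, 4) anchor array centred on the stride-16 reference window. Note that the
# helpers above use the legacy np.float alias, so this sketch assumes an older
# NumPy where that alias is still available.
_cell_anchors = generate_anchors(stride=16, sizes=(32,), aspect_ratios=(0.5, 1, 2))
# _cell_anchors.shape == (3, 4)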
def _create_cell_anchors():
"""
Generate all types of anchors for all fpn levels/scales/aspect ratios.
This function is called only once at the beginning of inference.
"""
k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL
scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
aspect_ratios = cfg.RETINANET.ASPECT_RATIOS
anchor_scale = cfg.RETINANET.ANCHOR_SCALE
A = scales_per_octave * len(aspect_ratios)
anchors = {}
for lvl in range(k_min, k_max + 1):
# create cell anchors array
stride = 2. ** lvl
cell_anchors = np.zeros((A, 4))
a = 0
for octave in range(scales_per_octave):
octave_scale = 2 ** (octave / float(scales_per_octave))
for aspect in aspect_ratios:
anchor_sizes = (stride * octave_scale * anchor_scale, )
anchor_aspect_ratios = (aspect, )
cell_anchors[a, :] = generate_anchors(
stride=stride, sizes=anchor_sizes,
aspect_ratios=anchor_aspect_ratios)
a += 1
anchors[lvl] = cell_anchors
return anchors
def test_retinanet_im_detect_bbox(model, im, timers=None):
"""Generate RetinaNet detections on a single image."""
if timers is None:
timers = defaultdict(Timer)
# Although anchors are input independent and could be precomputed,
# recomputing them per image only brings a small overhead
anchors = _create_cell_anchors()
timers['im_detect_bbox'].tic()
k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL
A = cfg.RETINANET.SCALES_PER_OCTAVE * len(cfg.RETINANET.ASPECT_RATIOS)
inputs = {}
inputs['data'], im_scale, inputs['im_info'] = \
blob_utils_get_image_blob(im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
cls_probs, box_preds = [], []
for lvl in range(k_min, k_max + 1):
suffix = 'fpn{}'.format(lvl)
cls_probs.append(core.ScopedName('retnet_cls_prob_{}'.format(suffix)))
box_preds.append(core.ScopedName('retnet_bbox_pred_{}'.format(suffix)))
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v.astype(np.float32, copy=False))
workspace.RunNet(model.net.Proto().name)
cls_probs = workspace.FetchBlobs(cls_probs)
box_preds = workspace.FetchBlobs(box_preds)
# here the boxes_all are [x0, y0, x1, y1, score]
boxes_all = defaultdict(list)
cnt = 0
for lvl in range(k_min, k_max + 1):
# create cell anchors array
stride = 2. ** lvl
cell_anchors = anchors[lvl]
# fetch per level probability
cls_prob = cls_probs[cnt]
box_pred = box_preds[cnt]
cls_prob = cls_prob.reshape((
cls_prob.shape[0], A, int(cls_prob.shape[1] / A),
cls_prob.shape[2], cls_prob.shape[3]))
box_pred = box_pred.reshape((
box_pred.shape[0], A, 4, box_pred.shape[2], box_pred.shape[3]))
cnt += 1
if cfg.RETINANET.SOFTMAX:
cls_prob = cls_prob[:, :, 1::, :, :]
cls_prob_ravel = cls_prob.ravel()
        # In some cases [especially for very small img sizes], it's possible
        # that candidate_inds is empty if we impose the 0.05 threshold at all
        # levels, which would leave no detections for this image. Hence, for
        # the coarsest level (k_max), which has a small spatial resolution,
        # we lower the threshold to 0.0.
th = cfg.RETINANET.INFERENCE_TH if lvl < k_max else 0.0
candidate_inds = np.where(cls_prob_ravel > th)[0]
if (len(candidate_inds) == 0):
continue
pre_nms_topn = min(cfg.RETINANET.PRE_NMS_TOP_N, len(candidate_inds))
inds = np.argpartition(
cls_prob_ravel[candidate_inds], -pre_nms_topn)[-pre_nms_topn:]
inds = candidate_inds[inds]
inds_5d = np.array(np.unravel_index(inds, cls_prob.shape)).transpose()
classes = inds_5d[:, 2]
anchor_ids, y, x = inds_5d[:, 1], inds_5d[:, 3], inds_5d[:, 4]
scores = cls_prob[:, anchor_ids, classes, y, x]
boxes = np.column_stack((x, y, x, y)).astype(dtype=np.float32)
boxes *= stride
boxes += cell_anchors[anchor_ids, :]
if not cfg.RETINANET.CLASS_SPECIFIC_BBOX:
box_deltas = box_pred[0, anchor_ids, :, y, x]
else:
box_cls_inds = classes * 4
box_deltas = np.vstack(
[box_pred[0, ind:ind + 4, yi, xi]
for ind, yi, xi in zip(box_cls_inds, y, x)]
)
pred_boxes = (
box_utils_bbox_transform(boxes, box_deltas)
if cfg.TEST.BBOX_REG else boxes)
pred_boxes /= im_scale
pred_boxes = box_utils_clip_tiled_boxes(pred_boxes, im.shape)
box_scores = np.zeros((pred_boxes.shape[0], 5))
box_scores[:, 0:4] = pred_boxes
box_scores[:, 4] = scores
for cls in range(1, cfg.MODEL.NUM_CLASSES):
inds =
|
np.where(classes == cls - 1)
|
numpy.where
|
"""
Integrates the Multi-Fidelity Co-Kriging method described in [LeGratiet2013].
(Author: <NAME> <EMAIL>)
This code was implemented using the scikit-learn package as a basis.
(Author: <NAME>, <EMAIL>)
OpenMDAO adaptation. Regression and correlation functions were copied directly
from the scikit-learn package here to avoid a scikit-learn dependency.
(Author: <NAME>, <EMAIL>)
ISAE/DMSM - ONERA/DCPS
"""
import numpy as np
from numpy import atleast_2d as array2d
from scipy import linalg
from scipy.optimize import minimize
from scipy.spatial.distance import squareform
from openmdao.surrogate_models.surrogate_model import MultiFiSurrogateModel
import logging
_logger = logging.getLogger()
MACHINE_EPSILON = np.finfo(np.double).eps # machine precision
NUGGET = 10. * MACHINE_EPSILON # nugget for robustness
INITIAL_RANGE_DEFAULT = 0.3 # initial range for optimizer
TOLERANCE_DEFAULT = 1e-6 # stopping criterion for MLE optimization
THETA0_DEFAULT = 0.5
THETAL_DEFAULT = 1e-5
THETAU_DEFAULT = 50
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
def solve_triangular(x, y, lower=True):
"""Solve triangular."""
return linalg.solve(x, y)
def constant_regression(x):
"""
Zero order polynomial (constant, p = 1) regression model.
x --> f(x) = 1
Parameters
----------
x : array_like
Input data.
Returns
-------
array_like
Constant regression output.
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.ones([n_eval, 1])
return f
def linear_regression(x):
"""
First order polynomial (linear, p = n+1) regression model.
x --> f(x) = [ 1, x_1, ..., x_n ].T
Parameters
----------
x : array_like
Input data.
Returns
-------
array_like
Linear regression output.
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.hstack([np.ones([n_eval, 1]), x])
return f
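# Toy shape check for the two built-in regression bases (illustrative only; the
# functions above use the legacy np.float alias, so this assumes an older NumPy).
_X_demo = np.array([[0.0, 1.0], [0.5, 2.0], [1.0, 3.0]])
_f_const = constant_regression(_X_demo)   # shape (3, 1), a column of ones
_f_lin = linear_regression(_X_demo)       # shape (3, 3), rows [1, x_1, x_2]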
def squared_exponential_correlation(theta, d):
"""
Squared exponential correlation model (Radial Basis Function).
(Infinitely differentiable stochastic process, very smooth)::
n
theta, dx --> r(theta, dx) = exp( sum - theta_i * (dx_i)^2 )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
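# Small numerical check (toy values, isotropic theta): the correlation equals 1
# at zero distance and decays as exp(-theta * d**2). As above, this assumes an
# older NumPy because of the np.float alias used in the function.
_d_demo = np.array([[0.0], [0.5], [1.0]])
_r_demo = squared_exponential_correlation(np.array([2.0]), _d_demo)
# _r_demo ~= [1.0, 0.6065, 0.1353]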
def l1_cross_distances(X, Y=None):
"""
Compute the nonzero componentwise L1 cross-distances between the vectors in X and Y.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like
An array with shape (n_samples_Y, n_features).
Returns
-------
    ndarray
        The array of componentwise L1 cross-distances, with shape
        (n_samples * (n_samples - 1) / 2, n_features) when Y is None, or
        (n_samples_X * n_samples_Y, n_features) otherwise.
"""
X = array2d(X)
if Y is None:
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):])
else:
Y = array2d(Y)
n_samples_X, n_features_X = X.shape
n_samples_Y, n_features_Y = Y.shape
if n_features_X != n_features_Y:
raise ValueError("X and Y must have the same dimensions.")
n_features = n_features_X
n_nonzero_cross_dist = n_samples_X * n_samples_Y
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples_X):
ll_0 = ll_1
ll_1 = ll_0 + n_samples_Y # - k - 1
D[ll_0:ll_1] = np.abs(X[k] - Y)
return D
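# Example: for three one-dimensional samples there are 3*(3-1)/2 = 3 pairwise
# componentwise distances, returned in row-major pair order.
_X_pairs = np.array([[0.0], [1.0], [3.0]])
_D_pairs = l1_cross_distances(_X_pairs)   # [[1.], [3.], [2.]]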
class MultiFiCoKriging(object):
"""
Integrate the Multi-Fidelity Co-Kriging method described in [LeGratiet2013].
Parameters
----------
regr : str or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis for Universal Kriging purpose.
regr is assumed to be the same for all levels of code.
Default assumes a simple constant regression trend.
Available built-in regression models are:
'constant', 'linear'.
rho_regr : str or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. Defines the regression function for the
autoregressive parameter rho.
rho_regr is assumed to be the same for all levels of code.
Default assumes a simple constant regression trend.
Available built-in regression models are:
'constant', 'linear'.
normalize : bool, optional
When true, normalize X and Y so that the mean is at zero.
theta : double, array_like or list, optional
Value of correlation parameters if they are known; no optimization is run.
Default is None, so that optimization is run.
if double: value is replicated for all features and all levels.
if array_like: an array with shape (n_features, ) for
isotropic calculation. It is replicated for all levels.
if list: a list of nlevel arrays specifying value for each level.
theta0 : double, array_like or list, optional
Starting point for the maximum likelihood estimation of the
best set of parameters.
        Default is None, meaning the default 0.5*np.ones(n_features) is used.
if double: value is replicated for all features and all levels.
if array_like: an array with shape (n_features, ) for
isotropic calculation. It is replicated for all levels.
if list: a list of nlevel arrays specifying value for each level.
thetaL : double, array_like or list, optional
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None meaning use of the default 1e-5*np.ones(n_features).
if double: value is replicated for all features and all levels.
if array_like: An array with shape matching theta0's. It is replicated
for all levels of code.
if list: a list of nlevel arrays specifying value for each level.
thetaU : double, array_like or list, optional
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None meaning use of default value 50*np.ones(n_features).
if double: value is replicated for all features and all levels.
if array_like: An array with shape matching theta0's. It is replicated
for all levels of code.
if list: a list of nlevel arrays specifying value for each level.
Attributes
----------
corr : object
Correlation function to use, default is squared_exponential_correlation.
n_features : ndarray
Number of features for each fidelity level.
n_samples : ndarray
Number of samples for each fidelity level.
nlevel : int
Number of fidelity levels.
normalize : bool, optional
When true, normalize X and Y so that the mean is at zero.
regr : str or callable
A regression function returning an array of outputs of the linear
regression functional basis for Universal Kriging purpose.
regr is assumed to be the same for all levels of code.
Default assumes a simple constant regression trend.
Available built-in regression models are:
'constant', 'linear'
rho_regr : str or callable or None
A regression function returning an array of outputs of the linear
regression functional basis. Defines the regression function for the
autoregressive parameter rho.
rho_regr is assumed to be the same for all levels of code.
Default assumes a simple constant regression trend.
Available built-in regression models are:
'constant', 'linear'
theta : double, array_like or list or None
Value of correlation parameters if they are known; no optimization is run.
Default is None, so that optimization is run.
if double: value is replicated for all features and all levels.
if array_like: an array with shape (n_features, ) for
isotropic calculation. It is replicated for all levels.
if list: a list of nlevel arrays specifying value for each level
theta0 : double, array_like or list or None
Starting point for the maximum likelihood estimation of the
best set of parameters.
        Default is None, meaning the default 0.5*np.ones(n_features) is used.
if double: value is replicated for all features and all levels.
if array_like: an array with shape (n_features, ) for
isotropic calculation. It is replicated for all levels.
if list: a list of nlevel arrays specifying value for each level
thetaL : double, array_like or list or None
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None meaning use of the default 1e-5*np.ones(n_features).
if double: value is replicated for all features and all levels.
if array_like: An array with shape matching theta0's. It is replicated
for all levels of code.
if list: a list of nlevel arrays specifying value for each level
thetaU : double, array_like or list or None
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None meaning use of default value 50*np.ones(n_features).
if double: value is replicated for all features and all levels.
if array_like: An array with shape matching theta0's. It is replicated
for all levels of code.
if list: a list of nlevel arrays specifying value for each level
X_mean : float
Mean of the low fidelity training data for X.
X_std : float
Standard deviation of the low fidelity training data for X.
y_mean : float
Mean of the low fidelity training data for y.
y_std : float
Standard deviation of the low fidelity training data for y.
_nfev : int
Number of function evaluations.
Notes
-----
Implementation is based on the Package Scikit-Learn
(Author: <NAME>, <EMAIL>) which translates
the DACE Matlab toolbox, see [NLNS2002]_.
References
----------
.. [NLNS2002] <NAME>, <NAME>, and <NAME>.
`DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME> (1992). "Screening, predicting, and computer experiments."
`Technometrics,` 34(1) 15--25.
http://www.jstor.org/pss/1269548
.. [LeGratiet2013] <NAME> (2013). "Multi-fidelity Gaussian process
regression for computer experiments."
PhD thesis, Universite Paris-Diderot-Paris VII.
.. [TBKH2011] <NAME>., <NAME>., <NAME>., & <NAME>. (2011).
"The development of a hybridized particle swarm for kriging hyperparameter
tuning." `Engineering optimization`, 43(6), 675-699.
Examples
--------
>>> from openmdao.surrogate_models.multifi_cokriging import MultiFiCoKriging
>>> import numpy as np
>>> # Xe: DOE for expensive code (nested in Xc)
>>> # Xc: DOE for cheap code
>>> # ye: expensive response
>>> # yc: cheap response
>>> Xe = np.array([[0],[0.4],[1]])
>>> Xc = np.vstack((np.array([[0.1],[0.2],[0.3],[0.5],[0.6],[0.7],[0.8],[0.9]]),Xe))
>>> ye = ((Xe*6-2)**2)*np.sin((Xe*6-2)*2)
>>> yc = 0.5*((Xc*6-2)**2)*np.sin((Xc*6-2)*2)+(Xc-0.5)*10. - 5
>>> model = MultiFiCoKriging(theta0=1, thetaL=1e-5, thetaU=50.)
>>> model.fit([Xc, Xe], [yc, ye])
>>> # Prediction on x=0.05
>>> np.abs(float(model.predict([0.05])[0])- ((0.05*6-2)**2)*np.sin((0.05*6-2)*2)) < 0.05
True
"""
_regression_types = {
'constant': constant_regression,
'linear': linear_regression
}
def __init__(self, regr='constant', rho_regr='constant', normalize=True,
theta=None, theta0=None, thetaL=None, thetaU=None):
"""
Initialize all attributes.
"""
self.corr = squared_exponential_correlation
self.regr = regr
self.rho_regr = rho_regr
self.theta = theta
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.X_mean = 0
self.X_std = 1
self.y_mean = 0
self.y_std = 1
self.n_features = None
self.n_samples = None
self.nlevel = None
self._nfev = 0
def _build_R(self, lvl, theta):
"""
Build the correlation matrix with given theta for the specified level.
Parameters
----------
lvl : int
Level of fidelity
theta : array_like
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta``).
Returns
-------
ndarray
Correlation matrix.
"""
D = self.D[lvl]
n_samples = self.n_samples[lvl]
R = np.eye(n_samples) * (1. + NUGGET)
corr = squareform(self.corr(theta, D))
R = R + corr
return R
def fit(self, X, y, initial_range=INITIAL_RANGE_DEFAULT, tol=TOLERANCE_DEFAULT):
"""
Implement the Multi-Fidelity co-kriging model fitting method.
Parameters
----------
X : list of double array_like elements
A list of arrays with the input at which observations were made, from lowest
fidelity to highest fidelity. Designs must be nested
            with X[i] = np.vstack([..., X[i+1]]).
y : list of double array_like elements
A list of arrays with the observations of the scalar output to be predicted,
from lowest fidelity to highest fidelity.
initial_range : float
Initial range for the optimizer.
tol : float
Optimizer terminates when the tolerance tol is reached.
"""
# Run input checks
# Transforms floats and arrays in lists to have a multifidelity
# structure
self._check_list_structure(X, y)
# Checks if all parameters are structured as required
self._check_params()
X = self.X
y = self.y
nlevel = self.nlevel
n_samples = self.n_samples
# initialize lists
self.beta = nlevel * [0]
self.beta_rho = nlevel * [None]
self.beta_regr = nlevel * [None]
self.C = nlevel * [0]
self.D = nlevel * [0]
self.F = nlevel * [0]
self.p = nlevel * [0]
self.q = nlevel * [0]
self.G = nlevel * [0]
self.sigma2 = nlevel * [0]
self._R_adj = nlevel * [None]
# Training data will be normalized using statistical quantities from the low fidelity set.
if self.normalize:
self.X_mean = X_mean = np.mean(X[0], axis=0)
self.X_std = X_std = np.std(X[0], axis=0)
self.y_mean = y_mean = np.mean(y[0], axis=0)
self.y_std = y_std = np.std(y[0], axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
for lvl in range(nlevel):
if self.normalize:
X[lvl] = (X[lvl] - X_mean) / X_std
y[lvl] = (y[lvl] - y_mean) / y_std
# Calculate matrix of distances D between samples
self.D[lvl] = l1_cross_distances(X[lvl])
if (np.min(np.sum(self.D[lvl], axis=1)) == 0.):
raise ValueError("Multiple input features cannot have the same value.")
# Regression matrix and parameters
self.F[lvl] = self.regr(X[lvl])
self.p[lvl] = self.F[lvl].shape[1]
# Concatenate the autoregressive part for levels > 0
if lvl > 0:
F_rho = self.rho_regr(X[lvl])
self.q[lvl] = F_rho.shape[1]
self.F[lvl] = np.hstack((F_rho * np.dot((self.y[lvl - 1])[-n_samples[lvl]:],
np.ones((1, self.q[lvl]))), self.F[lvl]))
else:
self.q[lvl] = 0
n_samples_F_i = self.F[lvl].shape[0]
if n_samples_F_i != n_samples[lvl]:
raise ValueError("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if int(self.p[lvl] + self.q[lvl]) >= n_samples_F_i:
raise ValueError(f"Ordinary least squares problem is undetermined "
f"n_samples={n_samples[lvl]} must be greater than the regression"
f" model size p+q={self.p[lvl] + self.q[lvl]}.")
# Set attributes
self.X = X
self.y = y
self.rlf_value = np.zeros(nlevel)
for lvl in range(nlevel):
# Determine Gaussian Process model parameters
if self.theta[lvl] is None:
# Maximum Likelihood Estimation of the parameters
sol = self._max_rlf(lvl=lvl, initial_range=initial_range, tol=tol)
self.theta[lvl] = sol['theta']
self.rlf_value[lvl] = sol['rlf_value']
if np.isinf(self.rlf_value[lvl]):
raise ValueError("Bad parameter region. Try increasing upper bound")
else:
self.rlf_value[lvl] = self.rlf(lvl=lvl)
if np.isinf(self.rlf_value[lvl]):
raise ValueError("Bad point. Try increasing theta0.")
return
def rlf(self, lvl, theta=None):
"""
Determine BLUP parameters and evaluate negative reduced likelihood function for theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
lvl : int
Level of fidelity.
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta``).
Returns
-------
double
The value of the negative concentrated reduced likelihood function
associated to the given autocorrelation parameters theta.
"""
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta[lvl]
# Initialize output
rlf_value = 1e20
# Retrieve data
n_samples = self.n_samples[lvl]
y = self.y[lvl]
F = self.F[lvl]
p = self.p[lvl]
q = self.q[lvl]
R = self._build_R(lvl, theta)
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
_logger.warning(('Cholesky decomposition of R at level %i failed' % lvl) +
' with theta=' + str(theta))
return rlf_value
# Get generalized least squares solution
Ft = solve_triangular(C, F, lower=True)
Yt = solve_triangular(C, y, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except TypeError: # qr() got an unexpected keyword argument 'econ'
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
# Universal Kriging
beta = solve_triangular(G, np.dot(Q.T, Yt))
err = Yt - np.dot(Ft, beta)
err2 = np.dot(err.T, err)[0, 0]
self._err = err
sigma2 = err2 / (n_samples - p - q)
detR = ((np.diag(C))**(2. / n_samples)).prod()
rlf_value = (n_samples - p - q) * np.log10(sigma2) \
+ n_samples * np.log10(detR)
self.beta_rho[lvl] = beta[:q]
self.beta_regr[lvl] = beta[q:]
self.beta[lvl] = beta
self.sigma2[lvl] = sigma2
self.C[lvl] = C
self.G[lvl] = G
return rlf_value
def _max_rlf(self, lvl, initial_range, tol):
"""
Estimate autocorrelation parameter theta as maximizer of the reduced likelihood function.
(Minimization of the negative reduced likelihood function is used for convenience.)
Parameters
----------
lvl : int
Level of fidelity
initial_range : float
Initial range of the optimizer
tol : float
Optimizer terminates when the tolerance tol is reached.
Returns
-------
array_like
The optimal hyperparameters.
double
The optimal negative reduced likelihood function value.
dict
res['theta']: optimal theta
res['rlf_value']: optimal value for likelihood
"""
# Initialize input
thetaL = self.thetaL[lvl]
thetaU = self.thetaU[lvl]
def rlf_transform(x):
return self.rlf(theta=10.**x, lvl=lvl)
# Use specified starting point as first guess
theta0 = self.theta0[lvl]
x0 = np.log10(theta0[0])
constraints = []
for i in range(theta0.size):
constraints.append({'type': 'ineq', 'fun': lambda log10t, i=i:
log10t[i] - np.log10(thetaL[0][i])})
constraints.append({'type': 'ineq', 'fun': lambda log10t, i=i:
np.log10(thetaU[0][i]) - log10t[i]})
constraints = tuple(constraints)
sol = minimize(rlf_transform, x0, method='COBYLA',
constraints=constraints,
options={'rhobeg': initial_range,
'tol': tol, 'disp': 0})
log10_optimal_x = sol['x']
optimal_rlf_value = sol['fun']
self._nfev += sol['nfev']
optimal_theta = 10. ** log10_optimal_x
res = {}
res['theta'] = optimal_theta
res['rlf_value'] = optimal_rlf_value
return res
def predict(self, X, eval_MSE=True):
"""
Perform the predictions of the kriging model on X.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : bool, optional
A boolean specifying whether the Mean Squared Error should be
            evaluated or not. Default assumes eval_MSE is True.
Returns
-------
        array_like
            An array with shape (n_eval, 1) with the Best Linear Unbiased
            Prediction at X for the highest fidelity level.
        array_like, optional (if eval_MSE is True)
            An array with shape (n_eval, 1) with the Mean Squared Error at X
            for the highest fidelity level.
"""
X = array2d(X)
nlevel = self.nlevel
n_eval, n_features_X = X.shape
# Normalize
if self.normalize:
X = (X - self.X_mean) / self.X_std
# Calculate kriging mean and variance at level 0
mu = np.zeros((n_eval, nlevel))
f = self.regr(X)
f0 = self.regr(X)
dx = l1_cross_distances(X, Y=self.X[0])
# Get regression function and correlation
F = self.F[0]
C = self.C[0]
beta = self.beta[0]
Ft = solve_triangular(C, F, lower=True)
yt = solve_triangular(C, self.y[0], lower=True)
r_ = self.corr(self.theta[0], dx).reshape(n_eval, self.n_samples[0])
gamma = solve_triangular(C.T, yt - np.dot(Ft, beta), lower=False)
# Scaled predictor
mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
if eval_MSE:
MSE = np.zeros((n_eval, nlevel))
r_t = solve_triangular(C, r_.T, lower=True)
G = self.G[0]
u_ = solve_triangular(G.T, f.T - np.dot(Ft.T, r_t), lower=True)
MSE[:, 0] = self.sigma2[0] * \
(1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0))
# Calculate recursively kriging mean and variance at level i
for i in range(1, nlevel):
C = self.C[i]
F = self.F[i]
g = self.rho_regr(X)
dx = l1_cross_distances(X, Y=self.X[i])
r_ = self.corr(self.theta[i], dx).reshape(
n_eval, self.n_samples[i])
f = np.vstack((g.T * mu[:, i - 1], f0.T))
Ft = solve_triangular(C, F, lower=True)
yt = solve_triangular(C, self.y[i], lower=True)
r_t = solve_triangular(C, r_.T, lower=True)
G = self.G[i]
beta = self.beta[i]
# scaled predictor
mu[:, i] = (np.dot(f.T, beta)
+ np.dot(r_t.T, yt - np.dot(Ft, beta))).ravel()
if eval_MSE:
Q_ = (np.dot((yt - np.dot(Ft, beta)).T,
yt - np.dot(Ft, beta)))[0, 0]
u_ = solve_triangular(G.T, f - np.dot(Ft.T, r_t), lower=True)
sigma2_rho = np.dot(g,
self.sigma2[
i] * linalg.inv(np.dot(G.T, G))[:self.q[i], :self.q[i]]
+ np.dot(beta[:self.q[i]], beta[:self.q[i]].T))
sigma2_rho = (sigma2_rho * g).sum(axis=1)
MSE[:, i] = sigma2_rho * MSE[:, i - 1] \
+ Q_ / (2 * (self.n_samples[i] - self.p[i] - self.q[i])) \
* (1 - (r_t**2).sum(axis=0)) \
+ self.sigma2[i] * (u_**2).sum(axis=0)
# scaled predictor
for i in range(nlevel): # Predictor
mu[:, i] = self.y_mean + self.y_std * mu[:, i]
if eval_MSE:
MSE[:, i] = self.y_std**2 * MSE[:, i]
if eval_MSE:
return mu[:, -1].reshape((n_eval, 1)), MSE[:, -1].reshape((n_eval, 1))
else:
return mu[:, -1].reshape((n_eval, 1))
def _check_list_structure(self, X, y):
"""
Transform floats and arrays in the training data lists to have a multifidelity structure.
Parameters
----------
X : list of double array_like elements
A list of arrays with the input at which observations were made, from lowest
fidelity to highest fidelity. Designs must be nested
            with X[i] = np.vstack([..., X[i+1]]).
y : list of double array_like elements
A list of arrays with the observations of the scalar output to be predicted,
from lowest fidelity to highest fidelity.
"""
if type(X) is not list:
nlevel = 1
X = [X]
else:
nlevel = len(X)
if type(y) is not list:
y = [y]
if len(X) != len(y):
raise ValueError("X and y must have the same length.")
n_samples = np.zeros(nlevel, dtype=int)
n_features = np.zeros(nlevel, dtype=int)
n_samples_y = np.zeros(nlevel, dtype=int)
for i in range(nlevel):
n_samples[i], n_features[i] = X[i].shape
if i > 0 and n_features[i] != n_features[i - 1]:
raise ValueError("All X must have the same number of columns.")
y[i] = np.asarray(y[i]).ravel()[:, np.newaxis]
n_samples_y[i] = y[i].shape[0]
if n_samples[i] != n_samples_y[i]:
raise ValueError("X and y must have the same number of rows.")
self.n_features = n_features[0]
if type(self.theta) is not list:
self.theta = nlevel * [self.theta]
elif len(self.theta) != nlevel:
raise ValueError(f"theta must be a list of {nlevel} element(s).")
if type(self.theta0) is not list:
self.theta0 = nlevel * [self.theta0]
elif len(self.theta0) != nlevel:
raise ValueError(f"theta0 must be a list of {nlevel} element(s).")
if type(self.thetaL) is not list:
self.thetaL = nlevel * [self.thetaL]
elif len(self.thetaL) != nlevel:
raise ValueError(f"thetaL must be a list of {nlevel} element(s).")
if type(self.thetaU) is not list:
self.thetaU = nlevel * [self.thetaU]
elif len(self.thetaU) != nlevel:
raise ValueError(f"thetaU must be a list of {nlevel} element(s).")
self.nlevel = nlevel
self.X = X[:]
self.y = y[:]
self.n_samples = n_samples
return
def _check_params(self):
"""
Perform sanity checks on all parameters.
"""
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError(f"regr should be one of {self._regression_types.keys()} or "
f"callable, {self.regr} was given.")
# Check rho regression model
if not callable(self.rho_regr):
if self.rho_regr in self._regression_types:
self.rho_regr = self._regression_types[self.rho_regr]
else:
raise ValueError(f"regr should be one of {self._regression_types.keys()} or "
f"callable, {self.rho_regr} was given.")
for i in range(self.nlevel):
# Check correlation parameters
if self.theta[i] is not None:
self.theta[i] = array2d(self.theta[i])
if np.any(self.theta[i] <= 0):
raise ValueError("theta must be strictly positive.")
if self.theta0[i] is not None:
self.theta0[i] = array2d(self.theta0[i])
if np.any(self.theta0[i] <= 0):
raise ValueError("theta0 must be strictly positive.")
else:
self.theta0[i] =
|
array2d(self.n_features * [THETA0_DEFAULT])
|
numpy.atleast_2d
|
import matplotlib.pyplot as plt
import os
import numpy as np
from imantics import Polygons, Mask
import json
from tqdm import tqdm
small_data_num = 500
def get_file_names(folder, SMALL=False):
all_file_names = os.listdir(folder)
file_names = []
for file_name in all_file_names:
if file_name.endswith('jpg'):
file_names.append(file_name[:-4])
if SMALL:
        if small_data_num >= len(file_names):
            print('small_data_num exceeds the number of available files')
file_names = file_names[:small_data_num]
return file_names
def get_img_mask_path(folder, file_names):
img_pathes = []
mask_pathes = []
for file_name in file_names:
img_path = os.path.join(folder, file_name+'.jpg')
img_pathes.append(img_path)
mask_path = os.path.join(folder, file_name+'.png')
mask_pathes.append(mask_path)
return img_pathes, mask_pathes
def get_img_shape(img_path):
pic = plt.imread(img_path)
h, w, c = pic.shape
return h, w
def get_mask_cate(mask_path):
mask = plt.imread(mask_path)*255
mask = mask.astype(np.uint8)
cat_value = []
for i in range(256):
table = [mask==i]
if not np.sum(table) == 0:
cat_value.append(i)
if len(cat_value) > 9:
print(cat_value)
# Extract a mask per object from the png mask.
# input: mask (values in 0-1)
# output: masks: list, each element a boolean numpy array
def extract_mask(mask):
mask = mask.astype(np.uint8)
masks = []
masks_cat = []
for i in range(1, 256):
sub_mask = (mask==i)
if np.sum(sub_mask) !=0:
masks.append(sub_mask)
masks_cat.append(i)
return masks, masks_cat
#extract bbox from mask of one object
def bbox(mask):
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
width = cmax - cmin + 1
height = rmax - rmin + 1
maskInt = mask.astype(int)
area = np.sum(maskInt)
return area, [int(cmin), int(rmin), int(width), int(height)]
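# Toy example (synthetic mask): a 2x3 block of foreground pixels yields an area
# of 6 and a COCO-style [x, y, width, height] box of [2, 1, 3, 2].
_demo_mask = np.zeros((5, 5), dtype=bool)
_demo_mask[1:3, 2:5] = True
_demo_area, _demo_box = bbox(_demo_mask)   # (6, [2, 1, 3, 2])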
def mask_to_polygons(mask):
polygons = Mask(mask).polygons().points
# filter out invalid polygons (< 3 points)
polygons_filtered = []
for polygon in polygons:
polygon = polygon.reshape(-1)
polygon = polygon.tolist()
if len(polygon) % 2 == 0 and len(polygon) >= 6:
polygons_filtered.append(polygon)
return polygons_filtered
def devide_set(file_names, mode, val_split=0.2, seed=1234):
file_names =
|
np.array(file_names)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
CIE Chromaticity Diagrams Plotting
==================================
Defines the *CIE* chromaticity diagrams plotting objects:
- :func:`colour.plotting.plot_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.plot_chromaticity_diagram_CIE1960UCS`
- :func:`colour.plotting.plot_chromaticity_diagram_CIE1976UCS`
- :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1960UCS`
- :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1976UCS`
"""
from __future__ import division
import bisect
import numpy as np
from matplotlib.collections import LineCollection
from matplotlib.patches import Polygon
from colour.algebra import normalise_vector
from colour.colorimetry import sd_to_XYZ, sds_and_msds_to_sds
from colour.models import (Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy,
XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ)
from colour.plotting import (CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE,
XYZ_to_plotting_colourspace, artist, filter_cmfs,
override_style, render)
from colour.utilities import (domain_range_scale, first_item, is_string,
normalise_maximum, tstack, suppress_warnings)
from colour.utilities.deprecation import handle_arguments_deprecation
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'plot_spectral_locus', 'plot_chromaticity_diagram_colours',
'plot_chromaticity_diagram', 'plot_chromaticity_diagram_CIE1931',
'plot_chromaticity_diagram_CIE1960UCS',
'plot_chromaticity_diagram_CIE1976UCS', 'plot_sds_in_chromaticity_diagram',
'plot_sds_in_chromaticity_diagram_CIE1931',
'plot_sds_in_chromaticity_diagram_CIE1960UCS',
'plot_sds_in_chromaticity_diagram_CIE1976UCS'
]
@override_style()
def plot_spectral_locus(cmfs='CIE 1931 2 Degree Standard Observer',
spectral_locus_colours=None,
spectral_locus_labels=None,
method='CIE 1931',
**kwargs):
"""
    Plots the *Spectral Locus* according to the given method.
Parameters
----------
cmfs : unicode, optional
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
spectral_locus_colours : array_like or unicode, optional
*Spectral Locus* colours, if ``spectral_locus_colours`` is set to
*RGB*, the colours will be computed according to the corresponding
chromaticity coordinates.
spectral_locus_labels : array_like, optional
Array of wavelength labels used to customise which labels will be drawn
around the spectral locus. Passing an empty array will result in no
wavelength labels being drawn.
method : unicode, optional
**{'CIE 1931', 'CIE 1960 UCS', 'CIE 1976 UCS'}**,
*Chromaticity Diagram* method.
Other Parameters
----------------
\\**kwargs : dict, optional
{:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
Please refer to the documentation of the previously listed definitions.
Returns
-------
tuple
Current figure and axes.
Examples
--------
>>> plot_spectral_locus(spectral_locus_colours='RGB') # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Spectral_Locus.png
:align: center
:alt: plot_spectral_locus
"""
if spectral_locus_colours is None:
spectral_locus_colours = CONSTANTS_COLOUR_STYLE.colour.dark
settings = {'uniform': True}
settings.update(kwargs)
_figure, axes = artist(**settings)
method = method.upper()
cmfs = first_item(filter_cmfs(cmfs).values())
illuminant = CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint
wavelengths = cmfs.wavelengths
equal_energy = np.array([1 / 3] * 2)
if method == 'CIE 1931':
ij = XYZ_to_xy(cmfs.values, illuminant)
labels = ((390, 460, 470, 480, 490, 500, 510, 520, 540, 560, 580, 600,
620, 700)
if spectral_locus_labels is None else spectral_locus_labels)
elif method == 'CIE 1960 UCS':
ij = UCS_to_uv(XYZ_to_UCS(cmfs.values))
labels = ((420, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540,
550, 560, 570, 580, 590, 600, 610, 620, 630, 645, 680)
if spectral_locus_labels is None else spectral_locus_labels)
elif method == 'CIE 1976 UCS':
ij = Luv_to_uv(XYZ_to_Luv(cmfs.values, illuminant), illuminant)
labels = ((420, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540,
550, 560, 570, 580, 590, 600, 610, 620, 630, 645, 680)
if spectral_locus_labels is None else spectral_locus_labels)
else:
raise ValueError(
'Invalid method: "{0}", must be one of '
'[\'CIE 1931\', \'CIE 1960 UCS\', \'CIE 1976 UCS\']'.format(
method))
pl_ij = tstack([
|
np.linspace(ij[0][0], ij[-1][0], 20)
|
numpy.linspace
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import paddle
import paddle.fluid as fluid
import numpy as np
from scipy import stats
from scipy.special import logsumexp
import unittest
from tests.distributions import utils
from zhusuan.distributions.normal import *
device = paddle.set_device('gpu')
paddle.disable_static(device)
# TODO: test sample value
class TestNormal(unittest.TestCase):
def setUp(self):
self._Normal_std = lambda mean, std, **kwargs: Normal(
mean=mean, std=std, **kwargs)
self._Normal_logstd = lambda mean, logstd, **kwargs: Normal(
mean=mean, logstd=logstd, **kwargs)
def test_init(self):
# with self.assertRaisesRegexp(
# ValueError, "Please use named arguments"):
# Normal(paddle.ones(1), paddle.ones(1))
# with self.assertRaisesRegexp(
# ValueError, "Either.*should be passed"):
# Normal(mean=paddle.ones([2, 1]))
try:
Normal(mean=paddle.ones([2, 1]),
std=paddle.zeros([2, 4, 3]), logstd=paddle.zeros([2, 2, 3]))
except:
raise ValueError("Either.*should be passed")
try:
Normal(mean=paddle.ones([2, 1]), logstd=paddle.zeros([2, 4, 3]))
except:
raise ValueError("should be broadcastable to match")
try:
Normal(mean=paddle.ones([2, 1]), std=paddle.ones([2, 4, 3]))
except:
raise ValueError("should be broadcastable to match")
Normal(mean=paddle.ones([32, 1], dtype='float32'),
logstd=paddle.ones([32, 1, 3], dtype='float32'))
Normal(mean=paddle.ones([32, 1], dtype='float32'),
std=paddle.ones([32, 1, 3], 'float32') )
## TODO: Define the value shape and batch shape in Normal module
# def test_value_shape(self):
#
# # get value shape
# norm = Normal(mean=paddle.cast(paddle.to_tensor([]), 'float32'),
# logstd=paddle.cast(paddle.to_tensor([]), 'float32'))
# self.assertEqual(norm.get_value_shape(), [])
# norm = Normal(mean=paddle.cast(paddle.to_tensor([]), 'float32'),
# std=paddle.cast(paddle.to_tensor([]), 'float32'))
# self.assertEqual(norm.get_value_shape(), [])
#
# # dynamic
# self.assertTrue(norm._value_shape().dtype is 'int32')
# self.assertEqual(norm._value_shape(), [])
#
# self.assertEqual(norm._value_shape().dtype, 'int32')
# def test_batch_shape(self):
# utils.test_batch_shape_2parameter_univariate(
# self, self._Normal_std, np.zeros, np.ones)
# utils.test_batch_shape_2parameter_univariate(
# self, self._Normal_logstd, np.zeros, np.zeros)
def test_sample_shape(self):
utils.test_2parameter_sample_shape_same(
self, self._Normal_std, np.zeros, np.ones)
utils.test_2parameter_sample_shape_same(
self, self._Normal_logstd, np.zeros, np.zeros)
def test_sample_reparameterized(self):
mean = paddle.ones([2, 3])
logstd = paddle.ones([2, 3])
mean.stop_gradient = False
logstd.stop_gradient = False
norm_rep = Normal(mean=mean, logstd=logstd)
samples = norm_rep.sample()
mean_grads, logstd_grads = paddle.grad(outputs=[samples], inputs=[mean, logstd],
allow_unused=True)
self.assertTrue(mean_grads is not None)
self.assertTrue(logstd_grads is not None)
norm_no_rep = Normal(mean=mean, logstd=logstd, is_reparameterized=False)
samples = norm_no_rep.sample()
mean_grads, logstd_grads = paddle.grad(outputs=[samples],
inputs=[mean, logstd],
allow_unused=True)
self.assertEqual(mean_grads, None)
self.assertEqual(logstd_grads, None)
def test_path_derivative(self):
mean = paddle.ones([2, 3])
logstd = paddle.ones([2, 3])
mean.stop_gradient = False
logstd.stop_gradient = False
n_samples = 7
norm_rep = Normal(mean=mean, logstd=logstd, use_path_derivative=True)
samples = norm_rep.sample(n_samples)
log_prob = norm_rep.log_prob(samples)
mean_path_grads, logstd_path_grads = paddle.grad(outputs=[log_prob],
inputs=[mean, logstd],
allow_unused=True, retain_graph=True)
sample_grads = paddle.grad(outputs=[log_prob],inputs=[samples],
allow_unused=True, retain_graph=True)
mean_true_grads = paddle.grad(outputs=[samples],inputs=[mean],
grad_outputs=sample_grads,
allow_unused=True, retain_graph=True)[0]
logstd_true_grads = paddle.grad(outputs=[samples],inputs=[logstd],
grad_outputs=sample_grads,
allow_unused=True, retain_graph=True)[0]
# TODO: Figure out why path gradients unmatched with true gradients
# np.testing.assert_allclose(mean_path_grads.numpy(), mean_true_grads.numpy() )
# np.testing.assert_allclose(logstd_path_grads.numpy(), logstd_true_grads.numpy())
norm_no_rep = Normal(mean=mean, logstd=logstd, is_reparameterized=False,
use_path_derivative=True)
samples = norm_no_rep.sample(n_samples)
log_prob = norm_no_rep.log_prob(samples)
mean_path_grads, logstd_path_grads = paddle.grad(outputs=[log_prob],
inputs=[mean, logstd],
allow_unused=True)
self.assertTrue(mean_path_grads is None)
self.assertTrue(logstd_path_grads is None)
def test_log_prob_shape(self):
utils.test_2parameter_log_prob_shape_same(
self, self._Normal_std, np.zeros, np.ones, np.zeros)
utils.test_2parameter_log_prob_shape_same(
self, self._Normal_logstd, np.zeros, np.zeros, np.zeros)
def test_value(self):
def _test_value(given, mean, logstd):
mean = np.array(mean, np.float32)
given = np.array(given, np.float32)
logstd = np.array(logstd, np.float32)
std = np.exp(logstd)
target_log_p = np.array(stats.norm.logpdf(given, mean, np.exp(logstd)), np.float32)
target_p = np.array(stats.norm.pdf(given, mean,
|
np.exp(logstd)
|
numpy.exp
|
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiXy Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example that tests the fit routine.
Synthetic behavioral data is generated using a known model. A new model
is fit to this data, blind to the true model parameters. The routine is
evaluated based on its ability to recover the true model parameters.
"""
import os
from pathlib import Path
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import psixy.catalog
import psixy.task
import psixy.models
import psixy.sequence
def main():
"""Execute script."""
# Settings.
n_sequence = 20
n_epoch = 50
task_idx = 2
task_list, feature_matrix = psixy.task.shepard_hovland_jenkins_1961()
task = task_list[task_idx]
encoder = psixy.models.Deterministic(task.stimulus_id, feature_matrix)
class_id_list = np.array([0, 1], dtype=int)
# Generate some sequences.
s_seq_list = generate_stimulus_sequences(
task.class_id, n_sequence, n_epoch
)
# Define known model.
model_true = psixy.models.ALCOVE(feature_matrix, class_id_list, encoder)
model_true.params['rho'] = 1.0
model_true.params['tau'] = 1.0
model_true.params['beta'] = 6.5
model_true.params['phi'] = 2.0
model_true.params['lambda_w'] = 0.03
model_true.params['lambda_a'] = 0.0033
# Simulate behavioral data with known model.
prob_resp_attn = model_true.predict(s_seq_list, mode='all')
# n_seq, n_trial, n_output
n_trial = prob_resp_attn.shape[1]
sampled_class = np.zeros([n_sequence, n_trial], dtype=int)
for i_seq in range(n_sequence):
for i_trial in range(n_trial):
sampled_class[i_seq, i_trial] = np.random.choice(
class_id_list, p=prob_resp_attn[i_seq, i_trial, :]
)
response_time_ms = 1000 * np.ones(sampled_class.shape)
b_seq_list = psixy.sequence.AFCSequence(sampled_class, response_time_ms)
# Fit a new model.
model_inf = psixy.models.ALCOVE(feature_matrix, class_id_list, encoder)
model_inf.params['rho'] = 1.0
model_inf.params['tau'] = 1.0
model_inf.params['beta'] = 6.5
model_inf.params['phi'] = 2.0
model_inf.params['lambda_w'] = 0.03
model_inf.params['lambda_a'] = 0.0033
loss_train = model_inf.fit(s_seq_list, b_seq_list, verbose=1)
def generate_stimulus_sequences(class_id_in, n_sequence, n_epoch):
"""Generate stimulus sequences."""
np.random.seed(seed=245)
n_stimuli = len(class_id_in)
cat_idx = np.arange(n_stimuli, dtype=int)
cat_idx_all = np.zeros([n_sequence, n_epoch * n_stimuli], dtype=int)
for i_seq in range(n_sequence):
curr_cat_idx = np.array([], dtype=int)
for i_epoch in range(n_epoch):
curr_cat_idx = np.hstack(
[curr_cat_idx,
|
np.random.permutation(cat_idx)
|
numpy.random.permutation
|
#!/usr/bin/env python3
#Load generic Python Modules
import argparse #parse arguments
import os #access operating systems function
import subprocess #run command
import sys #system command
#==============
from amesgcm.Script_utils import check_file_tape,prYellow,prRed,prCyan,prGreen,prPurple
from amesgcm.Script_utils import print_fileContent,print_varContent,FV3_file_type,find_tod_in_diurn
from amesgcm.Script_utils import wbr_cmap,rjw_cmap,dkass_temp_cmap,dkass_dust_cmap
from amesgcm.FV3_utils import lon360_to_180,lon180_to_360,UT_LTtxt,area_weights_deg
from amesgcm.FV3_utils import add_cyclic,azimuth2cart,mollweide2cart,robin2cart,ortho2cart
#=====Attempt to import specific scientific modules one may not find in the default python on NAS ====
try:
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import (LogFormatter, NullFormatter, LogFormatterSciNotation, MultipleLocator) #format ticks
from netCDF4 import Dataset, MFDataset
from numpy import sqrt, exp, max, mean, min, log, log10,sin,cos,abs
from matplotlib.colors import LogNorm
from matplotlib.ticker import LogFormatter
except ImportError as error_msg:
prYellow("Error while importing modules")
prYellow('You are using Python '+str(sys.version_info[0:3]))
prYellow('Please, source your virtual environment');prCyan(' source envPython3.7/bin/activate.csh \n')
print("Error was: ", error_msg)
exit()
except Exception as exception:
# Output unexpected Exceptions.
print(exception.__class__.__name__ + ": ", exception)
exit()
degr = u"\N{DEGREE SIGN}"
#======================================================
# ARGUMENTS PARSER
#======================================================
global current_version;current_version=3.2
parser = argparse.ArgumentParser(description="""\033[93mAnalysis Toolkit for the Ames GCM, V%s\033[00m """%(current_version),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('custom_file', nargs='?',type=argparse.FileType('r'),default=None, #sys.stdin
help='Use optional input file Custom.in to plot the graphs \n'
'> Usage: MarsPlot Custom.in [other options]\n'
'UPDATE as needed with \033[96mpip3 install git+https://github.com/alex-kling/amesgcm.git --upgrade\033[00m \n'
'Tutorial at: \033[93mhttps://github.com/alex-kling/amesgcm\033[00m')
parser.add_argument('-i', '--inspect_file', default=None,
help="""Inspect Netcdf file content. Variables are sorted by dimensions \n"""
"""> Usage: MarsPlot -i 00000.atmos_daily.nc\n"""
"""Options: use --dump (variable content) and --stat (min, mean,max) jointly with --inspect \n"""
"""> MarsPlot -i 00000.atmos_daily.nc -dump pfull 'temp[6,:,30,10]' (quotes '' are needed when browsing dimensions)\n"""
"""> MarsPlot -i 00000.atmos_daily.nc -stat 'ucomp[5,:,:,:]' 'vcomp[5,:,:,:]'\n""")
#These two options are to be used jointly with --inspect
parser.add_argument('--dump','-dump', nargs='+',default=None,
help=argparse.SUPPRESS)
parser.add_argument('--stat','-stat', nargs='+',default=None,
help=argparse.SUPPRESS)
parser.add_argument('-d','--date', nargs='+',default=None,
help='Specify the range of files to use, default is the last file \n'
'> Usage: MarsPlot Custom.in -d 700 (one file) \n'
' MarsPlot Custom.in -d 350 700 (start file end file)')
parser.add_argument('--template','-template', action='store_true',
help="""Generate a template Custom.in for customization of the plots.\n """
"""(Use '--temp' for a skinned version of the template)\n""")
parser.add_argument('-temp','--temp', action='store_true',help=argparse.SUPPRESS) #same as --template but without the instructions
parser.add_argument('-do','--do', nargs=1,type=str,default=None, #sys.stdin
help='(Re)-use a template file my_custom.in. First search in ~/amesGCM3/mars_templates/,\n'
' then in /u/mkahre/MCMC/analysis/working/shared_templates/ \n'
'> Usage: MarsPlot -do my_custom [other options]')
parser.add_argument('-sy', '--stack_year', action='store_true',default=False,
help='Stack consecutive years in 1D time series instead of having them back to back\n'
'> Usage: MarsPlot Custom.in -sy \n')
parser.add_argument("-o", "--output",default="pdf",
choices=['pdf','eps','png'],
help='Output file format\n'
'Default is pdf if ghostscript (gs) is available and png otherwise\n'
'> Usage: MarsPlot Custom.in -o png \n'
' : MarsPlot Custom.in -o png -pw 500 (set pixel width to 500, default is 2000)\n')
parser.add_argument('-vert', '--vertical', action='store_true',default=False,
help='Output figures as vertical pages instead of horizontal \n')
parser.add_argument("-pw", "--pwidth",default=2000,type=float,
help=argparse.SUPPRESS)
parser.add_argument('-dir', '--directory', default=os.getcwd(),
help='Target directory if input files are not present in current directory \n'
'> Usage: MarsPlot Custom.in [other options] -dir /u/akling/FV3/verona/c192L28_dliftA/history')
parser.add_argument('--debug', action='store_true', help='Debug flag: do not by-pass errors on a particular figure')
#======================================================
# MAIN PROGRAM
#======================================================
def main():
global output_path ; output_path = os.getcwd()
global input_paths ; input_paths=[];input_paths.append(parser.parse_args().directory)
global out_format ; out_format=parser.parse_args().output
global debug ;debug =parser.parse_args().debug
global Ncdf_num #host the simulation timestamps
global objectList #contains all figure object
global customFileIN #template name
global levels;levels=21 #number of contour for 2D plots
global my_dpi;my_dpi=96. #pixel per inch for figure output
global label_size;label_size=18 #Label size for title, xlabel, ylabel
global title_size;title_size=24 #Font size for the figure title
global label_factor;label_factor=3/10# reduce the font size as the number of panels increases
global tick_factor;tick_factor=1/2
global title_factor;title_factor=10/12
global width_inch; #figure width in inches for saving figure
global height_inch; #figure height in inches for saving figure
global vertical_page;vertical_page=parser.parse_args().vertical #vertical pages instead of horizontal for saving figure
global shared_dir; shared_dir='/u/mkahre/MCMC/analysis/working/shared_templates' #directory containing shared templates
#Set Figure dimensions
pixel_width=parser.parse_args().pwidth
if vertical_page:
width_inch=pixel_width/1.4/my_dpi;height_inch=pixel_width/my_dpi
else:
width_inch=pixel_width/my_dpi;height_inch=pixel_width/1.4/my_dpi
objectList=[Fig_2D_lon_lat('fixed.zsurf',True),\
Fig_2D_lat_lev('atmos_average.ucomp',True),\
Fig_2D_time_lat('atmos_average.taudust_IR',False),\
Fig_2D_lon_lev('atmos_average_pstd.temp',False),\
Fig_2D_time_lev('atmos_average_pstd.temp',False),\
Fig_2D_lon_time('atmos_average.temp',False),\
Fig_1D('atmos_average.temp',False)]
#=============================
#----------Group together the 1st two figures----
objectList[0].subID=1;objectList[0].nPan=2 #1st object of a 2 panel figure
objectList[1].subID=2;objectList[1].nPan=2 #2nd object of a 2 panel figure
# Begin main loop:
# ----- Option 1 :Inspect content of a Netcdf file ----
if parser.parse_args().inspect_file:
check_file_tape(parser.parse_args().inspect_file,abort=False) #NAS-specific, check if the file is on tape
if parser.parse_args().dump:
#Dumping variable content
print_varContent(parser.parse_args().inspect_file,parser.parse_args().dump,False)
elif parser.parse_args().stat:
#Printing variable stats
print_varContent(parser.parse_args().inspect_file,parser.parse_args().stat,True)
else:
# Show information on all the variables
print_fileContent(parser.parse_args().inspect_file)
# ----- Option 2: Generate a template file ----
elif parser.parse_args().template or parser.parse_args().temp:
make_template()
# --- Gather simulation information from template or inline argument
else:
# --- Option 2, case A: Use Custom.in for everything ----
if parser.parse_args().custom_file:
print('Reading '+parser.parse_args().custom_file.name)
namelist_parser(parser.parse_args().custom_file.name)
# --- Option 2, case B: Use Custom.in in ~/FV3/templates for everything ----
if parser.parse_args().do:
print('Reading '+path_to_template(parser.parse_args().do))
namelist_parser(path_to_template(parser.parse_args().do))
# Set bounds (e.g. starting file, end file)
if parser.parse_args().date: #a date single date or a range is provided
# first check if the value provided is the right type
try:
bound=np.asarray(parser.parse_args().date).astype(float)
except Exception as e:
prRed('*** Syntax Error***')
prRed("""Please use: 'MarsPlot Custom.in -d XXXX [YYYY] -o out' """)
exit()
else: # no date is provided, default is last file XXXXX.fixed.nc in directory
bound=get_Ncdf_num()
#If one or multiple XXXXX.fixed.nc files are found, use the last one
if bound is not None :bound=bound[-1]
#-----
#Initialization
Ncdf_num=get_Ncdf_num() #Get all timestamps in directory
if Ncdf_num is not None:
Ncdf_num=select_range(Ncdf_num,bound) # Apply bounds to the desired dates
nfiles=len(Ncdf_num) #number of timestamps
else: #No XXXXX.fixed.nc in the directory; it is assumed we will be looking at one single file
nfiles=1
#print('MarsPlot is running...')
#Make a ./plots folder in the current directory if it does not exist
dir_plot_present=os.path.exists(output_path+'/'+'plots')
if not dir_plot_present:
os.makedirs(output_path+'/'+'plots')
fig_list=list()#list of figures
#============Do plots==================
global i_list;
for i_list in range(0,len(objectList)):
status=objectList[i_list].plot_type+' :'+objectList[i_list].varfull
progress(i_list,len(objectList),status,None) #display the figure in progress
objectList[i_list].do_plot()
if objectList[i_list].success and out_format=='pdf' and not debug : sys.stdout.write("\033[F");sys.stdout.write("\033[K") #if success,flush the previous output
status=objectList[i_list].plot_type+' :'+objectList[i_list].varfull+objectList[i_list].fdim_txt
progress(i_list,len(objectList),status,objectList[i_list].success)
# Add the figure to the list of figures
if objectList[i_list].subID==objectList[i_list].nPan: #only for the last panel of a subplot
if i_list< len(objectList)-1 and not objectList[i_list+1].addLine:
fig_list.append(objectList[i_list].fig_name)
#Last subplot
if i_list== len(objectList)-1 :fig_list.append(objectList[i_list].fig_name)
progress(100,100,'Done')# 100% completed
#========Making multipage pdf=============
if out_format=="pdf" and len(fig_list)>0:
print('Merging figures...')
#print("Plotting figures:",fig_list)
debug_filename=output_path+'/.debug_MCMC_plots.txt' #debug file (hidden), used to redirect the output from ghostscript
fdump = open(debug_filename, 'w') #
#Construct list of figures----
all_fig=' '
for figID in fig_list:
#Add outer quotes(" ") to deal with white space in Windows, e.g. '"/Users/my folder/Diagnostics.pdf"'
figID='"'+figID+'"'
all_fig+=figID+' '
#Output name for the pdf
try:
if parser.parse_args().do:
basename=parser.parse_args().do[0]
else:
input_file=output_path+'/'+parser.parse_args().custom_file.name
basename=input_file.split('/')[-1].split('.')[0].strip() #get the input file base name, e.g. "Custom_01" or "Custom"
except: #Special case where no Custom.in is provided
basename='Custom'
#default name is Custom.in, output Diagnostics.pdf
if basename=='Custom':
output_pdf=fig_name=output_path+'/'+'Diagnostics.pdf'
#default name is Custom_XX.in, output Diagnostics_XX.pdf
elif basename[0:7]=="Custom_":
output_pdf=fig_name=output_path+'/Diagnostics_'+basename[7:9]+'.pdf' #same name as input file
#name is different use it
else:
output_pdf=fig_name=output_path+'/'+basename+'.pdf' #same name as input file
#Also add outer quote to the output pdf
output_pdf='"'+output_pdf+'"'
#command to make a multipage pdf out of the individual figures using ghostscript.
# Also remove the temporary files when done
cmd_txt='gs -sDEVICE=pdfwrite -dNOPAUSE -dBATCH -dSAFER -dEPSCrop -sOutputFile='+output_pdf+' '+all_fig
try:
#Test the ghost script and remove command, exit otherwise--
subprocess.check_call(cmd_txt,shell=True, stdout=fdump, stderr=fdump)
#Execute the commands now
subprocess.call(cmd_txt,shell=True, stdout=fdump, stderr=fdump) #run ghostscript to merge the pdf
cmd_txt='rm -f '+all_fig
subprocess.call(cmd_txt,shell=True, stdout=fdump, stderr=fdump)#remove temporary pdf figs
cmd_txt='rm -f '+'"'+debug_filename+'"'
subprocess.call(cmd_txt,shell=True)#remove debug file
#If the plot directory was not present initially, remove it
if not dir_plot_present:
cmd_txt='rm -r '+'"'+output_path+'"'+'/plots'
subprocess.call(cmd_txt,shell=True)
give_permission(output_pdf)
print(output_pdf + ' was generated')
except subprocess.CalledProcessError:
print("ERROR with ghostscript when merging pdf, please try alternative formats")
if debug:raise
#======================================================
# DATA OPERATION UTILITIES
#======================================================
def shift_data(lon,data):
'''
This function shifts the longitude and data from a 0->360 to a -180/+180 grid.
Args:
lon: 1D array of longitude 0->360
data: 2D array with last dimension being the longitude
Returns:
lon: 1D array of longitude -180/+180
data: shifted data
Note: Use np.ma.hstack instead of np.hstack to keep the masked array properties
'''
lon_180=lon.copy()
nlon=len(lon_180)
# for 1D plots: if 1D, reshape array
if len(data.shape) <=1:
data=data.reshape(1,nlon)
#===
lon_180[lon_180>180]-=360.
data=np.hstack((data[:,lon_180<0],data[:,lon_180>=0]))
lon_180=np.append(lon_180[lon_180<0],lon_180[lon_180>=0])
# If 1D plot, squeeze array
if data.shape[0]==1:
data=np.squeeze(data)
return lon_180,data
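# Minimal illustration of shift_data on toy values (not part of the plotting
# pipeline; the numbers below are made up for the example).
def _example_shift_data():
    lon = np.array([0., 90., 180., 270.])
    data = np.array([[1., 2., 3., 4.]])
    lon_180, data_180 = shift_data(lon, data)
    # lon_180 is now [-90., 0., 90., 180.] and data_180 is [4., 1., 2., 3.]
    # (columns follow the shifted longitude order; the 1-row input is squeezed to 1D)
    return lon_180, data_180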
def MY_func(Ls_cont):
'''
This function returns the Mars Year
Args:
    Ls_cont: solar longitude, continuous
Returns:
    MY: int, the Mars year
'''
return (Ls_cont)//(360.)+1
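# Worked example (illustrative): Ls_cont is the continuous solar longitude, so each
# full 360 degrees advances the Mars Year by one, starting at MY 1:
#   MY_func(10.)  -> 1.0
#   MY_func(370.) -> 2.0
#   MY_func(725.) -> 3.0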
def get_lon_index(lon_query_180,lons):
'''
Given a range of requested longitudes, return the indexes to extract data from the netcdf file
Args:
lon_query_180: requested longitudes in -180/+180 units: value, [min, max] or None
lons: 1D array of longitude in the native coordinates (0->360)
Returns:
loni: 1D array of file indexes
txt_lon: text descriptor for the extracted longitudes
*** Note that the keyword 'all' is passed as -99999 by the rT() functions
'''
Nlon=len(lons)
lon_query_180=np.array(lon_query_180)
#If None, set to default, i.e 'all' for a zonal average
if lon_query_180.any()==None: lon_query_180=np.array(-99999)
#=============FV3 format ==============
# lons are 0>360, convert to -180>+180
#======================================
if lons.max()>180:
#one longitude is provided
if lon_query_180.size==1:
#request zonal average
if lon_query_180==-99999:
loni=
|
np.arange(0,Nlon)
|
numpy.arange
|
"""
WS-DeepNet - A Multilayer Perceptron in Python
==============================================
Hi! This is a Multilayer Perceptron implementation in Python,
using NumPy as a helper for some math operations.
"""
import numpy as np
class NeuralNetwork(object):
"""
Usage
-----
Import this class and use its constructor to inform:
- How many input nodes this net should have (int)
- How many hidden nodes you want (int)
- How many output nodes you need (int)
- What is the desired learning rate (float)
Example:
```
from my_answers import NeuralNetwork
nn = NeuralNetwork(3, 2, 1, 0.98)
```
This will create a neural network object in the nn
variable with 3 input nodes, 2 hidden nodes and
1 output node, using 0.98 as the learning rate.
"""
def __init__(self, input_nodes, hidden_nodes,
output_nodes, learning_rate):
"""
The default behavior is to use:
- 32 hidden nodes
- 1 output node
- learning_rate = 0.98
- 3000 iterations
Set your values during the network creation, like:
`nn = NeuralNetwork(3, 2, 1, 0.98)`
"""
# Hyperparameters from default or from constructor
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize the weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
# The default activation function is a sigmoid(x)
self.activation_function = lambda x: 1 / (1 + np.exp(-x))
def train(self, features, targets):
"""
Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
"""
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
final_outputs, hidden_outputs = self.forward_pass_train(X)
delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y,
delta_weights_i_h, delta_weights_h_o)
self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)
def forward_pass_train(self, X):
"""
This is the feedforward step used during
the training process.
Arguments
---------
X: features batch
"""
# This is where the inputs feed the hidden layer through the first
# layer of weights.
hidden_inputs = np.dot(X, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
# This is the final flow. We are going to use a linear output
# here instead of the sigmoid function.
# The linear output is represented by f(x) = x.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
# Return final_outputs and hidden_outputs, to make it easier to train
# the network during the backpropagation steps.
return final_outputs, hidden_outputs
def backpropagation(self, final_outputs, hidden_outputs,
X, y, delta_weights_i_h, delta_weights_h_o):
"""
Implement backpropagation
Arguments
---------
final_outputs: output from forward pass
y: target (i.e. label) batch
delta_weights_i_h: change in weights from input to hidden layers
delta_weights_h_o: change in weights from hidden to output layers
"""
# The error of the output layer.
# Its formula is E = y - ŷ, where y is the label and ŷ is the
# output given after the feedforward step.
error = y - final_outputs
# Since we are using a linear function, the output error term is:
output_error_term = error
# The hidden layer's contribution to the error
# and its gradient.
hidden_error =
|
np.dot(self.weights_hidden_to_output, output_error_term)
|
numpy.dot
|
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from operational_analysis.toolkits import power_curve
from operational_analysis.toolkits.power_curve.parametric_forms import *
noise = 0.1
class TestPowerCurveFunctions(unittest.TestCase):
def setUp(self):
np.random.seed(42)
params = [1300, -7, 11, 2, 0.5]
self.x = pd.Series(np.random.random(100) * 30)
self.y = pd.Series(logistic5param(self.x, *params) + np.random.random(100) * noise)
def test_IEC(self):
# Create test data using logistic5param form
curve = power_curve.IEC(self.x, self.y)
y_pred = curve(self.x)
# Does the IEC power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=1, atol=noise * 2, err_msg="Power curve did not properly fit.")
def test_logistic_5_param(self):
# Create test data using logistic5param form
curve = power_curve.logistic_5_parametric(self.x, self.y)
y_pred = curve(self.x)
# Does the logistic-5 power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=1, atol=noise * 2, err_msg="Power curve did not properly fit.")
def test_gam(self):
# Create test data using logistic5param form
curve = power_curve.gam(windspeed_column = self.x, power_column = self.y, n_splines = 20)
y_pred = curve(self.x)
# Does the spline-fit power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=0.05, atol = 20, err_msg="Power curve did not properly fit.")
def test_3paramgam(self):
# Create test data using logistic5param form
winddir = np.random.random(100)
airdens = np.random.random(100)
curve = power_curve.gam_3param(windspeed_column = self.x, winddir_column=winddir, airdens_column=airdens, power_column = self.y, n_splines = 20)
y_pred = curve(self.x, winddir, airdens)
# Does the spline-fit power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=0.05, atol = 20, err_msg="Power curve did not properly fit.")
def tearDown(self):
pass
class TestParametricForms(unittest.TestCase):
def setUp(self):
pass
def test_logistic5parameter(self):
y_pred = logistic5param(np.array([1., 2., 3.]), *[1300., -7., 11., 2., 0.5])
y = np.array([2.29403585, 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
y_pred = logistic5param(np.array([1, 2, 3]), *[1300., -7., 11., 2., 0.5])
y = np.array([2.29403585, 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle integer inputs properly.")
y_pred = logistic5param(np.array([0.01, 0.0]), 1300, 7, 11, 2, 0.5)
y = np.array([ 1300.0 , 1300.0 ])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle zero properly (b>0).")
y_pred = logistic5param(np.array([0.01, 0.0]), 1300, -7, 11, 2, 0.5)
y = np.array([ 2.0 , 2.0 ])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle zero properly (b<0).")
def test_logistic5parameter_capped(self):
# Numpy array + Lower Bound
y_pred = logistic5param_capped(np.array([1., 2., 3.]), *[1300., -7., 11., 2., 0.5], lower=5., upper=20.)
y = np.array([5., 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
# Numpy array + Upper and Lower Bound
y_pred = logistic5param_capped(np.array([1., 2., 3.]), *[1300., -7., 11., 2., 0.5], lower=5., upper=10.)
y = np.array([5., 5.32662505, 10.])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
# Pandas Series + Upper and Lower Bound
y_pred = logistic5param_capped(pd.Series([1., 2., 3.]), *[1300., -7., 11., 2., 0.5], lower=5., upper=20.)
y = pd.Series([5., 5.32662505, 15.74992462])
|
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
|
numpy.testing.assert_allclose
|
import numpy as np
from scipy.stats import entropy
from sklearn.cluster import KMeans
import random
import pickle
"""Author: <NAME>"""
"""email: <EMAIL>"""
"""This file contains helper functions to run genfact algorithm
create_clusters : Takes featuredata and classdata as array and clustersize as a number.
Returns clusters of featuredata sorted in the descending order of clusterscore.
crossover: Takes population of featuredata, array of classdata for the population, predictive model, datatype of featuredata and offspringsize as a number.
Returns new population after crossover and classdata for the new population
evaluatefitness: Takes population of featuredata, array of classdata for the population.
For each sample in population it finds the another sample in the population as counterfactual having different class and minimum euclidean distance (fitness) with respect to the sample. The fitness and counterfactuals are returned
selectbest: Based on populationsize it returns the most fit set of factual and counterfactual pairs
run_genetic: Using the clustered featuredata, datatype and the model it run the genetic algorithm and returns the factual and counterfactual pairs.
"""
def create_clusters(featuredata,classdata,clustsize=20):
k=round(len(classdata)/(clustsize*2))#round(len(np.unique(classdata))*1.5)
kmeansk = KMeans(n_clusters=k)
y_kmeans = kmeansk.fit_predict(featuredata)
clusters=[]
clusterclassfraction=[]
clusterentropy = []
for i in range(k):
cdata = featuredata[y_kmeans == i]
tclass = classdata[y_kmeans == i]
classfraction=np.unique(tclass, return_counts=True)[1]/len(tclass)
clusters.append(cdata)
clusterclassfraction.append(classfraction)
classfractionentropy=[entropy(i) for i in clusterclassfraction]
sizeofclusters=[len(i) for i in clusters]
clusterscore = [x/
|
np.log(y+1)
|
numpy.log
|
import os
from jetnet.datasets import JetNet
from jetnet import evaluation
import numpy as np
datasets = ["g", "q", "t"]
samples_dict = {key: {} for key in datasets}
real_samples = {}
num_samples = 50000
# mapping folder names to gen and disc names in the final table
model_name_map = {
"fc": ["FC", "FC"],
"fcmp": ["FC", "MP"],
"fcpnet": ["FC", "PointNet"],
"graphcnn": ["GraphCNN", "FC"],
"graphcnnmp": ["GraphCNN", "MP"],
"graphcnnpnet": ["GraphCNN", "PointNet"],
"mp": ["MP", "MP"],
"mpfc": ["MP", "FC"],
"mplfc": ["MP-LFC", "MP"],
"mppnet": ["MP", "PointNet"],
"treeganfc": ["TreeGAN", "FC"],
"treeganmp": ["TreeGAN", "MP"],
"treeganpnet": ["TreeGAN", "PointNet"],
}
# Load samples
# models_dir = "/graphganvol/MPGAN/trained_models/"
models_dir = "./trained_models/"
for dir in os.listdir(models_dir):
if dir == ".DS_Store" or dir == "README.md":
continue
model_name = dir.split("_")[0]
if model_name in model_name_map:
dataset = dir.split("_")[1]
samples = np.load(f"{models_dir}/{dir}/gen_jets.npy")[:num_samples, :, :3]
samples_dict[dataset][model_name] = samples
for dataset in datasets:
real_samples[dataset] = (
JetNet(
dataset, "/graphganvol/MPGAN/datasets/", normalize=False, train=False, use_mask=False
)
.data[:num_samples]
.numpy()
)
# order in final table
order = [
"fc",
"graphcnn",
"treeganfc",
"fcpnet",
"graphcnnpnet",
"treeganpnet",
"-",
"mp",
"mplfc",
"-",
"fcmp",
"graphcnnmp",
"treeganmp",
"mpfc",
"mppnet",
]
# get evaluation metrics for all samples and save in folder
evals_dict = {key: {} for key in datasets}
for key in order:
print(key)
for dataset in datasets:
print(dataset)
if key not in samples_dict[dataset]:
print(f"{key} samples for {dataset} jets not found")
continue
gen_jets = samples_dict[dataset][key]
real_jets = real_samples[dataset]
evals = {}
evals["w1m"] = evaluation.w1m(gen_jets, real_jets)
evals["w1p"] = evaluation.w1p(gen_jets, real_jets, average_over_features=False)
evals["w1efp"] = evaluation.w1efp(gen_jets, real_jets, average_over_efps=False)
evals["fpnd"] = evaluation.fpnd(gen_jets[:, :30], dataset, device="cuda", batch_size=256)
cov, mmd = evaluation.cov_mmd(real_jets, gen_jets)
evals["coverage"] = cov
evals["mmd"] = mmd
f = open(f"{models_dir}/{key}_{dataset}/evals.txt", "w+")
f.write(str(evals))
f.close()
evals_dict[dataset][key] = evals
# load eval metrics if already saved
from numpy import array
for dataset in datasets:
for key in order:
if key != "-":
with open(f"{models_dir}/{key}_{dataset}/evals.txt", "r") as f:
evals_dict[dataset][key] = eval(f.read())
if "cov" in evals_dict[dataset][key]:
evals_dict[dataset][key]["coverage"] = evals_dict[dataset][key]["cov"]
del evals_dict[dataset][key]["cov"]
with open(f"{models_dir}/{key}_{dataset}/evals.txt", "w") as f:
f.write(str(evals_dict[dataset][key]))
# find best values
best_key_dict = {key: {} for key in datasets}
eval_keys = ["w1m", "w1p", "w1efp", "fpnd", "coverage", "mmd"]
for dataset in datasets:
model_keys = list(evals_dict[dataset].keys())
lists = {key: [] for key in eval_keys}
for key in model_keys:
evals = evals_dict[dataset][key]
lists["w1m"].append(np.round(evals["w1m"][0], 5))
lists["w1p"].append(np.round(np.mean(evals["w1p"][0]), 4))
lists["w1efp"].append(np.round(np.mean(evals["w1efp"][0]), 5 if dataset == "t" else 6))
lists["fpnd"].append(np.round(evals["fpnd"], 2))
lists["coverage"].append(1 - np.round(evals["coverage"], 2)) # invert to maximize cov
lists["mmd"].append(np.round(evals["mmd"], 3))
for key in eval_keys:
best_key_dict[dataset][key] = np.array(model_keys)[
np.flatnonzero(np.array(lists[key]) == np.array(lists[key]).min())
]
def format_mean_sd(mean, sd):
"""round mean and standard deviation to most significant digit of sd and apply latex formatting"""
decimals = -int(np.floor(np.log10(sd)))
decimals -= int((sd * 10 ** decimals) >= 9.5)
if decimals < 0:
ten_to = 10 ** (-decimals)
if mean > ten_to:
mean = ten_to * (mean // ten_to)
else:
mean_ten_to = 10 ** np.floor(np.log10(mean))
mean = mean_ten_to * (mean // mean_ten_to)
sd = ten_to * (sd // ten_to)
decimals = 0
if mean >= 1e3 and sd >= 1e3:
mean = np.round(mean * 1e-3)
sd = np.round(sd * 1e-3)
return f"${mean:.{decimals}f}$k $\\pm {sd:.{decimals}f}$k"
else:
return f"${mean:.{decimals}f} \\pm {sd:.{decimals}f}$"
def format_fpnd(fpnd):
if fpnd >= 1e6:
fpnd = np.round(fpnd * 1e-6)
return f"${fpnd:.0f}$M"
elif fpnd >= 1e3:
fpnd = np.round(fpnd * 1e-3)
return f"${fpnd:.0f}$k"
elif fpnd >= 10:
fpnd = np.round(fpnd)
return f"${fpnd:.0f}$"
elif fpnd >= 1:
return f"${fpnd:.1f}$"
else:
return f"${fpnd:.2f}$"
def bold_best_key(val_str: str, bold: bool):
if bold:
return f"$\\mathbf{{{val_str[1:-1]}}}$"
else:
return val_str
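# Example of bold_best_key (illustrative): the leading/trailing "$" are stripped and
# the contents wrapped in \mathbf when the model holds the best score.
#   bold_best_key("$1.2 \pm 0.3$", True)  -> "$\mathbf{1.2 \pm 0.3}$"
#   bold_best_key("$1.2 \pm 0.3$", False) -> "$1.2 \pm 0.3$"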
# Make and save table
table_dict = {key: {} for key in datasets}
for dataset in datasets:
lines = []
for key in order:
if key == "-":
lines.append("\cmidrule(lr){2-3}\n")
else:
line = f" & {model_name_map[key][0]} & {model_name_map[key][1]}"
evals = evals_dict[dataset][key]
line += " & " + bold_best_key(
format_mean_sd(evals["w1m"][0] * 1e3, evals["w1m"][1] * 1e3),
key in best_key_dict[dataset]["w1m"],
)
line += " & " + bold_best_key(
format_mean_sd(
|
np.mean(evals["w1p"][0])
|
numpy.mean
|
import numpy as np
from colr import color
from gym import Env
from gym.spaces import Discrete, Box
class TwoZeroFourEightEnv(Env):
def __init__(self):
self.metadata = {"render.modes": ["human", "ansi"]}
self.action_space = Discrete(4)
self.observation_space = Box(low=0, high=2 ** 16 - 1, shape=(4,), dtype=np.uint16)
self.board = Board()
def step(self, action):
is_move_valid, total_merged = self.board.move(action)
reward = total_merged if is_move_valid else -2
done = self.board.is_game_over()
info = {"score": self.board.score, "max_tile": self.board.max_tile()}
return self.board.state, reward, done, info
def reset(self):
self.board.reset()
return self.board.state
def render(self, mode="human"):
print(self.board.draw())
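# Quick usage sketch (illustrative): play a few random moves in the environment.
# Note that Board() precomputes move tables for all 16-bit rows, so construction
# takes a moment.
def _example_random_rollout(n_steps=5):
    env = TwoZeroFourEightEnv()
    env.reset()
    info = {}
    for _ in range(n_steps):
        state, reward, done, info = env.step(env.action_space.sample())
        if done:
            break
    return info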
class Board:
def __init__(self):
print("Initializing board ...")
self.width, self.height = 4, 4
self.score, self.state = None, None
self.moves_backward = self._compute_moves_backward()
self.moves_forward = self._compute_moves_forward()
self.DIRECTIONS = {0: "UP", 1: "DOWN", 2: "LEFT", 3: "RIGHT"}
self.PALETTE = {
0: ("000000", "000000"),
2 ** 1: ("222222", "eee4da"),
2 ** 2: ("222222", "ede0c8"),
2 ** 3: ("222222", "f2b179"),
2 ** 4: ("222222", "f59563"),
2 ** 5: ("222222", "f67c5f"),
2 ** 6: ("222222", "f65e3b"),
2 ** 7: ("222222", "edcf72"),
2 ** 8: ("222222", "edcc61"),
2 ** 9: ("222222", "edc850"),
2 ** 10: ("222222", "edc53f"),
2 ** 11: ("222222", "edc22e"),
2 ** 12: ("f9f6f2", "3c3a32"),
2 ** 13: ("f9f6f2", "3c3a32"),
2 ** 14: ("f9f6f2", "3c3a32"),
2 ** 15: ("f9f6f2", "3c3a32"),
2 ** 16: ("f9f6f2", "3c3a32")
}
self.reset()
def reset(self):
self.score = 0
self.state = np.uint64(0)
self._add_random_tile()
self._add_random_tile()
def max_tile(self):
return 2 ** np.max([cell for row in self._unpack_rows(self.state) for cell in self._unpack_cells(row)])
def is_game_over(self):
return all([self._move(action, self.state)[0] == self.state for action in self.DIRECTIONS])
def move(self, action):
new_state, total_merged = self._move(action, self.state)
self.score += total_merged
if new_state == self.state:
return False, total_merged
self.state = new_state
self._add_random_tile()
return True, total_merged
def draw(self):
grid = self._state_as_grid()
result = f"SCORE : {self.score}\n"
result += "┌" + ("────────┬" * self.width)[:-1] + "┐\n"
for i, row in enumerate(grid):
result += "|" + "|".join([
color(" " * 8, fore=self.PALETTE[cell][0], back=self.PALETTE[cell][1], style="bold")
for cell in row
]) + "|\n"
result += "|" + "|".join([
color(str(cell if cell != 0 else "").center(8), fore=self.PALETTE[cell][0], back=self.PALETTE[cell][1],
style="bold")
for cell in row
]) + "|\n"
result += "|" + "|".join([
color(" " * 8, fore=self.PALETTE[cell][0], back=self.PALETTE[cell][1], style="bold")
for cell in row
]) + "|\n"
if i + 1 < grid.shape[0]:
result += "├" + ("────────┼" * self.width)[:-1] + "┤\n"
result += "└" + ("────────┴" * self.width)[:-1] + "┘\n"
return result
def _add_random_tile(self):
def num_zero_tiles(s):
nz = 0
mask = np.uint64(0x000000000000000F)
for _ in range(16):
if s & mask == 0:
nz += 1
s >>= np.uint(4)
return nz
n_zeros = num_zero_tiles(self.state)
index = np.random.randint(0, n_zeros)
tile = np.uint64(2) if np.random.uniform() > 0.9 else np.uint64(1)
state = self.state
while True:
while state & np.uint64(0xf) != 0:
state >>= np.uint64(4)
tile <<= np.uint64(4)
if index == 0:
break
index -= 1
state >>= np.uint64(4)
tile <<= np.uint64(4)
self.state |= tile
def _move(self, action, current_state):
def move_up(state):
columns = self._unpack_columns(state)
columns, total_merged = zip(*[self.moves_forward[column] for column in columns])
return self._pack_columns(columns), np.sum(total_merged)
def move_down(state):
columns = self._unpack_columns(state)
columns, total_merged = zip(*[self.moves_backward[column] for column in columns])
return self._pack_columns(columns), np.sum(total_merged)
def move_left(state):
rows = self._unpack_rows(state)
rows, total_merged = zip(*[self.moves_backward[row] for row in rows])
return self._pack_rows(rows), np.sum(total_merged)
def move_right(state):
rows = self._unpack_rows(state)
rows, total_merged = zip(*[self.moves_forward[row] for row in rows])
return self._pack_rows(rows), np.sum(total_merged)
moves = {"UP": move_up, "DOWN": move_down, "LEFT": move_left, "RIGHT": move_right}
move = moves.get(self.DIRECTIONS.get(action))
return move(current_state)
@staticmethod
def _pack_rows(rows):
state = np.uint64(0)
for row in map(np.uint64, rows):
state <<= np.uint64(16)
state |= row
return state
@staticmethod
def _unpack_rows(state):
rows = []
for _ in range(4):
mask = np.uint64(0x000000000000FFFF)
rows.append(np.uint16(state & mask))
state >>= np.uint64(16)
rows.reverse()
return np.array(rows)
@staticmethod
def _pack_columns(columns):
state = np.uint64(0)
mask = np.uint64(0x000F000F000F000F)
for column in map(np.uint64, columns):
state <<= np.uint64(4)
state |= ((column >> np.uint64(12)) | (column << np.uint64(8)) | (
column << np.uint64(28)) | column << np.uint64(48)) & mask
return state
@staticmethod
def _unpack_columns(state):
columns = []
mask = np.uint64(0x000F000F000F000F)
for _ in range(4):
column = state & mask
column = ((column << np.uint64(12)) | (column >> np.uint64(8)) | (column >> np.uint64(28)) | (
column >> np.uint64(48)))
columns.append(np.uint16(column))
state >>= np.uint64(4)
columns.reverse()
return np.array(columns)
@staticmethod
def _pack_cells(cells):
row = np.int16(0)
for cell in cells:
row <<= 4
row |= cell
return row
@staticmethod
def _unpack_cells(row_or_column):
cells = []
mask = np.uint16(0x000000000000000F)
for _ in range(4):
cells.append(np.uint8(row_or_column & mask))
row_or_column >>= np.uint(4)
cells.reverse()
return np.array(cells)
def _compute_moves_backward(self):
moves = {}
for state in range(np.iinfo(np.uint16).max):
current = np.uint16(state)
cells = self._unpack_cells(current)
cells = np.compress(cells != 0, cells)
cells = np.pad(cells, (0, 4 - cells.size + 1))
next_ = []
total_merged = 0
for i in range(4):
if cells[i] == cells[i + 1]:
next_.append(cells[i] + 1 if cells[i] != 0 else 0)
total_merged += 2 ** (cells[i] + 1) if cells[i] != 0 else 0
cells[i + 1] = 0
else:
next_.append(cells[i])
next_ = np.array(next_)
next_ = np.compress(next_ != 0, next_)
next_ = np.pad(next_, (0, 4 - next_.size))
moves[current] = (self._pack_cells(next_), total_merged)
return moves
def _compute_moves_forward(self):
moves = {}
for state in range(np.iinfo(np.uint16).max):
current = np.uint16(state)
cells = self._unpack_cells(current)
cells = np.compress(cells != 0, cells)
cells = np.pad(cells, (4 - cells.size + 1, 0))
next_ = []
total_merged = 0
for i in range(4, 0, -1):
if cells[i] == cells[i - 1]:
next_.append(cells[i] + 1 if cells[i] != 0 else 0)
total_merged += 2 ** (cells[i] + 1) if cells[i] != 0 else 0
cells[i - 1] = 0
else:
next_.append(cells[i])
next_.reverse()
next_ = np.array(next_)
next_ =
|
np.compress(next_ != 0, next_)
|
numpy.compress
|
import tensorflow as tf
import tensorflow_probability as tfp
# from tensorflow.core.protobuf import config_pb2
import numpy as np
# import os
# from fit_model import load_data
import matplotlib.pyplot as plt
import time
import numbers
import pandas as pd
import tf_keras_tfp_lbfgs as funfac
from dotenv import load_dotenv
import os
import requests
from datetime import datetime, timedelta
# for the file selection dialogue (see https://codereview.stackexchange.com/questions/162920/file-selection-button-for-jupyter-notebook)
import traitlets
from ipywidgets import widgets
from IPython.display import display
from tkinter import Tk, filedialog
class SelectFilesButton(widgets.Button):
"""A file widget that leverages tkinter.filedialog."""
# see https://codereview.stackexchange.com/questions/162920/file-selection-button-for-jupyter-notebook
def __init__(self, out, CallBack=None,Load=True):
super(SelectFilesButton, self).__init__()
# Add the selected_files trait
self.add_traits(files=traitlets.traitlets.List())
# Create the button.
if Load:
self.description = "Load"
else:
self.description = "Save"
self.isLoad=Load
self.icon = "square-o"
self.style.button_color = "orange"
# Set on click behavior.
self.on_click(self.select_files)
self.CallBack = CallBack
self.out = widgets.Output()
@staticmethod
def select_files(b):
"""Generate instance of tkinter.filedialog.
Parameters
----------
b : obj:
An instance of ipywidgets.widgets.Button
"""
with b.out:
try:
# Create Tk root
root = Tk()
# Hide the main window
root.withdraw()
# Raise the root to the top of all windows.
root.call('wm', 'attributes', '.', '-topmost', True)
# List of selected files will be set to b.value
if b.isLoad:
filename = filedialog.askopenfilename() # multiple=False
else:
filename = filedialog.asksaveasfilename()
# print('Load/Save Dialog finished')
#b.description = "Files Selected"
#b.icon = "check-square-o"
#b.style.button_color = "lightgreen"
if b.CallBack is not None:
#print('Invoking CallBack')
b.CallBack(filename)
#else:
#print('no CallBack')
except:
#print('Problem in Load/Save')
#print('File is'+b.files)
pass
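# Usage sketch (illustrative, meant for a Jupyter notebook cell):
#   out = widgets.Output()
#   btn = SelectFilesButton(out, CallBack=lambda filename: print("chosen:", filename), Load=True)
#   display(btn, out)
# Clicking the button opens a tkinter file dialog and passes the chosen path to CallBack.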
cumulPrefix = '_cumul_' # this is used as a keyword to identify whether this plot was already plotted
def getNumArgs(myFkt):
from inspect import signature
sig = signature(myFkt)
return len(sig.parameters)
class DataLoader(object):
def __init__(self):
load_dotenv()
def pull_data(self, uri='http://ec2-3-122-224-7.eu-central-1.compute.amazonaws.com:8080/daily_data'):
return requests.get(uri).json()
# return requests.get('http://ec2-3-122-224-7.eu-central-1.compute.amazonaws.com:8080/daily_data').json()
def get_new_data(self):
uri = "http://ec2-3-122-224-7.eu-central-1.compute.amazonaws.com:8080/data"
json_data = self.pull_data(uri)
table =
|
np.array(json_data["rows"])
|
numpy.array
|
"""Ensembles of motifs"""
import numpy as np
import settings, log
logger = log.get("ensembles")
class Ensemble:
"""Ensemble base class.
Parameters
----------
image : img.SparseImage
A sparse image encoding a set of motifs and the points they select from a data set.
Attributes
----------
size : int
Number of motifs present in the ensemble.
domain : img.Point list
Points classified by motifs in the ensemble.
motifs : motifs.Motif list
Motifs present in the ensemble.
"""
def __init__(self, image):
# keep the image around for a few things
logger.info(f"Building ensemble from image {image}...")
self._image = image
self._point_map = list(image.domain)
self._motif_map = image.motifs
# construct inclusion matrix by building rows per-motif
rows = []
for motif in self._motif_map:
rows.append(self._to_row(image.motif_domain(motif)))
self._inclusion = np.transpose(np.array(rows))
logger.info(f"Ensemble {self} built with {len(self._motif_map)} motifs and {len(self._point_map)} points.")
def _to_row(self, points):
"""Converts a list of points to a row in the ensemble's inclusion matrix.
Parameters
----------
points : img.Point list
A list of points classified by a single motif
Returns
-------
np.Array
A 0-1 integer array representing the provided list of points
Notes
-----
Internal helper function, not intended for use outside this base class.
Functional inverse of `_to_points`.
"""
row = [1 if point in points else 0 for point in self._point_map]
return np.array(row)
def _to_points(self, row):
"""Converts a row in the ensemble's inclusion matrix to a list of points.
Parameters
----------
row : np.Array
A 0-1 np.Array from a row in the inclusion matrix.
Returns
-------
img.Point list
List of points encoded in the provided row.
Notes
-----
Internal helper function, not intended for use outside this base class.
Functional inverse of `_to_row`.
"""
result = []
for point, value in zip(self._point_map, np.nditer(row, order='C')):
if value:
result.append(point)
return result
@property
def size(self):
return len(self._motif_map)
@property
def domain(self):
return self._point_map
@property
def motifs(self):
return self._motif_map
def motif_domain(self, motif):
"""Set of points classified by a motif in the ensemble.
Parameters
----------
motif : motifs.Motif
A motif object in the ensemble.
Returns
-------
img.Point list
A list of points classified by the provided motif.
See Also
--------
`domain` - the `domain` attribute gives the list of points classified by all motifs in the ensemble.
"""
return self._image.motif_domain(motif)
def classify(self, point):
"""Classifies a single point using the ensemble.
Parameters
----------
point : img.Point
A point-to-be-classified.
Returns
-------
bool
True/false classification of the provided point.
Notes
-----
Assumes the class extending `Ensemble` uses `classified` to determine what is and isn't classified.
"""
return (point in self.classified())
def update(self):
"""Updates some internal state to "improve" the ensemble. Intended to be overwritten.
Raises
------
NotImplementedError
"""
raise NotImplementedError
def classified(self, threshold=None):
"""Provides a set of positively-classified points.
Parameters
----------
threshold : float, optional
A [0,1]-valued threshold indicating the minimum positive probability for a point to be classified. Defaults to `settings.CLASSIFICATION_THRESHOLD`.
Returns
-------
img.Point list
List of points in the ensemble domain with sufficiently high classification probability.
Notes
-----
Assumes the class extending `Ensemble` uses `probabilities` to determine confidence of classification.
"""
if threshold is None:
threshold = settings.CLASSIFICATION_THRESHOLD
return self._to_points(self.probabilities() >= threshold)
def probabilities(self):
"""Provides classification probabilities for points in the ensemble's domain. Intended to be overwritten.
Returns
-------
np.Array
An [0,1]-valued np.Array whose dimensions match the `Ensemble._point_map` attribute.
Raises
------
NotImplementedError
"""
raise NotImplementedError
def probabilities_per_point(self):
"""Provide classification probabilities for every point in the domain as a list of pairs.
Returns
-------
(img.Point, float) list
A list of points in the ensemble domain paired-up with their positive classification probabilities.
"""
return zip(self._point_map, self.probabilities())
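# Minimal sketch of the contract documented above (illustrative, not one of the
# ensembles used elsewhere): a subclass only needs `update` and `probabilities`;
# `classify`, `classified` and `probabilities_per_point` then work unchanged.
class _UniformEnsemble(Ensemble):
    """Assigns every point in the domain the same fixed positive probability."""
    def __init__(self, image, p=0.5):
        super().__init__(image)
        self._p = p
    def update(self):
        pass  # nothing to learn in this toy ensemble
    def probabilities(self):
        return np.full(len(self._point_map), self._p)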
class Disjunction(Ensemble):
"""Ensemble computing classification via a "disjunction" of individual motif classifications.
Parameters
----------
image : img.SparseImage
A sparse image encoding a set of motifs and the points they select from a data set.
accuracy_smoothing : int, optional
Constant integer to add to numerators and denominators to smooth accuracy computations. Defaults to 1.
Attributes
----------
size : int
Number of *relevant* motifs present in the ensemble.
domain : img.Point list
Points classified by motifs in the ensemble.
motifs : motifs.Motif list
Motifs present in the ensemble.
accuracies : np.Array
1-dimensional float array encoding accuracies per-motif. Indices match that of the `motifs` attribute.
"""
def __init__(self, image, accuracy_smoothing=1):
# build inclusion matrix, via super
super().__init__(image)
# keep observations for accuracy computations
self._observations = []
# and a set of accuracies built from observations
self._accuracy_smoothing = accuracy_smoothing
self.accuracies = np.ones(len(self._motif_map))
def update(self, point, classification):
"""Update the per-motif accuracy prediction, given an observation.
Parameters
----------
point : img.Point
A point in the ensemble's domain whose classification has been observed.
classification : bool
The classification of the observed point.
"""
self._observations.append( (point, classification) )
accuracies = []
for motif in self._motif_map:
prediction = point in self.motif_domain(motif)
correct = sum([1 for (point, classification) in self._observations if prediction == classification])
total = len(self._observations)
accuracies.append(
(correct + self._accuracy_smoothing) / (total + self._accuracy_smoothing)
)
self.accuracies = np.array(accuracies)
def _relevant_motifs(self):
"""Select all motifs with accuracy above a particular threshold.
Returns
-------
np.Array
A 0-1 array indicating which motifs have accuracies above a threshold.
Notes
-----
The accuracy threshold is determined by the value `settings.ACCURACY_THRESHOLD`.
"""
return np.where(
self.accuracies >= settings.ACCURACY_THRESHOLD,
np.ones_like(self.accuracies),
|
np.zeros_like(self.accuracies)
|
numpy.zeros_like
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for Saccader model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from saccader.visual_attention import saccader
from saccader.visual_attention import saccader_config
def _get_test_cases():
"""Provides test cases."""
is_training = [True, False]
policy = ["learned", "random", "ordered_logits", "sobel_mean", "sobel_var"]
i = 0
cases = []
for p in policy:
for t in is_training:
cases.append(("case_%d" % i, p, t))
i += 1
return tuple(cases)
class SaccaderTest(tf.test.TestCase, parameterized.TestCase):
def test_import(self):
self.assertIsNotNone(saccader)
@parameterized.named_parameters(
*_get_test_cases()
)
def test_build(self, policy, is_training):
config = saccader_config.get_config()
num_times = 2
image_shape = (224, 224, 3)
num_classes = 10
config.num_classes = num_classes
config.num_times = num_times
batch_size = 3
images = tf.constant(
np.random.rand(*((batch_size,) + image_shape)), dtype=tf.float32)
model = saccader.Saccader(config)
logits = model(images, num_times=num_times, is_training=is_training,
policy=policy)[0]
init_op = model.init_op
self.evaluate(init_op)
self.assertEqual((batch_size, num_classes),
self.evaluate(logits).shape)
@parameterized.named_parameters(
*_get_test_cases()
)
def test_locations(self, policy, is_training):
config = saccader_config.get_config()
num_times = 2
image_shape = (224, 224, 3)
num_classes = 10
config.num_classes = num_classes
config.num_times = num_times
batch_size = 4
images = tf.constant(
|
np.random.rand(*((batch_size,) + image_shape))
|
numpy.random.rand
|
#!/usr/bin/env python3
"""
Created on Tue Apr 24 15:48:52 2020
@author: <NAME>
"""
import sys
from os.path import splitext
import numpy as np
# import spatialmath as sp
from spatialmath import SE3
from spatialmath.base.argcheck import getvector, verifymatrix
from roboticstoolbox.robot.ELink import ELink, ETS
# from roboticstoolbox.backends.PyPlot.functions import \
# _plot, _teach, _fellipse, _vellipse, _plot_ellipse, \
# _plot2, _teach2
from roboticstoolbox.tools import xacro
from roboticstoolbox.tools import URDF
from roboticstoolbox.robot.Robot import Robot
from roboticstoolbox.robot.Gripper import Gripper
from pathlib import PurePath, PurePosixPath
from ansitable import ANSITable, Column
from spatialmath import SpatialAcceleration, SpatialVelocity, \
SpatialInertia, SpatialForce
class ERobot(Robot):
"""
The ERobot. A superclass which represents the
kinematics of a serial-link manipulator
:param et_list: List of elementary transforms which represent the robot
kinematics
:type et_list: ET list
:param name: Name of the robot
:type name: str, optional
:param manufacturer: Manufacturer of the robot
:type manufacturer: str, optional
:param base: Location of the base is the world frame
:type base: SE3, optional
:param tool: Offset of the flange of the robot to the end-effector
:type tool: SE3, optional
:param gravity: The gravity vector
:type gravity: ndarray(3)
:references:
- Kinematic Derivatives using the Elementary Transform Sequence,
<NAME> and <NAME>
"""
# TODO do we need tool and base as well?
def __init__(
self,
elinks,
base_link=None,
gripper_links=None,
**kwargs
):
self._ets = []
self._linkdict = {}
self._n = 0
self._ee_links = []
self._base_link = None
if isinstance(elinks, ETS):
# were passed an ETS string
ets = elinks
elinks = []
# chop it up into segments, a link frame after every joint
start = 0
for j, k in enumerate(ets.joints()):
ets_j = ets[start:k+1]
start = k + 1
if j == 0:
parent = None
else:
parent = elinks[-1]
elink = ELink(ets_j, parent=parent, name=f"link{j:d}")
elinks.append(elink)
n = len(ets.joints())
tool = ets[start:]
if len(tool) > 0:
elinks.append(ELink(tool, parent=elinks[-1], name="ee"))
elif isinstance(elinks, list):
# were passed a list of ELinks
# check all the incoming ELink objects
n = 0
for link in elinks:
if isinstance(link, ELink):
self._linkdict[link.name] = link
else:
raise TypeError("Input can be only ELink")
if link.isjoint:
n += 1
else:
raise TypeError('elinks must be a list of ELinks or an ETS')
self._n = n
# scan for base
for link in elinks:
# is this a base link?
if link._parent is None:
if self._base_link is not None:
raise ValueError('Multiple base links')
self._base_link = link
else:
# no, update children of this link's parent
link._parent._child.append(link)
# Set up the gripper, make a list containing the root of all
# grippers
if gripper_links is not None:
if isinstance(gripper_links, ELink):
gripper_links = [gripper_links]
else:
gripper_links = []
# An empty list to hold all grippers
self.grippers = []
# Make a gripper object for each gripper
for link in gripper_links:
g_links = self.dfs_links(link)
# Remove gripper links from the robot
for g_link in g_links:
elinks.remove(g_link)
# Save the gripper object
self.grippers.append(Gripper(g_links))
# Subtract the n of the grippers from the n of the robot
for gripper in self.grippers:
self._n -= gripper.n
# Set the ee links
self.ee_links = []
if len(gripper_links) == 0:
for link in elinks:
# is this a leaf node? and do we not have any grippers
if len(link.child) == 0:
# no children, must be an end-effector
self.ee_links.append(link)
else:
for link in gripper_links:
# use the passed in value
self.ee_links.append(link.parent)
# assign the joint indices
if all([link.jindex is None for link in elinks]):
jindex = [0] # "mutable integer" hack
def visit_link(link, jindex):
# if it's a joint, assign it a jindex and increment it
if link.isjoint and link in elinks:
link.jindex = jindex[0]
jindex[0] += 1
# visit all links in DFS order
self.dfs_links(
self.base_link, lambda link: visit_link(link, jindex))
elif all([link.jindex is not None for link in elinks]):
# jindex set on all, check they are unique and sequential
jset = set(range(self._n))
for link in elinks:
if link.jindex not in jset:
raise ValueError(
f'joint index {link.jindex} was '
'repeated or out of range')
jset -= set([link.jindex])
if len(jset) > 0: # pragma nocover # is impossible
raise ValueError(f'joints {jset} were not assigned')
else:
# must be a mixture of ELinks with/without jindex
raise ValueError(
'all links must have a jindex, or none have a jindex')
# Current joint angles of the robot
# TODO should go to Robot class?
self.q = np.zeros(self.n)
self.qd = np.zeros(self.n)
self.qdd = np.zeros(self.n)
self.control_type = 'v'
super().__init__(elinks, **kwargs)
def dfs_links(self, start, func=None):
"""
        Visit all links from start in depth-first order, applying
        func to each visited link.
:param start: the link to start at
:type start: ELink
:param func: An optional function to apply to each link as it is found
:type func: function
:returns: A list of links
:rtype: list of ELink
"""
visited = []
def vis_children(link):
visited.append(link)
if func is not None:
func(link)
for li in link.child:
if li not in visited:
vis_children(li)
vis_children(start)
return visited
# def dfs_path(self, l1, l2):
# path = []
# visited = [l1]
# def vis_children(link):
# visited.append(link)
# for li in link.child:
# if li not in visited:
# if li == l2 or vis_children(li):
# path.append(li)
# return True
# vis_children(l1)
# path.append(l1)
# path.reverse()
# return path
def to_dict(self):
ob = {
'links': [],
'name': self.name,
'n': self.n
}
self.fkine_all()
for link in self.links:
li = {
'axis': [],
'eta': [],
'geometry': [],
'collision': []
}
for et in link.ets():
li['axis'].append(et.axis)
li['eta'].append(et.eta)
if link.v is not None:
li['axis'].append(link.v.axis)
li['eta'].append(link.v.eta)
for gi in link.geometry:
li['geometry'].append(gi.to_dict())
for gi in link.collision:
li['collision'].append(gi.to_dict())
ob['links'].append(li)
# Do the grippers now
for gripper in self.grippers:
for link in gripper.links:
li = {
'axis': [],
'eta': [],
'geometry': [],
'collision': []
}
for et in link.ets():
li['axis'].append(et.axis)
li['eta'].append(et.eta)
if link.v is not None:
li['axis'].append(link.v.axis)
li['eta'].append(link.v.eta)
for gi in link.geometry:
li['geometry'].append(gi.to_dict())
for gi in link.collision:
li['collision'].append(gi.to_dict())
ob['links'].append(li)
return ob
def fk_dict(self):
ob = {
'links': []
}
self.fkine_all()
# Do the robot
for link in self.links:
li = {
'geometry': [],
'collision': []
}
for gi in link.geometry:
li['geometry'].append(gi.fk_dict())
for gi in link.collision:
li['collision'].append(gi.fk_dict())
ob['links'].append(li)
# Do the grippers now
for gripper in self.grippers:
for link in gripper.links:
li = {
'geometry': [],
'collision': []
}
for gi in link.geometry:
li['geometry'].append(gi.fk_dict())
for gi in link.collision:
li['collision'].append(gi.fk_dict())
ob['links'].append(li)
return ob
# @classmethod
# def urdf_to_ets(cls, file_path):
# name, ext = splitext(file_path)
# if ext == '.xacro':
# urdf_string = xacro.main(file_path)
# urdf = URDF.loadstr(urdf_string, file_path)
# return ERobot(
# urdf.elinks,
# name=urdf.name
# )
def urdf_to_ets_args(self, file_path, tld=None):
"""
        Load a URDF (or xacro) file and return its links and name.
        :param file_path: File path relative to the xacro folder
        :type file_path: str, in POSIX file path format
:param tld: top-level directory, defaults to None
:type tld: str, optional
:return: Links and robot name
:rtype: tuple(ELink list, str)
"""
# get the path to the class that defines the robot
classpath = sys.modules[self.__module__].__file__
# add on relative path to get to the URDF or xacro file
base_path = PurePath(classpath).parent.parent / 'URDF' / 'xacro'
file_path = base_path / PurePosixPath(file_path)
name, ext = splitext(file_path)
if ext == '.xacro':
# it's a xacro file, preprocess it
if tld is not None:
tld = base_path / PurePosixPath(tld)
urdf_string = xacro.main(file_path, tld)
urdf = URDF.loadstr(urdf_string, file_path)
else: # pragma nocover
urdf = URDF.loadstr(open(file_path).read(), file_path)
return urdf.elinks, urdf.name
# @classmethod
# def dh_to_ets(cls, robot):
# """
# Converts a robot modelled with standard or modified DH parameters to
# an ERobot representation
# :param robot: The robot model to be converted
# :type robot: SerialLink
    # :return: An ERobot representation of the robot
# :rtype: ets class
# """
# ets = []
# q_idx = []
# M = 0
# for j in range(robot.n):
# L = robot.links[j]
# # Method for modified DH parameters
# if robot.mdh:
# # Append Tx(a)
# if L.a != 0:
# ets.append(ET.Ttx(L.a))
# M += 1
# # Append Rx(alpha)
# if L.alpha != 0:
# ets.append(ET.TRx(L.alpha))
# M += 1
# if L.is_revolute:
# # Append Tz(d)
# if L.d != 0:
# ets.append(ET.Ttz(L.d))
# M += 1
# # Append Rz(q)
# ets.append(ET.TRz(joint=j+1))
# q_idx.append(M)
# M += 1
# else:
# # Append Tz(q)
# ets.append(ET.Ttz(joint=j+1))
# q_idx.append(M)
# M += 1
# # Append Rz(theta)
# if L.theta != 0:
# ets.append(ET.TRz(L.alpha))
# M += 1
# return cls(
# ets,
# q_idx,
# robot.name,
# robot.manuf,
# robot.base,
# robot.tool)
@property
def qlim(self):
v = np.zeros((2, self.n))
j = 0
for i in range(len(self.links)):
if self.links[i].isjoint:
v[:, j] = self.links[i].qlim
j += 1
return v
# @property
# def qdlim(self):
# return self.qdlim
# --------------------------------------------------------------------- #
@property
def n(self):
return self._n
# --------------------------------------------------------------------- #
@property
def elinks(self):
# return self._linkdict
return self._links
# --------------------------------------------------------------------- #
@property
def link_dict(self):
return self._linkdict
# --------------------------------------------------------------------- #
@property
def base_link(self):
return self._base_link
@base_link.setter
def base_link(self, link):
if isinstance(link, ELink):
self._base_link = link
else:
# self._base_link = self.links[link]
raise TypeError('Must be an ELink')
# self._reset_fk_path()
# --------------------------------------------------------------------- #
# TODO get configuration string
@property
def ee_links(self):
return self._ee_links
# def add_ee(self, link):
# if isinstance(link, ELink):
# self._ee_link.append(link)
# else:
# raise ValueError('must be an ELink')
# self._reset_fk_path()
@ee_links.setter
def ee_links(self, link):
if isinstance(link, ELink):
self._ee_links = [link]
elif isinstance(link, list) and \
all([isinstance(x, ELink) for x in link]):
self._ee_links = link
else:
raise TypeError('expecting an ELink or list of ELinks')
# self._reset_fk_path()
# --------------------------------------------------------------------- #
# @property
# def ets(self):
# return self._ets
# --------------------------------------------------------------------- #
# @property
# def M(self):
# return self._M
# --------------------------------------------------------------------- #
# @property
# def q_idx(self):
# return self._q_idx
# --------------------------------------------------------------------- #
def ets(self, ee=None):
if ee is None:
if len(self.ee_links) == 1:
link = self.ee_links[0]
else:
raise ValueError(
'robot has multiple end-effectors, specify one')
# elif isinstance(ee, str) and ee in self._linkdict:
# ee = self._linkdict[ee]
elif isinstance(ee, ELink) and ee in self._links:
link = ee
else:
raise ValueError('end-effector is not valid')
ets = ETS()
# build the ETS string from ee back to root
while link is not None:
ets = link.ets() * ets
link = link.parent
return ets
def config(self):
s = ''
for link in self.links:
if link.v is not None:
if link.v.isprismatic:
s += 'P'
elif link.v.isrevolute:
s += 'R'
return s
# --------------------------------------------------------------------- #
def fkine(self, q=None, from_link=None, to_link=None):
'''
Evaluates the forward kinematics of a robot based on its ETS and
joint angles q.
T = fkine(q) evaluates forward kinematics for the robot at joint
configuration q.
T = fkine() as above except uses the stored q value of the
robot object.
Trajectory operation:
        Calculates fkine for each point on a trajectory of joints q, where
        q is (n x m), and the returned result is an SE3 sequence of length m
:param q: The joint angles/configuration of the robot (Optional,
if not supplied will use the stored q values).
:type q: float ndarray(n)
:return: The transformation matrix representing the pose of the
end-effector
:rtype: SE3
:notes:
        - The robot's base or tool transform, if present, is incorporated
into the result.
:references:
- Kinematic Derivatives using the Elementary Transform
Sequence, <NAME> and <NAME>
'''
if from_link is None:
from_link = self.base_link
if to_link is None:
to_link = self.ee_links[0]
trajn = 1
if q is None:
q = self.q
path, n = self.get_path(from_link, to_link)
use_jindex = True
try:
q = getvector(q, self.n, 'col')
except ValueError:
try:
q = getvector(q, n, 'col')
use_jindex = False
j = 0
except ValueError:
trajn = q.shape[1]
verifymatrix(q, (self.n, trajn))
for i in range(trajn):
tr = self.base.A
for link in path:
if link.isjoint:
if use_jindex:
T = link.A(q[link.jindex, i], fast=True)
else:
T = link.A(q[j, i], fast=True)
j += 1
else:
T = link.A(fast=True)
tr = tr @ T
if i == 0:
t = SE3(tr)
else:
t.append(SE3(tr))
return t
def fkine_all(self, q=None):
'''
        Tall = fkine_all(q) evaluates fkine for each joint within a robot and
        returns a trajectory of poses.
Tall = fkine_all() as above except uses the stored q value of the
robot object.
:param q: The joint angles/configuration of the robot (Optional,
if not supplied will use the stored q values).
:type q: float ndarray(n)
:return T: Homogeneous transformation trajectory
:rtype T: SE3 list
:notes:
        - The robot's base transform, if present, is incorporated
into the result.
:references:
- Kinematic Derivatives using the Elementary Transform
Sequence, <NAME> and <NAME>
'''
if q is None:
q = np.copy(self.q)
else:
q = getvector(q, self.n)
for link in self.links:
if link.isjoint:
t = link.A(q[link.jindex])
else:
t = link.A()
if link.parent is None:
link._fk = self.base * t
else:
link._fk = link.parent._fk * t
# Update the collision objects transform as well
for col in link.collision:
col.wT = link._fk
for gi in link.geometry:
gi.wT = link._fk
# Do the grippers now
for gripper in self.grippers:
for link in gripper.links:
# print(link.jindex)
if link.isjoint:
t = link.A(gripper.q[link.jindex])
else:
t = link.A()
link._fk = link.parent._fk * t
# Update the collision objects transform as well
for col in link.collision:
col.wT = link._fk
for gi in link.geometry:
gi.wT = link._fk
# def jacob0(self, q=None):
# """
# J0 = jacob0(q) is the manipulator Jacobian matrix which maps joint
# velocity to end-effector spatial velocity. v = J0*qd in the
# base frame.
# J0 = jacob0() as above except uses the stored q value of the
# robot object.
# :param q: The joint angles/configuration of the robot (Optional,
# if not supplied will use the stored q values).
# :type q: float ndarray(n)
# :return J: The manipulator Jacobian in ee frame
# :rtype: float ndarray(6,n)
# :references:
# - Kinematic Derivatives using the Elementary Transform
# Sequence, <NAME> and <NAME>
# """
# if q is None:
# q = np.copy(self.q)
# else:
# q = getvector(q, self.n)
# T = (self.base.inv() * self.fkine(q)).A
# U = np.eye(4)
# j = 0
# J = np.zeros((6, self.n))
# for link in self._fkpath:
# for k in range(link.M):
# if k != link.q_idx:
# U = U @ link.ets[k].T().A
# else:
# # self._jacoblink(link, k, T)
# U = U @ link.ets[k].T(q[j]).A
# Tu = np.linalg.inv(U) @ T
# n = U[:3, 0]
# o = U[:3, 1]
# a = U[:3, 2]
# x = Tu[0, 3]
# y = Tu[1, 3]
# z = Tu[2, 3]
# if link.ets[k].axis == 'Rz':
# J[:3, j] = (o * x) - (n * y)
# J[3:, j] = a
# elif link.ets[k].axis == 'Ry':
# J[:3, j] = (n * z) - (a * x)
# J[3:, j] = o
# elif link.ets[k].axis == 'Rx':
# J[:3, j] = (a * y) - (o * z)
# J[3:, j] = n
# elif link.ets[k].axis == 'tx':
# J[:3, j] = n
# J[3:, j] = np.array([0, 0, 0])
# elif link.ets[k].axis == 'ty':
# J[:3, j] = o
# J[3:, j] = np.array([0, 0, 0])
# elif link.ets[k].axis == 'tz':
# J[:3, j] = a
# J[3:, j] = np.array([0, 0, 0])
# j += 1
# return J
def get_path(self, from_link, to_link):
path = []
n = 0
link = to_link
path.append(link)
if link.isjoint:
n += 1
while link != from_link:
link = link.parent
path.append(link)
if link.isjoint:
n += 1
path.reverse()
return path, n
def jacob0(
self, q=None, from_link=None, to_link=None,
offset=None, T=None):
if from_link is None:
from_link = self.base_link
if to_link is None:
to_link = self.ee_links[0]
if offset is None:
offset = SE3()
path, n = self.get_path(from_link, to_link)
if q is None:
q = np.copy(self.q)
else:
try:
q = getvector(q, n)
except ValueError:
q = getvector(q, self.n)
if T is None:
T = (self.base.inv()
* self.fkine(q, from_link=from_link, to_link=to_link)
* offset)
T = T.A
U = np.eye(4)
j = 0
J = np.zeros((6, n))
for link in path:
if link.isjoint:
U = U @ link.A(q[j], fast=True)
if link == to_link:
U = U @ offset.A
Tu = np.linalg.inv(U) @ T
n = U[:3, 0]
o = U[:3, 1]
a = U[:3, 2]
x = Tu[0, 3]
y = Tu[1, 3]
z = Tu[2, 3]
if link.v.axis == 'Rz':
J[:3, j] = (o * x) - (n * y)
J[3:, j] = a
elif link.v.axis == 'Ry':
J[:3, j] = (n * z) - (a * x)
J[3:, j] = o
elif link.v.axis == 'Rx':
J[:3, j] = (a * y) - (o * z)
J[3:, j] = n
elif link.v.axis == 'tx':
J[:3, j] = n
J[3:, j] = np.array([0, 0, 0])
elif link.v.axis == 'ty':
J[:3, j] = o
J[3:, j] = np.array([0, 0, 0])
elif link.v.axis == 'tz':
J[:3, j] = a
J[3:, j] = np.array([0, 0, 0])
j += 1
else:
U = U @ link.A(fast=True)
return J
def jacobe(self, q=None, from_link=None, to_link=None, offset=None):
"""
Je = jacobe(q) is the manipulator Jacobian matrix which maps joint
velocity to end-effector spatial velocity. v = Je*qd in the
end-effector frame.
Je = jacobe() as above except uses the stored q value of the
robot object.
:param q: The joint angles/configuration of the robot (Optional,
if not supplied will use the stored q values).
:type q: float ndarray(n)
:return J: The manipulator Jacobian in ee frame
:rtype: float ndarray(6,n)
"""
if from_link is None:
from_link = self.base_link
if to_link is None:
to_link = self.ee_links[0]
if offset is None:
offset = SE3()
if q is None:
q = np.copy(self.q)
# else:
# q = getvector(q, n)
T = (self.base.inv()
* self.fkine(q, from_link=from_link, to_link=to_link)
* offset)
J0 = self.jacob0(q, from_link, to_link, offset, T)
Je = self.jacobev(q, from_link, to_link, offset, T) @ J0
return Je
def hessian0(self, q=None, J0=None, from_link=None, to_link=None):
"""
The manipulator Hessian tensor maps joint acceleration to end-effector
spatial acceleration, expressed in the world-coordinate frame. This
        function calculates this based on the ETS of the robot. One of J0 or q
is required. Supply J0 if already calculated to save computation time
:param q: The joint angles/configuration of the robot (Optional,
if not supplied will use the stored q values).
:type q: float ndarray(n)
:param J0: The manipulator Jacobian in the 0 frame
:type J0: float ndarray(6,n)
:return: The manipulator Hessian in 0 frame
:rtype: float ndarray(6,n,n)
:references:
- Kinematic Derivatives using the Elementary Transform
Sequence, <NAME> and <NAME>
"""
if from_link is None:
from_link = self.base_link
if to_link is None:
to_link = self.ee_links[0]
path, n = self.get_path(from_link, to_link)
if J0 is None:
if q is None:
q = np.copy(self.q)
else:
q = getvector(q, n)
J0 = self.jacob0(q, from_link, to_link)
else:
verifymatrix(J0, (6, n))
H = np.zeros((6, n, n))
for j in range(n):
for i in range(j, n):
H[:3, i, j] = np.cross(J0[3:, j], J0[:3, i])
H[3:, i, j] = np.cross(J0[3:, j], J0[3:, i])
if i != j:
H[:3, j, i] = H[:3, i, j]
return H
def manipulability(self, q=None, J=None, from_link=None, to_link=None):
"""
        Calculates the manipulability index (scalar) of the robot at the joint
configuration q. It indicates dexterity, that is, how isotropic the
robot's motion is with respect to the 6 degrees of Cartesian motion.
The measure is high when the manipulator is capable of equal motion
in all directions and low when the manipulator is close to a
singularity. One of J or q is required. Supply J if already
calculated to save computation time
:param q: The joint angles/configuration of the robot (Optional,
if not supplied will use the stored q values).
:type q: float ndarray(n)
:param J: The manipulator Jacobian in any frame
:type J: float ndarray(6,n)
:return: The manipulability index
:rtype: float
:references:
- Analysis and control of robot manipulators with redundancy,
<NAME>,
- Robotics Research: The First International Symposium (<NAME>
and <NAME>, eds.), pp. 735-747, The MIT press, 1984.
"""
if from_link is None:
from_link = self.base_link
if to_link is None:
to_link = self.ee_links[0]
path, n = self.get_path(from_link, to_link)
if J is None:
if q is None:
q =
|
np.copy(self.q)
|
numpy.copy
|
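# A minimal numpy sketch of the Yoshikawa manipulability measure referenced in the
# manipulability() docstring of the row above: m = sqrt(det(J @ J.T)) for a 6 x n
# Jacobian with n >= 6.  The Jacobian below is a made-up toy example, not one
# produced by the robot model in that row.
import numpy as np

def yoshikawa_manipulability(J):
    """sqrt(det(J J^T)); large for isotropic motion, near zero close to a singularity."""
    return np.sqrt(np.linalg.det(J @ J.T))

rng = np.random.default_rng(0)
J_toy = rng.standard_normal((6, 7))      # toy 6 x 7 Jacobian for a redundant arm
m = yoshikawa_manipulability(J_toy)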
import numpy as np
import matplotlib.pyplot as plt
import umap
import Macosko_utils as utils
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# from evaluate import kNN_acc, kmeans_acc_ari_ami
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from munkres import Munkres
import csv
from numpy import savetxt
from pandas import DataFrame
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
import os
import glob
from matplotlib.backends.backend_pdf import PdfPages
from MantelTest import Mantel
from hub_toolbox.distances import euclidean_distance
from sklearn.model_selection import StratifiedShuffleSplit
import pandas as pd
import numba
def kNN_acc(X, L):
X_train, X_test, Y_train, Y_test = train_test_split(X, L, random_state=0)
knc = KNeighborsClassifier(n_neighbors=1)
knc.fit(X_train, Y_train)
Y_pred = knc.predict(X_test)
score = knc.score(X_test, Y_test)
return score
def kmeans_acc_ari_ami(X, L):
"""
    Calculate clustering accuracy, ARI and AMI. Requires scikit-learn installed.
    # Arguments
        X: feature matrix, numpy.array with shape `(n_samples, n_features)`
        L: true labels, numpy.array with shape `(n_samples,)`
    # Return
        accuracy in [0,1], adjusted Rand index, adjusted mutual information
"""
n_clusters = len(np.unique(L))
kmeans = KMeans(n_clusters=n_clusters, n_init=20)
y_pred = kmeans.fit_predict(X)
y_pred = y_pred.astype(np.int64)
y_true = L.astype(np.int64)
assert y_pred.size == y_true.size
y_pred = y_pred.reshape((1, -1))
y_true = y_true.reshape((1, -1))
# D = max(y_pred.max(), L.max()) + 1
# w = np.zeros((D, D), dtype=np.int64)
# for i in range(y_pred.size):
# w[y_pred[i], L[i]] += 1
# # from sklearn.utils.linear_assignment_ import linear_assignment
# from scipy.optimize import linear_sum_assignment
# row_ind, col_ind = linear_sum_assignment(w.max() - w)
#
# return sum([w[i, j] for i in row_ind for j in col_ind]) * 1.0 / y_pred.size
if len(np.unique(y_pred)) == len(np.unique(y_true)):
C = len(np.unique(y_true))
cost_m = np.zeros((C, C), dtype=float)
for i in np.arange(0, C):
a = np.where(y_pred == i)
# print(a.shape)
a = a[1]
l = len(a)
for j in np.arange(0, C):
yj = np.ones((1, l)).reshape(1, l)
yj = j * yj
cost_m[i, j] = np.count_nonzero(yj - y_true[0, a])
mk = Munkres()
best_map = mk.compute(cost_m)
(_, h) = y_pred.shape
for i in np.arange(0, h):
c = y_pred[0, i]
v = best_map[c]
v = v[1]
y_pred[0, i] = v
acc = 1 - (np.count_nonzero(y_pred - y_true) / h)
else:
acc = 0
# print(y_pred.shape)
y_pred = y_pred[0]
y_true = y_true[0]
ari, ami = adjusted_rand_score(y_true, y_pred), adjusted_mutual_info_score(y_true, y_pred)
return acc, ari, ami
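# A compact sketch of the same label-matching idea used in kmeans_acc_ari_ami above,
# but via scipy's linear_sum_assignment (as in the commented-out block): build a
# contingency matrix and take the best one-to-one mapping between predicted and
# true cluster ids.  The label arrays in the example comment are made up.
from scipy.optimize import linear_sum_assignment

def hungarian_cluster_accuracy(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=np.int64)
    y_pred = np.asarray(y_pred, dtype=np.int64)
    D = max(y_true.max(), y_pred.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for t, p in zip(y_true, y_pred):
        w[p, t] += 1                                       # contingency counts
    row_ind, col_ind = linear_sum_assignment(w.max() - w)  # maximise matched counts
    return w[row_ind, col_ind].sum() / y_true.size

# e.g. hungarian_cluster_accuracy([0, 0, 1, 1], [1, 1, 0, 0]) == 1.0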
@numba.jit()
def mantel_test(X, L, embed, describe = True):
sss = StratifiedShuffleSplit(n_splits=50, test_size=1000, random_state=0)
sss.get_n_splits(X, L)
label_type = list(set(L))
r_lst = np.array([])
p_lst = np.array([])
for _, idx in sss.split(X, L):
# print('Index: ', idx)
# X_test = X[idx]
# y_train =
X_high, L_hl = X[idx], L[idx]
X_low = embed[idx]
# print(X_high.shape, L_high.shape)
# print(X_low.shape, L_low.shape)
label_idx = []
for _, i in enumerate(label_type):
l_idx = np.where(L_hl == i)
label_idx.append(l_idx)
# print(label_type)
# label_idx
X_high_lst = []
X_low_lst = []
# for _, i in enumerate(label_type):
# X_high_lst.append(X_high[label_idx[i]])
for i, _ in enumerate(label_type):
centroid =
|
np.mean(X_high[label_idx[i]], axis=0)
|
numpy.mean
|
"""
1HN In-phase/Anti-phase Proton CEST
===================================
Analyzes chemical exchange during the CEST block. Magnetization evolution is
calculated using the (6n)×(6n), two-spin matrix, where n is the number of
states::
{ Ix(a), Iy(a), Iz(a), IxSz(a), IySz(a), IzSz(a),
Ix(b), Iy(b), Iz(b), IxSz(b), IySz(b), IzSz(b), ... }
References
----------
| Yuwen, Sekhar and Kay. Angew Chem Int Ed (2017) 56:6122-6125
| Yuwen and Kay. J Biomol NMR (2017) 67:295-307
| Yuwen and Kay. J Biomol NMR (2018) 70:93-102
Note
----
A sample configuration file for this module is available using the command::
$ chemex config cest_1hn_ip_ap
"""
import functools as ft
import numpy as np
import numpy.linalg as nl
import chemex.experiments.helper as ceh
import chemex.helper as ch
import chemex.nmr.liouvillian as cnl
_SCHEMA = {
"type": "object",
"properties": {
"experiment": {
"type": "object",
"properties": {
"d1": {"type": "number"},
"time_t1": {"type": "number"},
"carrier": {"type": "number"},
"b1_frq": {"type": "number"},
"b1_inh_scale": {"type": "number", "default": 0.1},
"b1_inh_res": {"type": "integer", "default": 11},
"observed_state": {
"type": "string",
"pattern": "[a-z]",
"default": "a",
},
"eta_block": {"type": "integer", "default": 0},
},
"required": ["d1", "time_t1", "carrier", "b1_frq"],
}
},
}
def read(config):
ch.validate(config, _SCHEMA)
config["basis"] = cnl.Basis(type="ixyzsz_eq", spin_system="hn")
config["fit"] = _fit_this()
config["data"]["filter_ref_planes"] = True
return ceh.load_experiment(config=config, pulse_seq_cls=PulseSeq)
def _fit_this():
return {
"rates": [
"r2_i_{states}",
"r1_i_{observed_state}",
"r1_s_{observed_state}",
"etaxy_i_{observed_state}",
"etaz_i_{observed_state}",
],
"model_free": [
"tauc_{observed_state}",
"s2_{observed_state}",
"khh_{observed_state}",
],
}
class PulseSeq:
def __init__(self, config, propagator):
self.prop = propagator
settings = config["experiment"]
self.time_t1 = settings["time_t1"]
self.d1 = settings["d1"]
self.taua = 2.38e-3
self.prop.carrier_i = settings["carrier"]
self.prop.b1_i = settings["b1_frq"]
self.prop.b1_i_inh_scale = settings["b1_inh_scale"]
self.prop.b1_i_inh_res = settings["b1_inh_res"]
self.eta_block = settings["eta_block"]
self.observed_state = settings["observed_state"]
self.prop.detection = f"[2izsz_{self.observed_state}]"
self.dephased = settings["b1_inh_scale"] == np.inf
if self.eta_block > 0:
self.taud = max(self.d1 - self.time_t1, 0.0)
self.taue = 0.5 * self.time_t1 / self.eta_block
else:
self.taud = self.d1
self.p90_i = self.prop.perfect90_i
self.p180_sx = self.prop.perfect180_s[0]
self.p180_isx = self.prop.perfect180_i[0] @ self.prop.perfect180_s[0]
@ft.lru_cache(maxsize=10000)
def calculate(self, offsets, params_local):
self.prop.update(params_local)
self.prop.offset_i = 0.0
d_taud, d_taua = self.prop.delays([self.taud, self.taua])
start = d_taud @ self.prop.get_start_magnetization(terms="ie")
start = self.prop.keep_components(start, terms=["ie", "iz"])
intst = {}
for offset in set(offsets):
self.prop.offset_i = offset
if self.eta_block > 0:
d_2taue = self.prop.delays(2.0 * self.taue)
p_taue = self.prop.pulse_i(self.taue, 0.0, self.dephased)
cest_block = p_taue @ self.p180_sx @ d_2taue @ self.p180_sx @ p_taue
cest =
|
nl.matrix_power(cest_block, self.eta_block)
|
numpy.linalg.matrix_power
|
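# A small self-contained sketch of the pattern the row above ends with: when the
# same propagator block is applied k times back-to-back, the net propagator is the
# k-th matrix power of the block.  The 2x2 matrix here is a toy stand-in for the
# (6n)x(6n) Liouvillian block described in that module's docstring.
import numpy as np
import numpy.linalg as nl

block = np.array([[0.95, 0.02],
                  [0.05, 0.98]])          # toy one-step propagator
k = 8
net = nl.matrix_power(block, k)           # same as chaining the block k times

chained = np.eye(2)
for _ in range(k):
    chained = block @ chained
assert np.allclose(net, chained)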
import csv
import datetime
import os
import subprocess
import sys
from distutils import dir_util
import monopsr
import numpy as np
import tensorflow as tf
from PIL import Image
from monopsr.core import box_3d_projector
from monopsr.core import summary_utils
from monopsr.datasets.kitti import calib_utils
def save_predictions_box_2d_in_kitti_format(score_threshold,
dataset,
predictions_base_dir,
predictions_box_2d_dir,
global_step):
"""Converts and saves predictions (box_3d) into text files required for KITTI evaluation
Args:
score_threshold: score threshold to filter predictions
dataset: Dataset object
        predictions_box_2d_dir: predictions (box_2d) folder
predictions_base_dir: predictions base folder
global_step: global step
"""
score_threshold = round(score_threshold, 3)
data_split = dataset.data_split
# Output folder
kitti_predictions_2d_dir = predictions_base_dir + \
'/kitti_predictions_3d/{}/{}/{}/data'.format(data_split, score_threshold, global_step)
if not os.path.exists(kitti_predictions_2d_dir):
os.makedirs(kitti_predictions_2d_dir)
# Do conversion
num_samples = dataset.num_samples
num_valid_samples = 0
print('\nGlobal step:', global_step)
print('Converting detections from:', predictions_box_2d_dir)
    print('2D detections being saved to:', kitti_predictions_2d_dir)
for sample_idx in range(num_samples):
# Print progress
sys.stdout.write('\rConverting {} / {}'.format(sample_idx + 1, num_samples))
sys.stdout.flush()
sample_name = dataset.sample_list[sample_idx].name
prediction_file = sample_name + '.txt'
kitti_predictions_2d_file_path = kitti_predictions_2d_dir + '/' + prediction_file
predictions_file_path = predictions_box_2d_dir + '/' + prediction_file
# If no predictions, skip to next file
if not os.path.exists(predictions_file_path):
np.savetxt(kitti_predictions_2d_file_path, [])
continue
all_predictions = np.loadtxt(predictions_file_path).reshape(-1, 6)
# Change the order to be (x1, y1, x2, y2)
copied_predictions = np.copy(all_predictions)
all_predictions[:, 0:4] = copied_predictions[:, [1, 0, 3, 2]]
score_filter = all_predictions[:, 4] >= score_threshold
all_predictions = all_predictions[score_filter]
# If no predictions, skip to next file
if len(all_predictions) == 0:
np.savetxt(kitti_predictions_2d_file_path, [])
continue
num_valid_samples += 1
# To keep each value in its appropriate position, an array of -1000
# (N, 16) is allocated but only values [4:16] are used
kitti_predictions = np.full([all_predictions.shape[0], 16], -1000.0)
# To avoid estimating alpha, -10 is used as a placeholder
kitti_predictions[:, 3] = -10.0
# Get object types
all_pred_classes = all_predictions[:, 5].astype(np.int32)
obj_types = [dataset.classes[class_idx] for class_idx in all_pred_classes]
# 2D predictions
kitti_predictions[:, 4:8] = all_predictions[:, 0:4]
# Score
kitti_predictions[:, 15] = all_predictions[:, 4]
# Round detections to 3 decimal places
kitti_predictions = np.round(kitti_predictions, 3)
# Stack 3D predictions text
kitti_text_3d = np.column_stack([obj_types,
kitti_predictions[:, 1:16]])
# Save to text files
np.savetxt(kitti_predictions_2d_file_path, kitti_text_3d,
newline='\r\n', fmt='%s')
print('\nNum valid:', num_valid_samples)
print('Num samples:', num_samples)
def save_predictions_box_3d_in_kitti_format(score_threshold,
dataset,
predictions_base_dir,
predictions_box_3d_dir,
predictions_box_2d_dir,
global_step,
project_3d_box=False):
"""Converts and saves predictions (box_3d) into text files required for KITTI evaluation
Args:
score_threshold: score threshold to filter predictions
dataset: Dataset object
predictions_box_3d_dir: predictions (box_3d) folder
predictions_box_2d_dir: predictions (box_2d) folder
predictions_base_dir: predictions base folder
global_step: global step
project_3d_box: Bool whether to project 3D box to image space to get 2D box
"""
score_threshold = round(score_threshold, 3)
data_split = dataset.data_split
# Output folder
kitti_predictions_3d_dir = predictions_base_dir + \
'/kitti_predictions_3d/{}/{}/{}/data'.format(data_split, score_threshold, global_step)
if not os.path.exists(kitti_predictions_3d_dir):
os.makedirs(kitti_predictions_3d_dir)
# Do conversion
num_samples = dataset.num_samples
num_valid_samples = 0
print('\nGlobal step:', global_step)
print('Converting detections from:', predictions_box_3d_dir)
print('3D Detections being saved to:', kitti_predictions_3d_dir)
for sample_idx in range(num_samples):
# Print progress
sys.stdout.write('\rConverting {} / {}'.format(sample_idx + 1, num_samples))
sys.stdout.flush()
sample_name = dataset.sample_list[sample_idx].name
prediction_file = sample_name + '.txt'
kitti_predictions_3d_file_path = kitti_predictions_3d_dir + '/' + prediction_file
predictions_3d_file_path = predictions_box_3d_dir + '/' + prediction_file
predictions_2d_file_path = predictions_box_2d_dir + '/' + prediction_file
# If no predictions, skip to next file
if not os.path.exists(predictions_3d_file_path):
np.savetxt(kitti_predictions_3d_file_path, [])
continue
all_predictions_3d = np.loadtxt(predictions_3d_file_path)
if len(all_predictions_3d) == 0:
|
np.savetxt(kitti_predictions_3d_file_path, [])
|
numpy.savetxt
|
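# A minimal sketch of the KITTI text layout built in the functions above: a (N, 16)
# array pre-filled with -1000, alpha set to -10 as a placeholder, the 2D box in
# columns 4:8 and the score in column 15, then written with np.savetxt.  The single
# detection, the 'Car' class name and the output file name below are made up.
import numpy as np

dets = np.array([[100.0, 50.0, 180.0, 120.0, 0.87]])   # x1, y1, x2, y2, score
kitti = np.full((dets.shape[0], 16), -1000.0)
kitti[:, 3] = -10.0                 # alpha placeholder
kitti[:, 4:8] = dets[:, 0:4]        # 2D box
kitti[:, 15] = dets[:, 4]           # score
rows = np.column_stack([['Car'] * dets.shape[0], np.round(kitti[:, 1:16], 3)])
np.savetxt('example_000001.txt', rows, newline='\r\n', fmt='%s')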
import numpy as np
import math
import scipy.stats
import copy
import random
from typing import Union, List, Tuple, Optional
from nnet import NNet
import constants as c
class Game:
"""
Implements Connect 4.
Attrs:
game_state: 2-axis numpy array that saves the current state of the game. 1 is player 1, -1 is player 2, 0 is an
empty square. Index ordering is (column, row).
player: Player whose turn is next.
finished: True if the game is won or drawn.
"""
def __init__(self):
self.game_state = np.zeros((c.COLUMNS, c.ROWS))
self.player = 1
self.finished = False
def decide_move(self, method: str, iterations: int, model_name: str = c.DEFAULT_MODEL_NAME,
print_out: bool = True) -> Union[int, np.ndarray]:
"""
Outputs a move for the current game state determined by a specified method.
:param method: 'nnet': neural network with tree search, 'simple_nnet': neural network without tree search,
'mcts': Monte Carlo tree search, 'input': input via terminal.
:param iterations: Number of iterations in tree searches
:param model_name: Structure name of the neural network
:param print_out: Prints out extra information by the neural network
:return: Determined best move
"""
if method == 'input':
while True:
try:
move = int(input('input column number between 1 and 7: ')) - 1
if move in self.get_legal_moves(self.game_state):
return move
except ValueError:
pass
print('input not valid')
if method == 'mcts':
pi = self.tree_search(iterations=iterations)
return
|
np.argmax(pi)
|
numpy.argmax
|
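# A short numpy-only sketch of the move selection the row above ends with: the tree
# search returns a policy vector pi over the 7 columns and the greedy move is its
# argmax.  The policy values and the legality mask below are made up; the original
# class uses get_legal_moves() for this instead.
import numpy as np

pi = np.array([0.05, 0.10, 0.40, 0.20, 0.15, 0.05, 0.05])   # toy visit counts / policy
legal = np.array([1, 1, 1, 0, 1, 1, 1])                     # column 3 is full
move = int(np.argmax(pi * legal))                           # -> 2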
__author__ = "<NAME>, <NAME>"
import sys
import os
import string
from qpsolvers import solve_qp
import numpy as np
import math
from baselines.gail.planner.velocityplanner import BVPStage
from baselines.gail.planner.velocityplanner import applybvpstage
# simple version
eps_H = 1e-2
est_minacc=0.5
# input: V_0, a_0 (S_0 = 0), V_tgt (a_tgt = 0), S_tgt = S
# Solution
# 1. segment the whole acceleration/deceleration profile into N=5 parts
# 2. minimize, over sampled t, s''(t) + s'''(t)
# 3. equality constraints: V_0, a_0, S_0, V_tgt, a_tgt, S_tgt, V_previous = V_next, a_previous = a_next
# 4. inequality constraints: sampled t -> Vmax; t -> s''(t) >= 0 for acceleration or t -> s''(t) <= 0 for deceleration
# V_0, a_0, S_0 = 7.775, 0.8, 0
# init_para = np.array([V_0, a_0])
# S_tgt = 29.4
# V_tgt = 8.333
# des_para = np.array([S_tgt, V_tgt, 0])
degree, num_seg = 5, 5 # degree of polynomials + 1, number of spline segments
# degree * num_seg
T_sampled = 20  # number of sampled points per time segment
def ti(t):
return np.array([0, t, t * t, pow(t, 3), pow(t, 4)])
def dti(t):
return np.array([0, 1, 2 * t, 3 * t * t, 4 * pow(t, 3)])
def ddti(t):
return np.array([0, 0, 2, 6 * t, 12 * t * t])
def dddti(t):
return np.array([0, 0, 0, 6, 24 * t])
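# A small helper sketch (an illustration, not used by the planner below): with a
# length-`degree` coefficient vector c for one segment, the basis rows above give
# the position contribution, velocity, acceleration and jerk as plain dot products.
# Note that ti() has a zero in the constant slot, so the constant coefficient does
# not enter s(t) here.
def _example_eval_segment(c, t):
    s = np.dot(c, ti(t))        # position contribution of this segment
    v = np.dot(c, dti(t))       # first derivative
    a = np.dot(c, ddti(t))      # second derivative
    j = np.dot(c, dddti(t))     # third derivative
    return s, v, a, j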
def calcspeed(X, V_0, a_0, V_tgt):
S_tgt = X
V_mid = V_tgt + V_0
V_mid = V_mid / 2.0
t_e = S_tgt / V_mid
t_est = t_e / num_seg
t_int = t_est / T_sampled # time interval after split into time segments
H =
|
np.zeros((degree * num_seg, degree * num_seg))
|
numpy.zeros
|
import json
import inflect
import numpy as np
import requests
import tagme
from nltk.stem.porter import *
stemmer = PorterStemmer()
p = inflect.engine()
tagme.GCUBE_TOKEN = ""
def sort_dict_by_values(dictionary):
keys = []
values = []
for key, value in sorted(dictionary.items(), key=lambda item: (item[1], item[0]), reverse=True):
keys.append(key)
values.append(value)
return keys, values
def preprocess_relations(file, prop=False):
relations = {}
with open(file, encoding='utf-8') as f:
content = f.readlines()
for line in content:
split_line = line.split()
key = ' '.join(split_line[2:])[1:-3].lower()
key = ' '.join([stemmer.stem(word) for word in key.split()])
if key not in relations:
relations[key] = []
uri = split_line[0].replace('<', '').replace('>', '')
if prop is True:
uri_property = uri.replace('/ontology/', '/property/')
relations[key].extend([uri, uri_property])
else:
relations[key].append(uri)
return relations
def get_earl_entities(query, earl_url='http://localhost:4999'):
result = {}
result['question'] = query
result['entities'] = []
result['relations'] = []
THRESHOLD = 0.1
response = requests.post(f'{earl_url}/processQuery',
headers={"Content-Type": "application/json"},
json={"nlquery": query, "pagerankflag": False})
json_response = json.loads(response.text)
type_list = []
chunk = []
for i in json_response['ertypes']:
type_list.append(i)
for i in json_response['chunktext']:
chunk.append([i['surfacestart'], i['surfacelength']])
keys = list(json_response['rerankedlists'].keys())
reranked_lists = json_response['rerankedlists']
for i in range(len(keys)):
if type_list[i] == 'entity':
entity = {}
entity['uris'] = []
entity['surface'] = chunk[i]
for r in reranked_lists[keys[i]]:
if r[0] > THRESHOLD:
uri = {}
uri['uri'] = r[1]
uri['confidence'] = r[0]
entity['uris'].append(uri)
if entity['uris'] != []:
result['entities'].append(entity)
if type_list[i] == 'relation':
relation = {}
relation['uris'] = []
relation['surface'] = chunk[i]
for r in reranked_lists[keys[i]]:
if r[0] > THRESHOLD:
uri = {}
uri['uri'] = r[1]
uri['confidence'] = r[0]
relation['uris'].append(uri)
if relation['uris'] != []:
result['relations'].append(relation)
return result
def get_tag_me_entities(query):
threshold = 0.1
try:
response = requests.get("https://tagme.d4science.org/tagme/tag?lang=en&gcube-token={}&text={}"
.format('1b4eb12e-d434-4b30-8c7f-91b3395b96e8-843339462', query))
entities = []
for annotation in json.loads(response.text)['annotations']:
confidence = float(annotation['link_probability'])
if confidence > threshold:
entity = {}
uris = {}
uri = 'http://dbpedia.org/resource/' + annotation['title'].replace(' ', '_')
uris['uri'] = uri
uris['confidence'] = confidence
surface = [annotation['start'], annotation['end'] - annotation['start']]
entity['uris'] = [uris]
entity['surface'] = surface
entities.append(entity)
except:
entities = []
print('get_tag_me_entities: ', query)
return entities
def get_nliwod_entities(query, hashmap):
ignore_list = []
entities = []
singular_query = [stemmer.stem(word) if p.singular_noun(word) == False else stemmer.stem(p.singular_noun(word)) for
word in query.lower().split(' ')]
string = ' '.join(singular_query)
words = query.split(' ')
indexlist = {}
surface = []
current = 0
locate = 0
for i in range(len(singular_query)):
indexlist[current] = {}
indexlist[current]['len'] = len(words[i]) - 1
indexlist[current]['surface'] = [locate, len(words[i]) - 1]
current += len(singular_query[i]) + 1
locate += len(words[i]) + 1
for key in hashmap.keys():
if key in string and len(key) > 2 and key not in ignore_list:
e_list = list(set(hashmap[key]))
k_index = string.index(key)
if k_index in indexlist.keys():
surface = indexlist[k_index]['surface']
else:
for i in indexlist:
if k_index > i and k_index < (i + indexlist[i]['len']):
surface = indexlist[i]['surface']
break
for e in e_list:
r_e = {}
r_e['surface'] = surface
r_en = {}
r_en['uri'] = e
r_en['confidence'] = 0.3
r_e['uris'] = [r_en]
entities.append(r_e)
return entities
def get_spotlight_entities(query):
entities = []
data = {
'text': query,
'confidence': '0.4',
'support': '10'
}
headers = {"accept": "application/json"}
response = requests.get('http://api.dbpedia-spotlight.org/en/annotate', params=data, headers=headers)
try:
response_json = response.text.replace('@', '')
output = json.loads(response_json)
if 'Resources' in output.keys():
resource = output['Resources']
for item in resource:
entity = {}
uri = {}
uri['uri'] = item['URI']
uri['confidence'] = float(item['similarityScore'])
entity['uris'] = [uri]
entity['surface'] = [int(item['offset']), len(item['surfaceForm'])]
entities.append(entity)
except json.JSONDecodeError:
print('Spotlight:', query)
return entities
def get_falcon_entities(query):
entities = []
relations = []
headers = {
'Content-Type': 'application/json',
}
params = (
('mode', 'long'),
)
data = "{\"text\": \"" + query + "\"}"
response = requests.post('https://labs.tib.eu/falcon/api', headers=headers, params=params,
data=data.encode('utf-8'))
try:
output = json.loads(response.text)
for i in output['entities']:
ent = {}
ent['surface'] = ""
ent_uri = {}
ent_uri['confidence'] = 0.9
ent_uri['uri'] = i[0]
ent['uris'] = [ent_uri]
entities.append(ent)
for i in output['relations']:
rel = {}
rel['surface'] = ""
rel_uri = {}
rel_uri['confidence'] = 0.9
rel_uri['uri'] = i[0]
rel['uris'] = [rel_uri]
relations.append(rel)
except:
print('get_falcon_entities: ', query)
return entities, relations
def merge_entity(old_e, new_e):
for i in new_e:
exist = False
for j in old_e:
for k in j['uris']:
if i['uris'][0]['uri'] == k['uri']:
k['confidence'] = max(k['confidence'], i['uris'][0]['confidence'])
exist = True
if not exist:
old_e.append(i)
return old_e
def merge_relation(old_e, new_e):
for i in range(len(new_e)):
for j in range(len(old_e)):
if new_e[i]['surface'] == old_e[j]['surface']:
for i1 in range(len(new_e[i]['uris'])):
notexist = True
for j1 in range(len(old_e[j]['uris'])):
if new_e[i]['uris'][i1]['uri'] == old_e[j]['uris'][j1]['uri']:
old_e[j]['uris'][j1]['confidence'] = max(old_e[j]['uris'][j1]['confidence'],
new_e[i]['uris'][i1]['confidence'])
notexist = False
if notexist:
old_e[j]['uris'].append(new_e[i]['uris'][i1])
return old_e
import argparse
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument('--qald_ver', type=str, required=True)
argparser.add_argument('--earl_url', default='http://localhost:4999', type=str)
args = argparser.parse_args()
with open(f'data/QALD/{args.qald_ver}.json', 'r', encoding='utf-8') as f:
data = json.load(f)
properties = preprocess_relations('data/dbpedia/dbpedia_3Eng_1_property.ttl', True)
print('properties: ', len(properties))
linked_data = []
count = 0
for q in data['questions']:
q_idx = [i for i, q_by_lang in enumerate(q['question']) if q_by_lang['language'] == 'en'][0]
query = q['question'][q_idx]['string']
earl = get_earl_entities(query, earl_url=args.earl_url)
tagme_e = get_tag_me_entities(query)
if len(tagme_e) > 0:
earl['entities'] = merge_entity(earl['entities'], tagme_e)
nliwod = get_nliwod_entities(query, properties)
if len(nliwod) > 0:
earl['relations'] = merge_entity(earl['relations'], nliwod)
spot_e = get_spotlight_entities(query)
if len(spot_e) > 0:
earl['entities'] = merge_entity(earl['entities'], spot_e)
e_falcon, r_falcon = get_falcon_entities(query)
if len(e_falcon) > 0:
earl['entities'] = merge_entity(earl['entities'], e_falcon)
if len(r_falcon) > 0:
earl['relations'] = merge_entity(earl['relations'], r_falcon)
esim = []
for i in earl['entities']:
i['uris'] = sorted(i['uris'], key=lambda k: k['confidence'], reverse=True)
esim.append(max([j['confidence'] for j in i['uris']]))
earl['entities'] = np.array(earl['entities'])
esim = np.array(esim)
inds = esim.argsort()[::-1]
earl['entities'] = earl['entities'][inds]
rsim = []
for i in earl['relations']:
i['uris'] = sorted(i['uris'], key=lambda k: k['confidence'], reverse=True)
rsim.append(max([j['confidence'] for j in i['uris']]))
earl['relations'] =
|
np.array(earl['relations'])
|
numpy.array
|
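# A self-contained sketch of the ranking step at the end of the row above: entities
# are sorted by their best uri confidence using np.array + argsort in descending
# order.  The two entities and their URIs below are made up.
import numpy as np

entities = [
    {'surface': [0, 6], 'uris': [{'uri': 'http://dbpedia.org/resource/Berlin', 'confidence': 0.6}]},
    {'surface': [10, 7], 'uris': [{'uri': 'http://dbpedia.org/resource/Germany', 'confidence': 0.9}]},
]
best = np.array([max(u['confidence'] for u in e['uris']) for e in entities])
order = best.argsort()[::-1]                      # highest confidence first
entities = list(np.array(entities, dtype=object)[order])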
import os, sys
import numpy as np
from six.moves import cPickle
from sklearn.metrics import roc_curve, auc, precision_recall_curve, accuracy_score, roc_auc_score, confusion_matrix
from scipy import stats
__all__ = [
"pearsonr",
"rsquare",
"accuracy",
"roc",
"pr",
"calculate_metrics"
]
# class MLMetrics(object):
class MLMetrics(object):
def __init__(self, objective='binary'):
self.objective = objective
self.metrics = []
def update(self, label, pred, other_lst):
met, _ = calculate_metrics(label, pred, self.objective)
if len(other_lst)>0:
met.extend(other_lst)
self.metrics.append(met)
self.compute_avg()
def compute_avg(self):
if len(self.metrics)>1:
self.avg = np.array(self.metrics).mean(axis=0)
self.sum = np.array(self.metrics).sum(axis=0)
else:
self.avg = self.metrics[0]
self.sum = self.metrics[0]
self.acc = self.avg[0]
self.auc = self.avg[1]
self.prc = self.avg[2]
self.tp = int(self.sum[3])
self.tn = int(self.sum[4])
self.fp = int(self.sum[5])
self.fn = int(self.sum[6])
if len(self.avg)>7:
self.other = self.avg[7:]
def pearsonr(label, prediction):
ndim = np.ndim(label)
if ndim == 1:
        corr = [stats.pearsonr(label, prediction)[0]]
else:
num_labels = label.shape[1]
corr = []
for i in range(num_labels):
#corr.append(np.corrcoef(label[:,i], prediction[:,i]))
corr.append(stats.pearsonr(label[:,i], prediction[:,i])[0])
return corr
def rsquare(label, prediction):
ndim = np.ndim(label)
if ndim == 1:
y = label
X = prediction
        m = np.dot(X, y) / np.dot(X, X)
        resid = y - m * X
        ym = y - np.mean(y)
        rsqr2 = 1 - np.dot(resid.T, resid) / np.dot(ym.T, ym)
metric = [rsqr2]
slope = [m]
else:
num_labels = label.shape[1]
metric = []
slope = []
for i in range(num_labels):
y = label[:,i]
X = prediction[:,i]
            m = np.dot(X, y) / np.dot(X, X)
            resid = y - m * X
            ym = y - np.mean(y)
            rsqr2 = 1 - np.dot(resid.T, resid) / np.dot(ym.T, ym)
metric.append(rsqr2)
slope.append(m)
return metric, slope
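# A tiny check of the closed-form slope used in rsquare() above: for a fit y ~ m*X
# through the origin, m = (X.y)/(X.X), and R^2 is one minus the residual sum of
# squares over the centred total sum of squares.  The data below are made up, and
# the function is only an illustration (it is not called anywhere in this module).
def _example_rsquare():
    X = np.array([1.0, 2.0, 3.0, 4.0])
    y = 2.0 * X + np.array([0.1, -0.1, 0.05, -0.05])
    m = np.dot(X, y) / np.dot(X, X)
    resid = y - m * X
    r2 = 1 - np.dot(resid, resid) / np.dot(y - y.mean(), y - y.mean())
    return m, r2          # m close to 2, r2 close to 1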
def accuracy(label, prediction):
ndim = np.ndim(label)
if ndim == 1:
metric = np.array(accuracy_score(label, np.round(prediction)))
else:
num_labels = label.shape[1]
metric = np.zeros((num_labels))
for i in range(num_labels):
metric[i] = accuracy_score(label[:,i], np.round(prediction[:,i]))
return metric
def roc(label, prediction):
ndim = np.ndim(label)
if ndim == 1:
fpr, tpr, thresholds = roc_curve(label, prediction)
score = auc(fpr, tpr)
metric = np.array(score)
curves = [(fpr, tpr)]
else:
num_labels = label.shape[1]
curves = []
metric = np.zeros((num_labels))
for i in range(num_labels):
fpr, tpr, thresholds = roc_curve(label[:,i], prediction[:,i])
score = auc(fpr, tpr)
metric[i]= score
curves.append((fpr, tpr))
return metric, curves
def pr(label, prediction):
ndim = np.ndim(label)
if ndim == 1:
precision, recall, thresholds = precision_recall_curve(label, prediction)
score = auc(recall, precision)
metric = np.array(score)
curves = [(precision, recall)]
else:
num_labels = label.shape[1]
curves = []
metric = np.zeros((num_labels))
for i in range(num_labels):
precision, recall, thresholds = precision_recall_curve(label[:,i], prediction[:,i])
score = auc(recall, precision)
metric[i] = score
curves.append((precision, recall))
return metric, curves
def tfnp(label, prediction):
try:
tn, fp, fn, tp = confusion_matrix(label, prediction).ravel()
except Exception:
        tp, tn, fp, fn = 0, 0, 0, 0
return tp, tn, fp, fn
def calculate_metrics(label, prediction, objective):
"""calculate metrics for classification"""
# import pdb; pdb.set_trace()
if (objective == "binary") | (objective == 'hinge'):
ndim = np.ndim(label)
#if ndim == 1:
# label = one_hot_labels(label)
correct = accuracy(label, prediction)
auc_roc, roc_curves = roc(label, prediction)
auc_pr, pr_curves = pr(label, prediction)
# import pdb; pdb.set_trace()
if ndim == 2:
prediction=prediction[:,0]
label = label[:,0]
# pred_class = prediction[:,0]>0.5
pred_class = prediction>0.5
# tp, tn, fp, fn = tfnp(label[:,0], pred_class)
tp, tn, fp, fn = tfnp(label, pred_class)
# tn8, fp8, fn8, tp8 = tfnp(label[:,0], prediction[prediction>0.8][:,0])
# import pdb; pdb.set_trace()
mean = [np.nanmean(correct), np.nanmean(auc_roc), np.nanmean(auc_pr),tp, tn, fp, fn]
std = [np.nanstd(correct), np.nanstd(auc_roc), np.nanstd(auc_pr)]
elif objective == "categorical":
correct = np.mean(np.equal(np.argmax(label, axis=1), np.argmax(prediction, axis=1)))
auc_roc, roc_curves = roc(label, prediction)
auc_pr, pr_curves = pr(label, prediction)
mean = [np.nanmean(correct), np.nanmean(auc_roc), np.nanmean(auc_pr)]
std = [np.nanstd(correct), np.nanstd(auc_roc), np.nanstd(auc_pr)]
for i in range(label.shape[1]):
label_c, prediction_c = label[:,i], prediction[:,i]
auc_roc, roc_curves = roc(label_c, prediction_c)
mean.append(np.nanmean(auc_roc))
std.append(np.nanstd(auc_roc))
elif (objective == 'squared_error') | (objective == 'kl_divergence') | (objective == 'cdf'):
ndim = np.ndim(label)
#if ndim == 1:
# label = one_hot_labels(label)
label[label<0.5] = 0
label[label>=0.5] = 1
# import pdb; pdb.set_trace()
correct = accuracy(label, prediction)
auc_roc, roc_curves = roc(label, prediction)
auc_pr, pr_curves = pr(label, prediction)
# import pdb; pdb.set_trace()
if ndim == 2:
prediction=prediction[:,0]
label = label[:,0]
# pred_class = prediction[:,0]>0.5
pred_class = prediction>0.5
# tp, tn, fp, fn = tfnp(label[:,0], pred_class)
tp, tn, fp, fn = tfnp(label, pred_class)
# mean = [np.nanmean(correct), np.nanmean(auc_roc), np.nanmean(auc_pr),tp, tn, fp, fn]
# std = [np.nanstd(correct), np.nanstd(auc_roc), np.nanstd(auc_pr)]
# squared_error
corr = pearsonr(label,prediction)
rsqr, slope = rsquare(label, prediction)
# mean = [np.nanmean(corr), np.nanmean(rsqr), np.nanmean(slope)]
# std = [np.nanstd(corr), np.nanstd(rsqr), np.nanstd(slope)]
mean = [np.nanmean(correct), np.nanmean(auc_roc), np.nanmean(auc_pr),tp, tn, fp, fn, np.nanmean(corr), np.nanmean(rsqr), np.nanmean(slope)]
std = [np.nanstd(correct), np.nanstd(auc_roc), np.nanstd(auc_pr),
|
np.nanstd(corr)
|
numpy.nanstd
|
from conv_net import ConvNet
from torchvision import datasets, transforms
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
import torch
sns.set(style="white")
sns.set_color_codes("dark")
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": 2.})
def plot_test_error(accuracies, iters):
"""
Plot the test error for the Enkf method
"""
fig, ax1 = plt.subplots()
acc = 100 - np.array(accuracies)
ax1.plot(acc, '.-', markersize=6., label=r'EnKF')
ax1.set_ylabel('Test error in %')
ax1.set_xlabel('Iterations')
plt.xticks(range(len(iters)), iters)
tkl = ax1.xaxis.get_ticklabels()
for label in tkl[::2]:
label.set_visible(False)
plt.tight_layout()
plt.legend()
plt.savefig('enkf_test_error.pdf',
bbox_inches='tight', pad_inches=0.1)
plt.show()
def plot_error_sgd_adam(accuracies):
"""
Plots test errors for different realizations of sigma ($\sigma=1, \sigma=3$)
when the network is initialized and optimized with SGD and Adam.
"""
accs = np.array(accuracies)
plt.plot(100 - accs[:49][:, 1].astype(float), label=r'SGD $\sigma=1$')
plt.plot(100 - accs[49:98][:, 1].astype(float), label=r'SGD $\sigma=3$')
plt.plot(100 - accs[99:147][:, 1].astype(float), label=r'ADAM $\sigma=1$')
plt.plot(100 - accs[147:][:, 1].astype(float), label=r'ADAM $\sigma=3$')
plt.xlabel('Epochs')
plt.ylabel('Test error in %')
plt.legend()
# plt.xticks(range(0, 50, 10), range(1, 51, 10))
plt.savefig('sgd_adam_test_error.pdf', bbox_inches='tight', pad_inches=0.1)
plt.show()
def plot_different_accuracies(iters):
"""
Plots accuracies when 500, 5000 and 10000 ensembles are used with EnKF.
"""
norm_loss = 100 - np.array(torch.load('acc_loss.pt')[0])
more_loss = 100 - np.array(torch.load('more_ensembles_acc_loss.pt')[0])
less_loss = 100 - np.array(torch.load('less_ensembles_acc_loss.pt')[0])
fig, ax1 = plt.subplots()
ax1.plot(less_loss, '.-', label='100 ensembles')
ax1.plot(norm_loss, '.-', label='5000 ensembles')
ax1.plot(more_loss, '.-', label='10000 ensembles')
ax1.set_ylabel('Test Error in %')
ax1.set_xlabel('Iterations')
plt.xticks(range(len(iters)), iters)
tkl = ax1.xaxis.get_ticklabels()
for label in tkl[::2]:
label.set_visible(False)
plt.legend()
plt.show()
fig.savefig('ensembles_diff_test_accuracies.pdf')
def plot_act_func_accuracies(iters):
norm_loss = 100 - np.array(torch.load('acc_loss.pt')[0])
more_loss = 100 - np.array(torch.load('relu_acc_loss.pt')[0])
less_loss = 100 - np.array(torch.load('tanh_acc_loss.pt')[0])
fig, ax1 = plt.subplots(figsize=(6, 6))
ax1.plot(norm_loss, '.-', label='Logistic Function')
ax1.plot(more_loss, '.-', label='ReLU')
ax1.plot(less_loss, '.-', label='Tanh')
ax1.set_ylabel('Test Error in %')
ax1.set_xlabel('Iterations')
plt.xticks(range(len(iters)), iters)
tkl = ax1.xaxis.get_ticklabels()
for label in tkl[::2]:
label.set_visible(False)
# ax1.set_title()
# plt.tight_layout()
plt.legend()
plt.show()
fig.savefig('act_func_test_accuracies.pdf',
bbox_inches='tight', pad_inches=0.1)
def adaptive_test_loss_splitted(normal_loss, dynamic_loss):
adaptive_ta = 100 - np.unique(dynamic_loss.get('test_loss'))
iterations = dynamic_loss.get('iteration')
iterations.insert(0, 0)
n_ens = dynamic_loss.get('n_ensembles')
n_ens.insert(0, 5000)
reps = dynamic_loss.get('model_reps')
reps.insert(0, 8)
normal_loss = 100 - np.array(normal_loss[0][:len(adaptive_ta)])
# prepare plots
fig, (ax1, ax2) = plt.subplots(
nrows=1, ncols=2, sharex=True, figsize=(8.5, 4))
ax3 = ax2.twinx()
# plot 1
marker = 'o'
p11, = ax1.plot(adaptive_ta, marker=marker, label='fixed')
p12, = ax1.plot(normal_loss, marker=marker, label='adaptive')
ax1.set_xticks(range(len(iterations)))
ax1.set_xticklabels(iterations)
ax1.set_ylabel('Test Error in %')
ax1.set_xlabel('Iterations')
# plot 2
marker = 's--'
markersize = 6
# plot for n_ens normal
p21, = ax2.plot(range(len(n_ens)), [5000] *
len(n_ens), marker, markersize=markersize)
# plot for n_ens dynamic
p22, = ax2.plot(n_ens, marker, markersize=markersize)
# empty fake plot for the correct label color
ax2p = ax2.plot([], marker, label='# ensembles', c='k')
ax2.set_ylabel('Number of ensembles')
# plot 3
marker = '*--'
markersize = 8
# plot for reps normal
p31, = ax3.plot(range(len(reps)), [8]*len(reps), marker,
c=p21.get_color(), ms=markersize)
# plot for reps dynamic
p32, = ax3.plot(range(len(reps)), reps, marker,
c=p22.get_color(), ms=markersize)
# empty fake plot for the correct label color
ax3p = ax3.plot([], marker, label='repetitions', c='k', ms=markersize)
ax3.set_xticks(range(len(reps)))
# ax3.tick_params(axis='y')
ax3.set_ylabel('Number of repetitions')
# merge labels into one legend
lgd = ax2p + ax3p
labs = [l.get_label() for l in lgd]
ax1.legend(prop={'size': 10})
ax2.legend(lgd, labs, prop={'size': 10})
# remove ax ticks from second plot
ax2.tick_params(axis=u'both', which=u'both', length=0)
ax3.yaxis.set_tick_params(length=0)
fig.tight_layout()
fig.subplots_adjust(top=0.942, bottom=0.166, left=0.095, right=0.918,
hspace=0.1, wspace=0.388)
fig.savefig('dynamic_changes_splitted.pdf', format='pdf',
bbox_inches='tight')
plt.show()
def plot_accuracy_all_iteration_std(startswith, inset=True, path='.'):
fig, ax1 = plt.subplots()
for starts in startswith:
if starts.startswith('SGD'):
mu_sgd, std_sgd = _load_pt_files(starts, path)
mu_sgd = np.insert(mu_sgd, 0, 0)
std_sgd = np.insert(std_sgd, 0, 0)
mu_sgd = 100 - mu_sgd
elif starts.startswith('acc'):
mu_enkf, std_enkf = _load_pt_files(starts, path)
mu_enkf =
|
np.insert(mu_enkf, 0, 0)
|
numpy.insert
|
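# A numpy-only sketch of the curve preparation done in the plotting helpers of the
# row above: prepend a starting point with np.insert and convert accuracies (%) to
# test error before plotting.  The accuracy values below are made up.
import numpy as np

acc = np.array([72.0, 85.5, 90.1, 92.3])
err = 100 - np.insert(acc, 0, 0)     # -> [100. , 28. , 14.5, 9.9, 7.7]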
# -*- coding: utf-8 -*-
"""
@author: alexyang
@contact: <EMAIL>
@file: utils.py
@time: 2018/4/20 21:36
@desc:
"""
import os
import numpy as np
import codecs
import pickle
def get_glove_vectors(vector_file):
pre_trained_vectors = {}
with codecs.open(vector_file, 'r', encoding='utf8')as reader:
for line in reader:
content = line.strip().split()
pre_trained_vectors[content[0]] = np.array(list(map(float, content[1:])))
return pre_trained_vectors
def get_gensim_vectors(vector_file):
with codecs.open(vector_file, 'rb')as reader:
pre_train_vectors = pickle.load(reader)
return pre_train_vectors
def split_train_valid(data, fold_index, fold_size):
train = []
valid = []
for data_item in data:
valid.append(data_item[(fold_index-1)*fold_size: fold_index*fold_size])
train.append(np.concatenate([data_item[:(fold_index-1)*fold_size],
data_item[fold_index*fold_size:]]))
return train, valid
def batch_index(length, batch_size, n_iter=100):
index = range(length)
for j in range(n_iter):
for i in range(int(length / batch_size) + (1 if length % batch_size else 0)):
yield index[i * batch_size:(i + 1) * batch_size]
def onehot_encoding(y):
class_set = set(y)
n_class = len(class_set)
y_onehot_mapping = dict(zip(class_set, range(n_class)))
onehot = []
for label in y:
tmp = [0] * n_class
tmp[y_onehot_mapping[label]] = 1
onehot.append(tmp)
return
|
np.asarray(onehot, dtype=np.int32)
|
numpy.asarray
|
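# A self-contained sketch of what onehot_encoding() in the row above produces: each
# label becomes a 0/1 row with a single 1 at that class's column.  The labels below
# are made up; a sorted class-to-column mapping is used here for reproducibility,
# whereas the original relies on set ordering.
import numpy as np

labels = ['pos', 'neg', 'pos', 'neu']
classes = sorted(set(labels))                      # ['neg', 'neu', 'pos']
col = {c: i for i, c in enumerate(classes)}
onehot = np.zeros((len(labels), len(classes)), dtype=np.int32)
for row, lab in enumerate(labels):
    onehot[row, col[lab]] = 1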
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from pylab import rcParams
import check
"""
"data" contains:
1. nsite, nocc, sigma, U as from input
2. nsample (int) deduced from the quantities below
3. e [nsample]:
total energy
4. hd [nsample, nsite]:
each row is a vector of diagonal of h (on-site potential fluctuation)
5. pair [nsample, nsite]:
each row is a vector of pair densities
6. pop [nsample, nsite]:
each row is a vector of populations
7. rdm1 [nsample*nsite, nsite]:
every nsite rows correspond to a rdm1 (whose diagonal matches the
corresponding row of pop)
"""
# given a site in the 1pdm, generate indices with pbc for all pops and coherences we want to collect for that site
def get_pdm_terms(site_index, n, adj_sites=4, shift=0):
"""
inputs:
site_index (int): center site index
n (int): number of sites in lattice (for enforcing periodic boundary conditions)
adj_site (int): how many adjacent sites to collect pop and coherence values from
shift (int): return row indices shifted by this value (when collecting from matrix w/ compound index
returns:
ind_list (list of ints): all site indices to collect
shift_ind_list (list of ints): site indices shifted down i*n rows to collect the correct sample
coh_list (list of ints): coherences in fragment with central site
shift_coh_list (list of ints): coherences shifted down i*n rows to select particular sample
"""
# if a term in sites is out of bounds, subtract bound length
ind_list, shift_ind_list = [site_index], [site_index+shift]
coh_list, shift_coh_list = [], []
#print("Site: ", site_index)
for ind in range(site_index - adj_sites, site_index + adj_sites + 1):
        if ind != site_index:  # we've already added the target site population to ind_list and shift_ind_list
if ind < 0: ind += n
elif ind >= n: ind -= n
else: pass
#print(ind)
ind_list.append(ind)
shift_ind_list.append(ind+shift) # shift down to specific matrix in set we are selecting
coh_list.append(ind)
shift_coh_list.append(ind+shift)
return ind_list, shift_ind_list, coh_list, shift_coh_list
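# A small usage sketch of get_pdm_terms() above, illustrating the periodic
# wrap-around: on a 10-site lattice, the neighbours of site 1 within 2 sites are
# 9, 0, 2 and 3.  Wrapped in a function so nothing runs at import time; it is an
# illustration only and is not called below.
def _example_pdm_terms():
    ind, shift_ind, coh, shift_coh = get_pdm_terms(site_index=1, n=10, adj_sites=2, shift=0)
    # ind == [1, 9, 0, 2, 3]   (centre site first, then PBC-wrapped neighbours)
    # coh == [9, 0, 2, 3]
    return ind, shift_ind, coh, shift_coh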
parent_path = "/home/nricke/pymod/DMRC_data/hubbard/data"
S_list = [1., 0.5, 0.3, 0.1]
U_list = [1, 4, 8]
no_list = [1,2,3,4,5]
n_list = [10,12]
adj_site_num = 4
nsamples = 100
# dict_keys: ['nsite', 'nocc', 'sigma', 'U', 'e', 'hd', 'pair', 'pop', 'rdm1', 'nsample']
# namelist: ["e", "hd", "pair", "pop", "rdm1"]
"""
We want the model to only take in 1pdm elements from all of this data, and check how 2-pdm on-site is reproduced
We'll start with only trying to fit to on-site terms, but we'll probably want to collect coherences as well
How should I actually store this data? In one dataframe for each folder in data? Probably all of this should fit in memory, and
we can load and parse based on the values of S, U, no, and n, so a single dataframe is probably better
"""
## create column names for dataframe
column_name_list = ["n", "U", "sigma", "n_occ", "site_pop"]
L_site_list, R_site_list, L_coh_list, R_coh_list = [], [], [], []
for i in range(adj_site_num):
L_site_list.append("L"+str(i+1)) # sites left of target set
R_site_list.append("R"+str(i+1)) # ditto for the right
L_coh_list = ["c"+name for name in L_site_list]
R_coh_list = ["c"+name for name in R_site_list]
column_name_list = column_name_list + L_site_list + R_site_list + L_coh_list + R_coh_list + ["pair_density"]
## Load data for several sets of job parameters, and see how well we can fit on-site terms from neighboring populations
XY_df_list = []
for n in n_list:
x = np.zeros((n*nsamples, 2*adj_site_num + 1))
x_coh = np.zeros((n*nsamples, 2*adj_site_num))
n_arr = np.ones((n*nsamples,1))*n
for U in U_list:
print("U progress: ", U)
U_arr =
|
np.ones((n*nsamples,1))
|
numpy.ones
|
"""
Description:
Author: <NAME> (<EMAIL>)
Date: 2021-06-06 02:17:08
LastEditors: <NAME> (<EMAIL>)
LastEditTime: 2021-06-06 02:17:08
"""
import logging
from functools import lru_cache
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from scipy.stats import truncnorm
from torch.autograd import grad
from torch.tensor import Tensor
from torch.types import Device, _size
from torch.nn.modules.utils import _pair
from .torch_train import set_torch_deterministic
__all__ = [
"shift",
"Krylov",
"circulant",
"toeplitz",
"complex_circulant",
"complex_mult",
"expi",
"complex_matvec_mult",
"complex_matmul",
"real_to_complex",
"get_complex_magnitude",
"get_complex_energy",
"complex_to_polar",
"polar_to_complex",
"absclamp",
"absclamp_",
"im2col_2d",
"check_identity_matrix",
"check_unitary_matrix",
"check_equal_tensor",
"batch_diag",
"batch_eye_cpu",
"batch_eye",
"merge_chunks",
"partition_chunks",
"clip_by_std",
"percentile",
"gen_boolean_mask_cpu",
"gen_boolean_mask",
"fftshift_cpu",
"ifftshift_cpu",
"gen_gaussian_noise",
"gen_gaussian_filter2d_cpu",
"gen_gaussian_filter2d",
"add_gaussian_noise_cpu",
"add_gaussian_noise",
"add_gaussian_noise_",
"circulant_multiply",
"calc_diagonal_hessian",
"calc_jacobian",
"polynomial",
"gaussian",
"lowrank_decompose",
"get_conv2d_flops",
]
def shift(v: Tensor, f: float = 1) -> Tensor:
return torch.cat((f * v[..., -1:], v[..., :-1]), dim=-1)
def Krylov(linear_map: Callable, v: Tensor, n: Optional[int] = None) -> Tensor:
if n is None:
n = v.size(-1)
cols = [v]
for _ in range(n - 1):
v = linear_map(v)
cols.append(v)
return torch.stack(cols, dim=-2)
def circulant(eigens: Tensor) -> Tensor:
circ = Krylov(shift, eigens).transpose(-1, -2)
return circ
@lru_cache(maxsize=4)
def _get_toeplitz_indices(n: int, device: Device) -> Tensor:
# cached toeplitz indices. avoid repeatedly generate the indices.
indices = circulant(torch.arange(n, device=device))
return indices
def toeplitz(col: Tensor) -> Tensor:
"""
Efficient Toeplitz matrix generation from the first column. The column vector must be in the last dimension. Batch generation is supported. Suitable for AutoGrad. Circulant matrix multiplication is ~4x faster than an rfft-based implementation!\\
@col {torch.Tensor} (Batched) column vectors.\\
return out {torch.Tensor} (Batched) circulant matrices
"""
n = col.size(-1)
indices = _get_toeplitz_indices(n, device=col.device)
return col[..., indices]
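# Example (illustrative note, not part of the original module):
#   toeplitz(torch.tensor([0., 1., 2.]))
#   -> tensor([[0., 2., 1.],
#              [1., 0., 2.],
#              [2., 1., 0.]])
# i.e. out[i, j] = col[(i - j) % n], the circulant matrix whose first column is `col`.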
def complex_circulant(eigens: Tensor) -> Tensor:
circ = Krylov(shift, eigens).transpose(-1, -2)
return circ
def complex_mult(X: Tensor, Y: Tensor) -> Tensor:
"""Complex-valued element-wise multiplication
Args:
X (Tensor): Real tensor with last dim of 2 or complex tensor
Y (Tensor): Real tensor with last dim of 2 or complex tensor
Returns:
Tensor: tensor with the same type as input
"""
if not torch.is_complex(X) and not torch.is_complex(Y):
assert X.shape[-1] == 2 and Y.shape[-1] == 2, "Last dimension of real-valued tensor must be 2"
if hasattr(torch, "view_as_complex"):
return torch.view_as_real(torch.view_as_complex(X) * torch.view_as_complex(Y))
else:
return torch.stack(
(
X[..., 0] * Y[..., 0] - X[..., 1] * Y[..., 1],
X[..., 0] * Y[..., 1] + X[..., 1] * Y[..., 0],
),
dim=-1,
)
else:
return X.mul(Y)
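# Example (illustrative note, not part of the original module): in the real-valued
# last-dim-2 representation, (1 + 2j) * (3 + 4j) = -5 + 10j, so
#   complex_mult(torch.tensor([1., 2.]), torch.tensor([3., 4.])) -> tensor([-5., 10.])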
def complex_matvec_mult(W: Tensor, X: Tensor) -> Tensor:
return torch.sum(complex_mult(W, X.unsqueeze(0).repeat(W.size(0), 1, 1)), dim=1)
def complex_matmul(X: Tensor, Y: Tensor) -> Tensor:
assert X.shape[-1] == 2 and Y.shape[-1] == 2, "Last dimension must be 2"
if torch.__version__ >= "1.8" or (torch.__version__ >= "1.7" and X.shape[:-3] == Y.shape[:-3]):
return torch.view_as_real(torch.matmul(torch.view_as_complex(X), torch.view_as_complex(Y)))
return torch.stack(
[
X[..., 0].matmul(Y[..., 0]) - X[..., 1].matmul(Y[..., 1]),
X[..., 0].matmul(Y[..., 1]) + X[..., 1].matmul(Y[..., 0]),
],
dim=-1,
)
def expi(x: Tensor) -> Tensor:
if torch.__version__ >= "1.8" or (torch.__version__ >= "1.7" and not x.requires_grad):
return torch.exp(1j * x)
else:
return x.cos().type(torch.cfloat) + 1j * x.sin().type(torch.cfloat)
def real_to_complex(x: Tensor) -> Tensor:
if torch.__version__ < "1.7":
return torch.stack((x, torch.zeros_like(x).to(x.device)), dim=-1)
else:
return torch.view_as_real(x.to(torch.complex64))
def get_complex_magnitude(x: Tensor) -> Tensor:
assert x.size(-1) == 2, "[E] Input must be complex Tensor"
return torch.sqrt(x[..., 0] * x[..., 0] + x[..., 1] * x[..., 1])
def complex_to_polar(x: Tensor) -> Tensor:
# real and imag to magnitude and angle
if isinstance(x, torch.Tensor):
mag = x.norm(p=2, dim=-1)
angle = torch.view_as_complex(x).angle()
x = torch.stack([mag, angle], dim=-1)
elif isinstance(x, np.ndarray):
x = x.astype(np.complex64)
mag = np.abs(x)
angle = np.angle(x)
x = np.stack([mag, angle], axis=-1)
else:
raise NotImplementedError
return x
def polar_to_complex(mag: Tensor, angle: Tensor) -> Tensor:
# magnitude and angle to real and imag
if angle is None:
return real_to_complex(mag)
if mag is None:
if isinstance(angle, torch.Tensor):
x = torch.stack([angle.cos(), angle.sin()], dim=-1)
elif isinstance(angle, np.ndarray):
x = np.stack([np.cos(angle), np.sin(angle)], axis=-1)
else:
raise NotImplementedError
else:
if isinstance(angle, torch.Tensor):
x = torch.stack([mag * angle.cos(), mag * angle.sin()], dim=-1)
elif isinstance(angle, np.ndarray):
x = np.stack([mag * np.cos(angle), mag * np.sin(angle)], axis=-1)
else:
raise NotImplementedError
return x
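# Example (illustrative note, not part of the original module):
#   polar_to_complex(torch.tensor(2.0), torch.tensor(0.0))         -> tensor([2., 0.])
#   polar_to_complex(torch.tensor(1.0), torch.tensor(math.pi / 2)) -> approx. tensor([0., 1.])
# i.e. the output stacks (mag * cos(angle), mag * sin(angle)) in the last dimension.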
def get_complex_energy(x: Tensor) -> Tensor:
assert x.size(-1) == 2, "[E] Input must be complex Tensor"
return x[..., 0] * x[..., 0] + x[..., 1] * x[..., 1]
def absclamp(x: Tensor, min: Optional[float] = None, max: Optional[float] = None) -> Tensor:
if isinstance(x, torch.Tensor):
mag = x.norm(p=2, dim=-1).clamp(min=min, max=max)
angle = torch.view_as_complex(x).angle()
x = polar_to_complex(mag, angle)
elif isinstance(x, np.ndarray):
x = x.astype(np.complex64)
mag = np.clip(np.abs(x), a_min=min, a_max=max)
angle = np.angle(x)
x = polar_to_complex(mag, angle)
else:
raise NotImplementedError
return x
def absclamp_(x: Tensor, min: Optional[float] = None, max: Optional[float] = None) -> Tensor:
if isinstance(x, torch.Tensor):
y = torch.view_as_complex(x)
mag = y.abs().clamp(min=min, max=max)
angle = y.angle()
x.data.copy_(polar_to_complex(mag, angle))
elif isinstance(x, np.ndarray):
y = x.astype(np.complex64)
mag = np.clip(np.abs(y), a_min=min, a_max=max)
angle =
|
np.angle(y)
|
numpy.angle
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
from op_tester import op_tester
def test_cumsum_1d(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, axis.item(0))
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_1d_exclusive(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
expected = np.array([0., 1., 3., 6., 10.]).astype(np.float32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], exclusive=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = torch.tensor(expected)
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_1d_reverse(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], reverse=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
tx = torch.flip(tx, [0])
out = torch.cumsum(tx, 0)
out = torch.flip(out, [0])
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_1d_reverse_exclusive(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
expected = np.array([14., 12., 9., 5., 0.]).astype(np.float32)
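# With reverse=1 and exclusive=1 each entry is the sum of the elements *after* it,
# e.g. expected[0] = 2 + 3 + 4 + 5 = 14 and the last entry is 0 (clarifying note).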
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], reverse=1, exclusive=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = torch.tensor(expected)
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_2d_axis_0(op_tester):
x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32).reshape((2, 3))
axis = np.array(0).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, axis.item(0))
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_2d_axis_1(op_tester):
x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32).reshape((2, 3))
axis = np.array(1).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, axis.item(0))
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_2d_negative_axis(op_tester):
x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32).reshape((2, 3))
axis = np.array(-1).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, axis.item(0))
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d(op_tester):
a0 = np.array([[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
[[20, 22, 24, 26, 28], [30, 32, 34, 36, 38],
[40, 42, 44, 46, 48], [50, 52, 54, 56, 58]],
[[60, 63, 66, 69, 72], [75, 78, 81, 84, 87],
[90, 93, 96, 99, 102], [105, 108, 111, 114,
117]]]).astype(np.float32)
a1 = np.array([[[0, 1, 2, 3, 4], [5, 7, 9, 11, 13], [15, 18, 21, 24, 27],
[30, 34, 38, 42, 46]],
[[20, 21, 22, 23, 24], [45, 47, 49, 51, 53],
[75, 78, 81, 84, 87], [110, 114, 118, 122, 126]],
[[40, 41, 42, 43, 44], [85, 87, 89, 91, 93],
[135, 138, 141, 144, 147], [190, 194, 198, 202,
206]]]).astype(np.float32)
a2 = np.array([[[0, 1, 3, 6, 10], [5, 11, 18, 26, 35],
[10, 21, 33, 46, 60], [15, 31, 48, 66, 85]],
[[20, 41, 63, 86, 110], [25, 51, 78, 106, 135],
[30, 61, 93, 126, 160], [35, 71, 108, 146, 185]],
[[40, 81, 123, 166, 210], [45, 91, 138, 186, 235],
[50, 101, 153, 206, 260], [55, 111, 168, 226,
285]]]).astype(np.float32)
am1 = a2
am2 = a1
am3 = a0
expected = {-3: am3, -2: am2, -1: am1, 0: a0, 1: a1, 2: a2}
testAxis = np.array([-3, -2, -1, 0, 1, 2]).astype(np.int32)
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = torch.tensor(expected[a])
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d_v2(op_tester):
testAxis = [-3, -2, -1, 0, 1, 2]
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, a)
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d_reverse(op_tester):
a0 = np.array([[[60, 63, 66, 69, 72], [75, 78, 81, 84, 87],
[90, 93, 96, 99, 102], [105, 108, 111, 114, 117]],
[[60, 62, 64, 66, 68], [70, 72, 74, 76, 78],
[80, 82, 84, 86, 88], [90, 92, 94, 96, 98]],
[[40, 41, 42, 43, 44], [45, 46, 47, 48, 49],
[50, 51, 52, 53, 54], [55, 56, 57, 58,
59]]]).astype(np.float32)
a1 = np.array([[[30, 34, 38, 42, 46], [30, 33, 36, 39, 42],
[25, 27, 29, 31, 33], [15, 16, 17, 18, 19]],
[[110, 114, 118, 122, 126], [90, 93, 96, 99, 102],
[65, 67, 69, 71, 73], [35, 36, 37, 38, 39]],
[[190, 194, 198, 202, 206], [150, 153, 156, 159, 162],
[105, 107, 109, 111, 113], [55, 56, 57, 58,
59]]]).astype(np.float32)
a2 = np.array([[[10, 10, 9, 7, 4], [35, 30, 24, 17, 9],
[60, 50, 39, 27, 14], [85, 70, 54, 37, 19]],
[[110, 90, 69, 47, 24], [135, 110, 84, 57, 29],
[160, 130, 99, 67, 34], [185, 150, 114, 77, 39]],
[[210, 170, 129, 87, 44], [235, 190, 144, 97, 49],
[260, 210, 159, 107, 54], [285, 230, 174, 117,
59]]]).astype(np.float32)
am1 = a2
am2 = a1
am3 = a0
expected = {-3: am3, -2: am2, -1: am1, 0: a0, 1: a1, 2: a2}
testAxis = np.array([-3, -2, -1, 0, 1, 2]).astype(np.int32)
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], reverse=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = torch.tensor(expected[a])
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d_reverse_v2(op_tester):
testAxis = [-3, -2, -1, 0, 1, 2]
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], reverse=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
tx = torch.flip(tx, [a])
out = torch.cumsum(tx, a)
out = torch.flip(out, [a])
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d_exclusive(op_tester):
# Expected from tf as pytorch does not support
# exclusive and reverse.
a0 = np.array([[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
[[20, 22, 24, 26, 28], [30, 32, 34, 36, 38],
[40, 42, 44, 46, 48], [50, 52, 54, 56,
58]]]).astype(np.float32)
a1 = np.array(
[[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [5, 7, 9, 11, 13],
[15, 18, 21, 24, 27]],
[[0, 0, 0, 0, 0], [20, 21, 22, 23, 24], [45, 47, 49, 51, 53],
[75, 78, 81, 84, 87]],
[[0, 0, 0, 0, 0], [40, 41, 42, 43, 44], [85, 87, 89, 91, 93],
[135, 138, 141, 144, 147]]]).astype(np.float32)
a2 = np.array([[[0, 0, 1, 3, 6], [0, 5, 11, 18, 26], [0, 10, 21, 33, 46],
[0, 15, 31, 48, 66]],
[[0, 20, 41, 63, 86], [0, 25, 51, 78, 106],
[0, 30, 61, 93, 126], [0, 35, 71, 108, 146]],
[[0, 40, 81, 123, 166], [0, 45, 91, 138, 186],
[0, 50, 101, 153, 206], [0, 55, 111, 168,
226]]]).astype(np.float32)
am1 = a2
am2 = a1
am3 = a0
expected = {-3: am3, -2: am2, -1: am1, 0: a0, 1: a1, 2: a2}
testAxis = np.array([-3, -2, -1, 0, 1, 2]).astype(np.int32)
for a in testAxis:
x =
|
np.arange(60)
|
numpy.arange
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pyforms
from numpy import dot
from pyforms import BaseWidget
from pyforms.controls import ControlButton, ControlText, ControlSlider, \
ControlFile, ControlPlayer, ControlCheckBox, ControlCombo, ControlProgress
from scipy.spatial.distance import squareform, pdist
from helpers.functions import get_log_kernel, inv, linear_sum_assignment, \
local_maxima, select_frames
from helpers.video_window import VideoWindow
class MultipleBlobDetection(BaseWidget):
def __init__(self):
super(MultipleBlobDetection, self).__init__(
'Multiple Blob Detection')
# Definition of the forms fields
self._videofile = ControlFile('Video')
self._outputfile = ControlText('Results output file')
self._threshold_box = ControlCheckBox('Threshold')
self._threshold = ControlSlider('Binary Threshold')
self._threshold.value = 114
self._threshold.min = 1
self._threshold.max = 255
self._roi_x_min = ControlSlider('ROI x top')
self._roi_x_max = ControlSlider('ROI x bottom')
self._roi_y_min = ControlSlider('ROI y left')
self._roi_y_max = ControlSlider('ROI y right')
# self._blobsize = ControlSlider('Minimum blob size', 100, 100, 2000)
self._player = ControlPlayer('Player')
self._runbutton = ControlButton('Run')
self._start_frame = ControlText('Start Frame')
self._stop_frame = ControlText('Stop Frame')
self._color_list = ControlCombo('Color channels')
self._color_list.add_item('Red Image Channel', 2)
self._color_list.add_item('Green Image Channel', 1)
self._color_list.add_item('Blue Image Channel', 0)
self._clahe = ControlCheckBox('CLAHE ')
self._dilate = ControlCheckBox('Morphological Dilation')
self._dilate_type = ControlCombo('Dilation Kernel Type')
self._dilate_type.add_item('RECTANGLE', cv2.MORPH_RECT)
self._dilate_type.add_item('ELLIPSE', cv2.MORPH_ELLIPSE)
self._dilate_type.add_item('CROSS', cv2.MORPH_CROSS)
self._dilate_size = ControlSlider('Dilation Kernel Size', default=3,
min=1, max=10)
self._dilate_size.value = 5
self._dilate_size.min = 1
self._dilate_size.max = 10
self._erode = ControlCheckBox('Morphological Erosion')
self._erode_type = ControlCombo('Erode Kernel Type')
self._erode_type.add_item('RECTANGLE', cv2.MORPH_RECT)
self._erode_type.add_item('ELLIPSE', cv2.MORPH_ELLIPSE)
self._erode_type.add_item('CROSS', cv2.MORPH_CROSS)
self._erode_size = ControlSlider('Erode Kernel Size')
self._erode_size.value = 5
self._erode_size.min = 1
self._erode_size.max = 10
self._open = ControlCheckBox('Morphological Opening')
self._open_type = ControlCombo('Open Kernel Type')
self._open_type.add_item('RECTANGLE', cv2.MORPH_RECT)
self._open_type.add_item('ELLIPSE', cv2.MORPH_ELLIPSE)
self._open_type.add_item('CROSS', cv2.MORPH_CROSS)
self._open_size = ControlSlider('Open Kernel Size')
self._open_size.value = 20
self._open_size.min = 1
self._open_size.max = 40
self._close = ControlCheckBox('Morphological Closing')
self._close_type = ControlCombo('Close Kernel Type')
self._close_type.add_item('RECTANGLE', cv2.MORPH_RECT)
self._close_type.add_item('ELLIPSE', cv2.MORPH_ELLIPSE)
self._close_type.add_item('CROSS', cv2.MORPH_CROSS)
self._close_size = ControlSlider('Close Kernel Size', default=19,
min=1, max=40)
self._close_size.value = 20
self._close_size.min = 1
self._close_size.max = 40
self._LoG = ControlCheckBox('LoG - Laplacian of Gaussian')
self._LoG_size = ControlSlider('LoG Kernel Size')
self._LoG_size.value = 20
self._LoG_size.min = 1
self._LoG_size.max = 60
self._progress_bar = ControlProgress('Progress Bar')
# Define the function that will be called when a file is selected
self._videofile.changed_event = self.__video_file_selection_event
# Define the event that will be called when the run button is processed
self._runbutton.value = self.__run_event
# Define the event called before showing the image in the player
self._player.process_frame_event = self.__process_frame
self._error_messages = {}
# Define the organization of the Form Controls
self.formset = [
('_videofile', '_outputfile'),
('_start_frame', '_stop_frame'),
('_color_list', '_clahe', '_roi_x_min', '_roi_y_min'),
('_threshold_box', '_threshold', '_roi_x_max', '_roi_y_max'),
('_dilate', '_erode', '_open', '_close'),
('_dilate_type', '_erode_type', '_open_type', '_close_type'),
('_dilate_size', '_erode_size', '_open_size', '_close_size'),
('_LoG', '_LoG_size'),
('_runbutton', '_progress_bar'),
'_player'
]
self.is_roi_set = False
def _parameters_check(self):
self._error_messages = {}
if not self._player.value:
self._error_messages['video'] = 'No video specified'
elif not self._start_frame.value or not self._stop_frame.value or \
int(self._start_frame.value) >= int(self._stop_frame.value) or \
int(self._start_frame.value) < 0 or int(self._stop_frame.value) < 0:
self._error_messages['frames'] = 'Wrong start/end frame number'
def __video_file_selection_event(self):
"""
When the video file is selected, instantiate the video in the player
"""
self._player.value = self._videofile.value
def __color_channel(self, frame):
"""
Returns only one color channel of input frame.
Output is in grayscale.
"""
frame = frame[:, :, self._color_list.value]
return frame
def __create_kernels(self):
"""
Creates kernels for morphological operations.
Check cv2.getStructuringElement() doc for more info:
http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/
py_morphological_ops/py_morphological_ops.html
Assumed that all kernels (except LoG kernel) are square.
Example of use:
open_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (19, 19))
erosion_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
:return: _opening_kernel, _close_kernel, _erosion_kernel, \
_dilate_kernel, _LoG_kernel
"""
if self._open_type.value and self._open_size.value:
_opening_kernel = cv2.getStructuringElement(self._open_type.value,
(self._open_size.value,
self._open_size.value))
else:
_opening_kernel = None
if self._close_type.value and self._close_size.value:
_close_kernel = cv2.getStructuringElement(self._close_type.value,
(self._close_size.value,
self._close_size.value))
else:
_close_kernel = None
if self._erode_type.value and self._erode_size.value:
_erosion_kernel = cv2.getStructuringElement(self._erode_type.value,
(self._erode_size.value,
self._erode_size.value))
else:
_erosion_kernel = None
if self._dilate_type.value and self._dilate_size.value:
_dilate_kernel = cv2.getStructuringElement(self._dilate_type.value,
(self._dilate_size.value,
self._dilate_size.value))
else:
_dilate_kernel = None
if self._LoG.value and self._LoG_size.value:
_LoG_kernel = get_log_kernel(self._LoG_size.value,
int(self._LoG_size.value * 0.5))
else:
_LoG_kernel = None
return _opening_kernel, _close_kernel, _erosion_kernel, \
_dilate_kernel, _LoG_kernel
def __morphological(self, frame):
"""
Apply morphological operations selected by the user.
:param frame: input frame of selected video.
:return: preprocessed frame.
"""
opening_kernel, close_kernel, erosion_kernel, \
dilate_kernel, log_kernel = self.__create_kernels()
# prepare image - morphological operations
if self._erode.value:
frame = cv2.erode(frame, erosion_kernel, iterations=1)
if self._open.value:
frame = cv2.morphologyEx(frame, cv2.MORPH_OPEN, opening_kernel)
if self._close.value:
frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, close_kernel)
if self._dilate.value:
frame = cv2.dilate(frame, dilate_kernel, iterations=1)
# apply the LoG kernel for finding local maxima
if self._LoG.value:
frame = cv2.filter2D(frame, cv2.CV_32F, log_kernel)
frame *= 255
# remove near 0 floats
frame[frame < 0] = 0
return frame
def __roi(self, frame):
"""
Define image region of interest.
"""
# ROI
height, width = frame.shape
self._roi_x_max.min = int(height / 2)
self._roi_x_max.max = height
self._roi_y_max.min = int(width / 2)
self._roi_y_max.max = width
self._roi_x_min.min = 0
self._roi_x_min.max = int(height / 2)
self._roi_y_min.min = 0
self._roi_y_min.max = int(width / 2)
if not self.is_roi_set:
self._roi_x_max.value = height
self._roi_y_max.value = width
self.is_roi_set = True
# x axis
frame[:int(self._roi_x_min.value)][::] = 255
frame[int(self._roi_x_max.value)::][::] = 255
# y axis
for m in range(height): # height
for n in range(width): # width
if n > self._roi_y_max.value or n < self._roi_y_min.value:
frame[m][n] = 255
# frame[0::][:int(self._roi_y_min.value)] = 255
# frame[0::][int(self._roi_y_max.value):] = 255
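# Editorial note: the per-pixel loop above could be replaced by the equivalent
# vectorised column masking (assuming integer slider values):
#   frame[:, :int(self._roi_y_min.value)] = 255
#   frame[:, int(self._roi_y_max.value) + 1:] = 255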
return frame
def _kalman(self, max_points, stop_frame, vid_fragment):
"""
Kalman Filter function. Takes measurements from the video analysis function
and estimates positions of detected objects. The Munkres algorithm is used
for assignments between estimates (states) and measurements.
:param max_points: measurements.
:param stop_frame: number of frames to analyse
:param vid_fragment: video fragment used for displaying the estimates
:return: x_est, y_est - estimates of x and y positions in the following
format: x_est[index_of_object][frame] gives x position of object
with index = [index_of_object] in the frame = [frame]. The same
goes with y positions.
"""
# font for displaying info on the image
index_error = 0
value_error = 0
# step of filter
dt = 1.
R_var = 1 # measurements variance between x-x and y-y
# Q_var = 0.1 # model variance
# state covariance matrix - no initial cross-covariances, variances only
# (on the order of 10^2 px^2 for the position terms)
P = np.diag([100, 100, 10, 10, 1, 1])
# state transition matrix for 6 state variables
# (position - velocity - acceleration,
# x, y)
F = np.array([[1, 0, dt, 0, 0.5 * pow(dt, 2), 0],
[0, 1, 0, dt, 0, 0.5 * pow(dt, 2)],
[0, 0, 1, 0, dt, 0],
[0, 0, 0, 1, 0, dt],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
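# With state ordering [x, y, vx, vy, ax, ay], the first row of F encodes the
# constant-acceleration kinematics x_{k+1} = x_k + vx_k*dt + 0.5*ax_k*dt^2
# (explanatory note; the remaining rows follow the same pattern for y and the rates).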
# x and y coordinates only - measurements matrix
H = np.array([[1., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0.]])
# no initial correlation between x and y positions - variances only
R = np.array(
[[R_var, 0.], [0., R_var]]) # measurement covariance matrix
# Q must be the same shape as P
Q =
|
np.diag([100, 100, 10, 10, 1, 1])
|
numpy.diag
|
import pytest
import os
import h5py
import tempfile
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing import assert_raises
from scipy.sparse import rand
from keras import backend as K
from keras.engine.saving import preprocess_weights_for_loading
from keras.models import Model, Sequential
from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed
from keras.layers import Embedding
from keras.layers import Bidirectional, GRU, LSTM, CuDNNGRU, CuDNNLSTM
from keras.layers import Conv2D, Flatten
from keras.layers import Input, InputLayer
from keras.initializers import Constant
from keras import optimizers
from keras import losses
from keras import metrics
from keras.models import save_model, load_model, save_mxnet_model
from keras import datasets
skipif_no_tf_gpu = pytest.mark.skipif(
(K.backend() != 'tensorflow' or
not K.tensorflow_backend._get_available_gpus()),
reason='Requires TensorFlow backend and a GPU')
def test_sequential_model_saving():
model = Sequential()
model.add(Dense(2, input_shape=(3,)))
model.add(RepeatVector(3))
model.add(TimeDistributed(Dense(3)))
model.compile(loss=losses.MSE,
optimizer=optimizers.RMSprop(lr=0.0001),
metrics=[metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
new_model = load_model(fname)
os.remove(fname)
out2 = new_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
# Flaky tests. Reducing the tolerance to 2 decimal points.
assert_allclose(out, out2, atol=1e-02)
def test_sequential_model_saving_2():
# test with custom optimizer, loss
custom_opt = optimizers.rmsprop
custom_loss = losses.mse
model = Sequential()
model.add(Dense(2, input_shape=(3,)))
model.add(Dense(3))
model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])
x = np.random.random((1, 3))
y =
|
np.random.random((1, 3))
|
numpy.random.random
|
from unittest import TestCase
from mock import patch
from dglt.models.layers import GaussianSampling
import torch
import numpy as np
import numpy.testing as npt
from third_party.torchtest import torchtest as tt
class test_GaussianSampling(TestCase):
def test_init_no_bidirectional(self):
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
layer = GaussianSampling(2, 4, bidirectional=False)
npt.assert_equal(np.array(layer.mu.weight.size()), np.array([4, 2]))
npt.assert_equal(np.array(layer.mu.bias.size()), np.array([4]))
npt.assert_equal(np.array(layer.logvar.weight.size()), np.array([4, 2]))
npt.assert_equal(np.array(layer.logvar.bias.size()), np.array([4]))
def test_init_bidirectional(self):
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
layer = GaussianSampling(2, 5, bidirectional=True)
npt.assert_equal(np.array(layer.mu.weight.size()),
|
np.array([5, 4])
|
numpy.array
|
# plotter.py
# for generating plots
import matplotlib
try:
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
except:
matplotlib.use('Agg') # for linux server with no tkinter
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'inferno'
# avoid Type 3 fonts
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import numpy as np
import os
import torch
from torch.nn.functional import pad
from mpl_toolkits.mplot3d import Axes3D # for 3D plots
from PIL import Image, ImageDraw # for video
from src.OCflow import OCflow
# the parameters used in this function are shared by most functions in this file
def plotQuadcopter(x, net, prob, nt, sPath, sTitle="", approach='ocflow'):
"""
plot images of the 12-d quadcopter
:param x: tensor, initial spatial point(s) at initial time
:param net: Module, the network Phi (or in some cases the baseline)
:param prob: problem object, which is needed for targets and obstacles
:param nt: int, number of time steps
:param sPath: string, path where you want the files saved
:param sTitle: string, the title wanted to be applied to the figure
:param approach: string, used to distinguish how the plot function behaves with inputs 'ocflow' or 'baseline'
"""
xtarget = prob.xtarget
if approach == 'ocflow':
traj, trajCtrl = OCflow(x, net, prob, tspan=[0.0, 1.0], nt=nt, stepper="rk4", alph=net.alph, intermediates=True)
trajCtrl = trajCtrl[:,:,1:] # want last dimension to be nt
elif approach == 'baseline':
# overload inputs to treat x and net differently for baseline
traj = x # expects a tensor of size (nex, d, nt+1)
trajCtrl = net # expects a tensor (nex, a, nt) where a is the dimension of the controls
else:
print("approach=" , approach, " is not an acceptable parameter value for plotQuadcopter")
# 3-d plot bounds
xbounds = [-3.0, 3.0]
ybounds = [-3.0, 3.0]
zbounds = [-3.0, 3.0]
# make grid of plots
nCol = 3
nRow = 2
fig = plt.figure(figsize=plt.figaspect(1.0))
fig.set_size_inches(16, 8)
fig.suptitle(sTitle)
# positional movement training
ax = fig.add_subplot(nRow, nCol, 1, projection='3d')
ax.set_title('Flight Path')
ax.scatter(xtarget[0].cpu().numpy(), xtarget[1].cpu().numpy(), xtarget[2].cpu().numpy(), s=140, marker='x', c='r',
label="target")
for i in range(traj.shape[0]):
ax.plot(traj[i, 0, :].view(-1).cpu().numpy(), traj[i, 1, :].view(-1).cpu().numpy(),
traj[i, 2, :].view(-1).cpu().numpy(), 'o-')
# ax.legend()
ax.view_init(10, -30)
ax.set_xlim(*xbounds)
ax.set_ylim(*ybounds)
ax.set_zlim(*zbounds)
# plotting path from eagle view
ax = fig.add_subplot(nRow, nCol, 2)
# traj is nex by d+1 by nt+1
ax.plot(traj[0, 0, :].cpu().numpy(), traj[0, 1, :].cpu().numpy(), 'o-')
xtarget = xtarget.detach().cpu().numpy()
ax.scatter(xtarget[0], xtarget[1], marker='x', color='red')
ax.set_xlim(*xbounds)
ax.set_ylim(*ybounds)
ax.set_aspect('equal')
ax.set_title('Path From Bird View')
# plot controls
ax = fig.add_subplot(nRow, nCol, 3)
timet = range(nt)
# not using the t=0 values
ax.plot(timet, trajCtrl[0, 0, :].cpu().numpy(), 'o-', label='u')
ax.plot(timet, trajCtrl[0, 1, :].cpu().numpy(), 'o-', label=r'$\tau_\psi$')
ax.plot(timet, trajCtrl[0, 2, :].cpu().numpy(), 'o-', label=r'$\tau_\theta$')
ax.plot(timet, trajCtrl[0, 3, :].cpu().numpy(), 'o-', label=r'$\tau_\phi$')
ax.legend()
ax.set_xticks([0, nt / 2, nt])
ax.set_xlabel('nt')
ax.set_ylabel('control')
# plot L at each time step
ax = fig.add_subplot(nRow, nCol, 4)
timet = range(nt)
# not using the t=0 values
trajL = torch.sum(trajCtrl[0, :, :] ** 2, dim=0, keepdims=True)
totL = torch.sum(trajL[0, :]) / nt
ax.plot(timet, trajL[0, :].cpu().numpy(), 'o-', label='L')
ax.legend()
ax.set_xticks([0, nt / 2, nt])
ax.set_xlabel('nt')
ax.set_ylabel('L')
ax.set_title('L(x,T)=' + str(totL.item()))
# plot velocities
ax = fig.add_subplot(nRow, nCol, 5)
timet = range(nt+1)
ax.plot(timet, traj[0, 6, :].cpu().numpy(), 'o-', label=r'$v_x$')
ax.plot(timet, traj[0, 7, :].cpu().numpy(), 'o-', label=r'$v_y$')
ax.plot(timet, traj[0, 8, :].cpu().numpy(), 'o-', label=r'$v_z$')
ax.plot(timet, traj[0, 9, :].cpu().numpy(), 'o-', label=r'$v_\psi$')
ax.plot(timet, traj[0, 10, :].cpu().numpy(), 'o-', label=r'$v_\theta$')
ax.plot(timet, traj[0, 11, :].cpu().numpy(), 'o-', label=r'$v_\phi$')
ax.legend(ncol=2)
ax.set_xticks([0, nt / 2, nt])
ax.set_xlabel('nt')
ax.set_ylabel('value')
# plot angles
ax = fig.add_subplot(nRow, nCol, 6)
timet = range(nt+1)
ax.plot(timet, traj[0, 3, :].cpu().numpy(), 'o-', label=r'$\psi$')
ax.plot(timet, traj[0, 4, :].cpu().numpy(), 'o-', label=r'$\theta$')
ax.plot(timet, traj[0, 5, :].cpu().numpy(), 'o-', label=r'$\phi$')
ax.legend()
ax.set_xticks([0, nt / 2, nt])
ax.set_xlabel('nt')
ax.set_ylabel('value')
if not os.path.exists(os.path.dirname(sPath)):
os.makedirs(os.path.dirname(sPath))
plt.savefig(sPath, dpi=300)
plt.close()
def videoQuadcopter(x, net, prob, nt, sPath, sTitle="", approach='ocflow'):
""" make video for the 12-d quadcopter """
if approach != 'ocflow':
print('approach ' + approach + ' is not supported')
return 1
nex, d = x.shape
LOWX, HIGHX, LOWY, HIGHY, msz = getMidcrossBounds(x, d)
extent = [LOWX, HIGHX, LOWY, HIGHY]
xtarget = prob.xtarget.view(-1).detach().cpu().numpy()
# 3-d plot bounds
xbounds = [-3.0, 3.0]
ybounds = [-3.0, 3.0]
zbounds = [-3.0, 3.0]
traj, trajCtrl = OCflow(x, net, prob, tspan=[0.0, 1.0], nt=nt, stepper="rk4", alph=net.alph, intermediates=True)
ims = []
if nex > 1:
examples = [0,1,2]
else:
examples = [0]
for ex in examples:
tracePhiFlow = traj[ex, 0:d, :]
tracePhiFlow = tracePhiFlow.detach().cpu().numpy()
ctrls = trajCtrl[ex,:,:].detach().cpu().numpy()
timet = range(1, nt + 1)
for n in range(1,nt-1):
# make grid of plots
nCol = 1
nRow = 2
fig = plt.figure(figsize=plt.figaspect(1.0))
fig.set_size_inches(14, 10)
# positional movement training
ax = fig.add_subplot(nRow, nCol, 1, projection='3d')
ax.set_title('Flight Path')
for j in range(prob.nAgents):
for i in range(ex):
ax.plot(traj[i, 12*j, :nt-1], traj[i, 12*j+1, :nt-1],
traj[i, 12*j+2, :nt-1], '-', linewidth=1, color='gray')
ax.plot(tracePhiFlow[12 * j, :n], tracePhiFlow[12 * j + 1, :n],
tracePhiFlow[12 * j + 2, :n],'o-', linewidth=2, markersize=msz)
ax.scatter(xtarget[12 * j], xtarget[12 * j + 1], xtarget[12 * j + 2], s=140, marker='x', color='red')
ax.view_init(10, -30)
ax.set_xlim(*xbounds)
ax.set_ylim(*ybounds)
ax.set_zlim(*zbounds)
# plot controls
ax = fig.add_subplot(nRow, nCol, 2)
# not using the t=0 values
ax.plot(timet[:n], trajCtrl[ex, 0, 1:n+1].cpu().numpy(), 'o-', label='u')
ax.plot(timet[:n], trajCtrl[ex, 1, 1:n+1].cpu().numpy(), 'o-', label=r'$\tau_\psi$')
ax.plot(timet[:n], trajCtrl[ex, 2, 1:n+1].cpu().numpy(), 'o-', label=r'$\tau_\theta$')
ax.plot(timet[:n], trajCtrl[ex, 3, 1:n+1].cpu().numpy(), 'o-', label=r'$\tau_\phi$')
ax.legend(loc='upper center')
ax.set_xticks([0, nt / 2, nt])
ax.set_xlabel('nt')
ax.set_ylabel('control')
ax.set_ylim(-80,80)
ax.set_xlim(0,nt)
ax.set_title('Controls')
im = fig2img ( fig )
ims.append(im)
plt.close(fig)
sPath = sPath[:-4] + '.gif'
ims[0].save(sPath, save_all=True, append_images=ims[1:], duration=100, loop=0)
print('saved video to', sPath)
def videoSwarm(x, net, prob, nt, sPath, sTitle="", approach='ocflow'):
""" make video for the swarm trajectory planning """
nex = x.shape[0]
d = x.shape[1]
xtarget = prob.xtarget.detach().cpu().numpy()
msz = 3 # markersize
if approach == 'ocflow':
traj, trajCtrl = OCflow(x, net, prob, tspan=[0.0, 1.0], nt=nt, stepper="rk4", alph=net.alph, intermediates=True)
# 3-d plot bounds
xbounds = [ min( x[:, 0::3].min().item() , xtarget[0::3].min().item()) - 1 , max( x[:, 0::3].max().item() , xtarget[0::3].max().item()) + 1 ]
ybounds = [ min( x[:, 1::3].min().item() , xtarget[1::3].min().item()) - 1 , max( x[:, 1::3].max().item() , xtarget[1::3].max().item()) + 1 ]
zbounds = [ min( x[:, 2::3].min().item() , xtarget[2::3].min().item()) - 1 , max( x[:, 2::3].max().item() , xtarget[2::3].max().item()) + 1 ]
xls = torch.linspace(*xbounds, 50)
yls = torch.linspace(*ybounds, 50)
gridPts = torch.stack(torch.meshgrid(xls, yls)).to(x.dtype).to(x.device)
gridShape = gridPts.shape[1:]
gridPts = gridPts.reshape(2, -1).t()
# setup example initial z
z0 = pad(gridPts, [0, 1, 0, 0], value=-1.5)
z0 = pad(z0, [0, 1, 0, 0], value=0.0)
# make grid of subplots
nCol = 2
nRow = 2
# fig = plt.figure(figsize=plt.figaspect(1.0))
# fig.set_size_inches(17, 10) # (14,10)
# fig.suptitle(sTitle)
ims = []
for n in range(nt):
fig = plt.figure()
fig.set_size_inches(17, 10)
# positional movement/trajectory
for place in [1,2,4]: # plot two angles of it
ax = fig.add_subplot(nRow, nCol, place, projection='3d')
ax.set_title('Flight Path')
if prob.obstacle == 'blocks':
shade = 0.4
# block 1
X, Y = np.meshgrid([-2, 2], [-0.5, 0.5])
ax.plot_surface(X, Y, 7 * np.ones((2,2)) , alpha=shade, color='gray')
ax.plot_surface(X, Y, 0 * np.ones((2, 2)), alpha=shade, color='gray')
X, Z = np.meshgrid([-2, 2], [0, 7])
ax.plot_surface(X, -0.5 * np.ones((2, 2)), Z, alpha=shade, color='gray')
ax.plot_surface(X, 0.5 * np.ones((2, 2)), Z, alpha=shade, color='gray')
Y, Z = np.meshgrid([-0.5, 0.5], [0, 7])
ax.plot_surface(-2 * np.ones((2, 2)), Y , Z, alpha=shade, color='gray')
ax.plot_surface( 2 * np.ones((2, 2)), Y , Z, alpha=shade, color='gray')
# block 2
X, Y = np.meshgrid([2, 4], [-1, 1])
ax.plot_surface(X, Y, 4 * np.ones((2,2)) , alpha=shade, color='gray')
ax.plot_surface(X, Y, 0 * np.ones((2, 2)), alpha=shade, color='gray')
X, Z = np.meshgrid([2, 4], [0, 4])
ax.plot_surface(X, -1 *
|
np.ones((2, 2))
|
numpy.ones
|
#!/usr/bin/env python
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy import interpolate
import cv2 as cv
import spatialmath.base.argcheck as argcheck
from machinevisiontoolbox.base import color, int_image, float_image, plot_histogram
class ImageProcessingBaseMixin:
"""
Image processing basic operations on the Image class
"""
def int(self, intclass='uint8'):
"""
Convert image to integer type
:param intclass: either 'uint8', or any integer class supported by np
:type intclass: str
:return: Image with integer pixel types
:rtype: Image instance
- ``IM.int()`` is a copy of image with pixels converted to unsigned
8-bit integer (uint8) elements in the range 0 to 255.
- ``IM.int(intclass)`` as above but the output pixels are converted to
the integer class ``intclass``.
Example:
.. runblock:: pycon
>>> from machinevisiontoolbox import Image
>>> im = Image('flowers1.png', dtype='float64')
>>> print(im)
>>> im_int = im.int()
>>> print(im_int)
.. note::
- Works for an image with arbitrary number of dimensions, eg. a
color image or image sequence.
- If the input image is floating point (single or double) the
pixel values are scaled from an input range of [0,1] to a range
spanning zero to the maximum positive value of the output integer
class.
- If the input image is an integer class then the pixels are cast
to change type but not their value.
:references:
- Robotics, Vision & Control, Section 12.1, <NAME>,
Springer 2011.
"""
out = []
for im in self:
out.append(int_image(im.image, intclass))
return self.__class__(out)
def float(self, floatclass='float32'):
"""
Convert image to float type
:param floatclass: 'single', 'double', 'float32' [default], 'float64'
:type floatclass: str
:return: Image with floating point pixel types
:rtype: Image instance
- ``IM.float()`` is a copy of image with pixels converted to
``float32`` floating point values spanning the range 0 to 1. The
input integer pixels are assumed to span the range 0 to the maximum
value of their integer class.
- ``IM.float(im, floatclass)`` as above but with floating-point pixel
values belonging to the class ``floatclass``.
Example:
.. runblock:: pycon
>>> im = Image('flowers1.png')
>>> print(im)
>>> im_float = im.float()
>>> print(im_float)
:references:
- Robotics, Vision & Control, Section 12.1, <NAME>,
Springer 2011.
"""
out = []
for im in self:
out.append(float_image(im.image, floatclass))
return self.__class__(out)
def mono(self, opt='r601'):
"""
Convert color image to monochrome
:param opt: greyscale conversion option 'r601' [default] or 'r709'
:type opt: string
:return: Image with floating point pixel types
:rtype: Image instance
- ``IM.mono(im)`` is a greyscale equivalent of the color image ``im``
Example:
.. runblock:: pycon
>>> im = Image('flowers1.png')
>>> print(im)
>>> im_mono = im.mono()
>>> print(im_mono)
:references:
- Robotics, Vision & Control, Section 10.1, <NAME>,
Springer 2011.
"""
if not self.iscolor:
return self
out = []
for im in [img.bgr for img in self]:
if opt == 'r601':
new = 0.299 * im[:, :, 2] + 0.587 * im[:, :, 1] + \
0.114 * im[:, :, 0]
new = new.astype(im.dtype)
elif opt == 'r709':
new = 0.2126 * im[:, :, 2] + 0.7152 * im[:, :, 1] + \
0.0722 * im[:, :, 0]
new = new.astype(im.dtype)
elif opt == 'value':
# 'value' refers to the V in HSV space, not the CIE L*
# the mean of the max and min of RGB values at each pixel
mn = im.min(axis=2)
mx = im.max(axis=2)
# if np.issubdtype(im.dtype, np.float):
# NOTE let's make a new predicate for Image
if im.isfloat:
new = 0.5 * (mn + mx)
new = new.astype(im.dtype)
else:
z = (np.int32(mx) + np.int32(mn)) / 2
new = z.astype(im.dtype)
else:
raise TypeError('unknown type for opt')
out.append(new)
return self.__class__(out)
def stretch(self, max=1, r=None):
"""
Image normalisation
:param max: pixel values are mapped to the range 0 to ``max``
:type max: scalar integer or float
:param r: r[0] is mapped to 0, r[1] is mapped to ``max``
:type r: 2-tuple or numpy array (2,1)
:return: Image with pixel values linearly stretched to the range 0 to ``max``
:rtype: Image instance
- ``IM.stretch()`` is a normalised image in which all pixel values lie
in the range 0 to 1. That is, a linear mapping where the minimum
value of ``im`` is mapped to 0 and the maximum value of ``im`` is
mapped to 1.
Example:
.. runblock:: pycon
.. note::
- For an integer image the result is a float image in the range 0
to max value
:references:
- Robotics, Vision & Control, Section 12.1, <NAME>,
Springer 2011.
"""
# TODO make all infinity values = None?
out = []
for im in [img.image for img in self]:
if r is None:
mn = np.min(im)
mx = np.max(im)
else:
r = argcheck.getvector(r)
mn = r[0]
mx = r[1]
zs = (im - mn) / (mx - mn) * max
if r is not None:
zs = np.maximum(0, np.minimum(max, zs))
out.append(zs)
return self.__class__(out)
def thresh(self, t=None, opt='binary'):
"""
Image threshold
:param t: threshold
:type t: scalar
:param opt: threshold option (see below)
:type opt: string
:return imt: Image thresholded binary image
:rtype imt: Image instance
:return: threshold if opt is otsu or triangle
:rtype: list of scalars
- ``IM.thresh()`` uses Otsu's method for thresholding a greyscale
image.
- ``IM.thresh(t)`` as above but the threshold ``t`` is specified.
- ``IM.thresh(t, opt)`` as above but the threshold option is specified.
See opencv threshold types for threshold options
https://docs.opencv.org/4.2.0/d7/d1b/group__imgproc__misc.html#gaa9e58d2860d4afa658ef70a9b1115576
Example:
.. runblock:: pycon
:options:
- 'binary' # TODO consider the LaTeX formatting of equations
- 'binary_inv'
- 'trunc'
- 'tozero'
- 'tozero_inv'
- 'otsu'
- 'triangle'
.. note::
- Converts a color image to greyscale.
- For a uint8 class image the slider range is 0 to 255.
- For a floating point class image the slider range is 0 to 1.0
"""
# dictionary of threshold options from OpenCV
threshopt = {
'binary': cv.THRESH_BINARY,
'binary_inv': cv.THRESH_BINARY_INV,
'trunc': cv.THRESH_TRUNC,
'tozero': cv.THRESH_TOZERO,
'tozero_inv': cv.THRESH_TOZERO_INV,
'otsu': cv.THRESH_OTSU,
'triangle': cv.THRESH_TRIANGLE
}
if t is not None:
if not argcheck.isscalar(t):
raise ValueError(t, 't must be a scalar')
else:
# if no threshold is specified, we assume to use Otsu's method
print("No threshold specified. Applying Otsu's method.")
opt = 'otsu'
t = 0  # placeholder value; OpenCV ignores the threshold when THRESH_OTSU is selected
# ensure mono images
if self.iscolor:
imono = self.mono()
else:
imono = self
out_t = []
out_imt = []
for im in [img.image for img in imono]:
# for image int class, maxval = max of int class
# for image float class, maxval = 1
if np.issubdtype(im.dtype, np.integer):
maxval = np.iinfo(im.dtype).max
else:
# float image, [0, 1] range
maxval = 1.0
threshvalue, imt = cv.threshold(im, t, maxval, threshopt[opt])
out_t.append(threshvalue)
out_imt.append(imt)
if opt == 'otsu' or opt == 'triangle':
return self.__class__(out_imt), out_t
else:
return self.__class__(out_imt)
def otsu(self, levels=256, valley=None):
"""
Otsu threshold selection
:return t: Otsu's threshold
:rtype t: float
:return imt: Image thresholded to a binary image
:rtype imt: Image instance
- ``otsu(im)`` is an optimal threshold for binarizing an image with a
bimodal intensity histogram. ``t`` is a scalar threshold that
maximizes the variance between the classes of pixels below and above
the threshold ``t``.
Example::
.. runblock:: pycon
.. note::
- Converts a color image to greyscale.
:references:
- A Threshold Selection Method from Gray-Level Histograms, N. Otsu.
IEEE Trans. Systems, Man and Cybernetics Vol SMC-9(1), Jan 1979,
pp 62-66.
- An improved method for image thresholding on the valley-emphasis
method. <NAME>, <NAME> et al. Signal and Info Proc.
Assocn. Annual Summit and Conf (APSIPA). 2013. pp1-4
"""
# mvt-mat has options on levels and valleys, which Opencv does not have
# TODO best option is likely just to code the function itself, with
# default option of simply calling OpenCV's Otsu implementation
im = self.mono()
if (valley is None):
imt, t = im.thresh(opt='otsu')
else:
raise ValueError(valley, 'not implemented yet')
# TODO implement otsu.m
# TODO levels currently ignored
return imt, t
def nonzero(self):
return np.nonzero(self.image)
def meshgrid(self, step=1):
"""
Domain matrices for image
:param step: sampling interval along both image dimensions, defaults to 1
:type step: integer
:return u: domain of image, horizontal coordinates
:rtype u: numpy array
:return v: domain of image, vertical coordinates
:rtype v: numpy array
- ``IM.meshgrid()`` are matrices that describe the domain of image
``im (h,w)`` and are each ``(h,w)``. These matrices are used for the
evaluation of functions over the image. The element ``u(r,c) = c``
and ``v(r,c) = r``.
- ``IM.meshgrid(step=N)`` as above but the domain is subsampled with
interval ``N`` along both dimensions.
Example:
.. runblock:: pycon
"""
# TODO too complex, simplify
# Use cases
# image.meshgrid() spans image
# image.meshgrid(step=N) spans image with step
# if not (argcheck.isvector(a1) or isinstance(a1, np.ndarray)
# or argcheck.isscalar(a1) or isinstance(a1, self.__class__)):
# raise ValueError(
# a1, 'a1 must be an Image, matrix, vector, or scalar')
# if a2 is not None and (not (argcheck.isvector(a2) or
# isinstance(a2, np.ndarray) or
# argcheck.isscalar(a2) or
# isinstance(a2, self.__class__))):
# raise ValueError(
# a2, 'a2 must be Image, matrix, vector, scalar or None')
# if isinstance(a1, self.__class__):
# a1 = a1.image
# if isinstance(a2, self.__class__):
# a2 = a2.image
# if a2 is None:
# if a1.ndim <= 1 and len(a1) == 1:
# # if a1 is a single number
# # we specify a size for a square output image
# ai = np.arange(0, a1)
# u, v = np.meshgrid(ai, ai)
# elif a1.ndim <= 1 and len(a1) == 2:
# # if a1 is a 2-vector
# # we specify a size for a rectangular output image (w, h)
# a10 = np.arange(0, a1[0])
# a11 = np.arange(0, a1[1])
# u, v = np.meshgrid(a10, a11)
# elif (a1.ndim >= 2): # and (a1.shape[2] > 2):
# u, v = np.meshgrid(np.arange(0, a1.shape[1]),
# np.arange(0, a1.shape[0]))
# else:
# raise ValueError(a1, 'incorrect argument a1 shape')
# else:
# # we assume a1 and a2 are two scalars
# u, v = np.meshgrid(np.arange(0, a1), np.arange(0, a2))
u = np.arange(0, self.width, step)
v = np.arange(0, self.height, step)
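# Note (editorial): with indexing='ij' the first returned array holds the row (v)
# coordinates and the second the column (u) coordinates, each of shape (len(v), len(u)).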
return np.meshgrid(v, u, indexing='ij')
def hist(self, nbins=256, opt=None):
"""
Image histogram
:param nbins: number of bins for histogram
:type nbins: integer
:param opt: histogram option
:type opt: string
:return hist: histogram h as a column vector, and corresponding bins x,
cdf and normcdf
:rtype hist: collections.namedtuple
- ``IM.hist()`` is the histogram of intensities for image as a vector.
For an image with multiple planes, the histogram of each plane is
given in a separate column. Additionally, the cumulative histogram
and normalized cumulative histogram, whose maximum value is one, are
computed.
- ``IM.hist(nbins)`` as above with the number of bins specified
- ``IM.hist(opt)`` as above with histogram options specified
:options:
- 'sorted' histogram but with occurrence sorted in descending
magnitude order. Bin coordinates X reflect this sorting.
Example:
.. runblock:: pycon
.. note::
- The bins spans the greylevel range 0-255.
- For a floating point image the histogram spans the greylevel
range 0-1.
- For floating point images all NaN and Inf values are first
removed.
- OpenCV CalcHist only works on floats up to 32 bit, images are
automatically converted from float64 to float32
"""
# check inputs
optHist = ['sorted']
if opt is not None and opt not in optHist:
raise ValueError(opt, 'opt is not a valid option')
if self.isint:
maxrange =
|
np.iinfo(self.dtype)
|
numpy.iinfo
|
def BSDriver(LoadCase):
# BoundingSurface J2 with kinematic hardening
# Written by <NAME>, Mar. 22 2019
# Copyright Arduino Computational Geomechanics Group
# Ported into Python/Jupyter Notebook by <NAME>, Jul. 2019
#
#
# LoadCase:
# 1 ... proportionally increasing strain
# 2 ... cyclic strain
# 3 ... proportionally increasing stress
# 4 ... cyclic stress
#
# ====== LOADING CASES ==================================================
import numpy as np
from collections import namedtuple
nPoints = 200
## Switch for LoadCases:
## Pseudo-switch created by using python dictionary to hold LoadCase functions
def case_one():
case_one.time = np.linspace(0,1,nPoints+1)
case_one.strain = np.array([ 0.05, -0.015, -0.015, 0.000, 0.000, 0.000 ]).reshape(6,1) * case_one.time
case_one.StressDriven = 0
return case_one
def case_two():
nCycles = 3
omega = 0.15
case_two.time = np.linspace(0,nCycles*2*np.pi/omega,nCycles*nPoints+1);
case_two.strain = np.array([ 0.00, -0.000, -0.000, 0.045, 0.000, 0.000 ]).reshape(6,1) * np.sin( omega*case_two.time )
case_two.StressDriven = 0
return case_two
def case_three():
case_three.time = np.linspace(0,1,nPoints+1)
case_three.stress = np.array([[0.100],
[0.000],
[0.000],
[0.000],
[0.000],
[0.000]])*case_three.time + 0.0*np.array([1,1,1,0,0,0]).reshape(6,1)*np.ones( case_three.time.shape )
case_three.StressDriven = 1
return case_three
def case_four():
nCycles = 3
omega = 0.15
case_four.time = np.linspace(0, nCycles*2*np.pi/omega, nCycles*nPoints+1)
case_four.stress = np.array([[0.000],
[0.000],
[0.000], #.01, .03, -.01, .05, 0, -.02
[0.050],
[0.000],
[0.000]])*np.sin( omega*case_four.time ) + 0.0*np.array([1,1,1,0,0,0]).reshape(6,1)*np.ones( case_four.time.shape )
case_four.StressDriven = 1
return case_four
case_switcher = {
1: case_one,
2: case_two,
3: case_three,
4: case_four
}
case = case_switcher.get(LoadCase, lambda: "Invalid LoadCase")
case() #Runs the LoadCase function. Creates: case.time, case.strain | case.stress, case.StressDriven
time, StressDriven = case.time, case.StressDriven
if StressDriven:
stress = case.stress
strain = np.zeros((6,1)) # initialize a zero-filled 6x1 strain array for the stress-driven scenario
else:
strain = case.strain
stress =
|
np.zeros((6,1))
|
numpy.zeros
|
'''Implements Neumann with the possibility to do batch learning'''
import math
import numpy as np
from sklearn.base import BaseEstimator
import torch.nn as nn
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from pytorchtools import EarlyStopping
class Neumann(nn.Module):
def __init__(self, n_features, depth, residual_connection, mlp_depth,
init_type):
super().__init__()
self.depth = depth
self.n_features = n_features
self.residual_connection = residual_connection
self.mlp_depth = mlp_depth
self.relu = nn.ReLU()
# Create the parameters of the network
l_W = [torch.empty(n_features, n_features, dtype=torch.float)
for _ in range(self.depth)]
Wc = torch.empty(n_features, n_features, dtype=torch.float)
beta = torch.empty(1*n_features, dtype=torch.float)
mu = torch.empty(n_features, dtype=torch.float)
b = torch.empty(1, dtype=torch.float)
l_W_mlp = [torch.empty(n_features, 1*n_features, dtype=torch.float)
for _ in range(mlp_depth)]
l_b_mlp = [torch.empty(1*n_features, dtype=torch.float)
for _ in range(mlp_depth)]
# Initialize the parameters of the network
if init_type == 'normal':
for W in l_W:
nn.init.xavier_normal_(W)
nn.init.xavier_normal_(Wc)
nn.init.normal_(beta)
nn.init.normal_(mu)
nn.init.normal_(b)
for W in l_W_mlp:
nn.init.xavier_normal_(W)
for b_mlp in l_b_mlp:
nn.init.normal_(b_mlp)
elif init_type == 'uniform':
bound = 1 / math.sqrt(n_features)
for W in l_W:
nn.init.kaiming_uniform_(W, a=math.sqrt(5))
nn.init.kaiming_uniform_(Wc, a=math.sqrt(5))
nn.init.uniform_(beta, -bound, bound)
nn.init.uniform_(mu, -bound, bound)
nn.init.normal_(b)
for W in l_W_mlp:
nn.init.kaiming_uniform_(W, a=math.sqrt(5))
for b_mlp in l_b_mlp:
nn.init.uniform_(b_mlp, -bound, bound)
# Make tensors learnable parameters
self.l_W = [torch.nn.Parameter(W) for W in l_W]
for i, W in enumerate(self.l_W):
self.register_parameter('W_{}'.format(i), W)
self.Wc = torch.nn.Parameter(Wc)
self.beta = torch.nn.Parameter(beta)
self.mu = torch.nn.Parameter(mu)
self.b = torch.nn.Parameter(b)
self.l_W_mlp = [torch.nn.Parameter(W) for W in l_W_mlp]
for i, W in enumerate(self.l_W_mlp):
self.register_parameter('W_mlp_{}'.format(i), W)
self.l_b_mlp = [torch.nn.Parameter(b) for b in l_b_mlp]
for i, b in enumerate(self.l_b_mlp):
self.register_parameter('b_mlp_{}'.format(i), b)
def forward(self, x, m, phase='train'):
"""
Parameters:
----------
x: tensor, shape (batch_size, n_features)
The input data imputed by 0.
m: tensor, shape (batch_size, n_features)
The missingness indicator (0 if observed and 1 if missing).
"""
h0 = x + m*self.mu
h = x - (1-m)*self.mu
h_res = x - (1-m)*self.mu
if len(self.l_W) > 0:
S0 = self.l_W[0]
h = torch.matmul(h, S0)*(1-m)
for W in self.l_W[1:self.depth]:
h = torch.matmul(h, W)*(1-m)
if self.residual_connection:
h += h_res
h = torch.matmul(h, self.Wc)*m + h0
if self.mlp_depth > 0:
for W, b in zip(self.l_W_mlp, self.l_b_mlp):
h = torch.matmul(h, W) + b
h = self.relu(h)
y = torch.matmul(h, self.beta)
y = y + self.b
return y
class Neumann_mlp(BaseEstimator):
"""The Neumann neural network
Parameters
----------
depth: int
The number of Neumann iterations. Note that the total depth of the
Neumann network will be `depth`+1 because of W_{mix}.
n_epochs: int
The maximum number of epochs.
batch_size: int
lr: float
The learning rate.
early_stopping: boolean
If True, early stopping is used based on the validation set, with a
patience of 15 epochs.
residual_connection: boolean
If True, the residual connection of the Neumann network are
implemented.
mlp_depth: int
The depth of the MLP stacked on top of the Neumann iterations.
init_type: str
The type of initialisation for the parameters. Either 'normal' or
'uniform'.
verbose: boolean
"""
def __init__(self, depth, n_epochs, batch_size, lr, early_stopping=False,
residual_connection=False, mlp_depth=0, init_type='normal',
verbose=False):
self.depth = depth
self.n_epochs = n_epochs
self.batch_size = batch_size
self.lr = lr
self.early_stop = early_stopping
self.residual_connection = residual_connection
self.mlp_depth = mlp_depth
self.init_type = init_type
self.verbose = verbose
self.r2_train = []
self.mse_train = []
self.r2_val = []
self.mse_val = []
def fit(self, X, y, X_val=None, y_val=None):
M = np.isnan(X)
X = np.nan_to_num(X)
n_samples, n_features = X.shape
if X_val is not None:
M_val = np.isnan(X_val)
X_val = np.nan_to_num(X_val)
M = torch.as_tensor(M, dtype=torch.float)
X = torch.as_tensor(X, dtype=torch.float)
y = torch.as_tensor(y, dtype=torch.float)
if X_val is not None:
M_val = torch.as_tensor(M_val, dtype=torch.float)
X_val = torch.as_tensor(X_val, dtype=torch.float)
y_val = torch.as_tensor(y_val, dtype=torch.float)
self.net = Neumann(n_features=n_features, depth=self.depth,
residual_connection=self.residual_connection,
mlp_depth=self.mlp_depth, init_type=self.init_type)
self.optimizer = optim.SGD(self.net.parameters(), lr=self.lr)
self.scheduler = ReduceLROnPlateau(
self.optimizer, mode='min', factor=0.2, patience=2,
threshold=1e-4)
if self.early_stop and X_val is not None:
early_stopping = EarlyStopping(verbose=self.verbose)
running_loss = np.inf
criterion = nn.MSELoss()
# Train the network
for i_epoch in range(self.n_epochs):
if self.verbose:
print("epoch nb {}".format(i_epoch))
# Shuffle tensors to have different batches at each epoch
ind = torch.randperm(n_samples)
X = X[ind]
M = M[ind]
y = y[ind]
xx = torch.split(X, split_size_or_sections=self.batch_size, dim=0)
mm = torch.split(M, split_size_or_sections=self.batch_size, dim=0)
yy = torch.split(y, split_size_or_sections=self.batch_size, dim=0)
self.scheduler.step(running_loss/len(xx))
param_group = self.optimizer.param_groups[0]
lr = param_group['lr']
if self.verbose:
print("Current learning rate is: {}".format(lr))
if lr < 5e-6:
break
running_loss = 0
for bx, bm, by in zip(xx, mm, yy):
self.optimizer.zero_grad()
y_hat = self.net(bx, bm)
loss = criterion(y_hat, by)
running_loss += loss.item()
loss.backward()
# Take gradient step
self.optimizer.step()
# Evaluate the train loss
with torch.no_grad():
y_hat = self.net(X, M, phase='test')
loss = criterion(y_hat, y)
mse = loss.item()
self.mse_train.append(mse)
var = ((y - y.mean())**2).mean()
r2 = 1 - mse/var
self.r2_train.append(r2)
if self.verbose:
print("Train loss - r2: {}, mse: {}".format(r2,
running_loss/len(xx)))
# Evaluate the validation loss
if X_val is not None:
with torch.no_grad():
y_hat = self.net(X_val, M_val, phase='test')
loss_val = criterion(y_hat, y_val)
mse_val = loss_val.item()
self.mse_val.append(mse_val)
var = ((y_val - y_val.mean())**2).mean()
r2_val = 1 - mse_val/var
self.r2_val.append(r2_val)
if self.verbose:
print("Validation loss is: {}".format(r2_val))
if self.early_stop:
early_stopping(mse_val, self.net)
if early_stopping.early_stop:
if self.verbose:
print("Early stopping")
break
# load the last checkpoint with the best model
if self.early_stop and early_stopping.early_stop:
self.net.load_state_dict(early_stopping.checkpoint)
def predict(self, X):
M = np.isnan(X)
        X = np.nan_to_num(X)  # dataset target API: numpy.nan_to_num
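        # The original file is truncated after the line above. What follows is a
        # minimal, hedged sketch of how predict plausibly continues, mirroring the
        # evaluation path used in fit; it is an assumption, not the original
        # implementation.
        M = torch.as_tensor(M, dtype=torch.float)
        X = torch.as_tensor(X, dtype=torch.float)
        with torch.no_grad():
            y_hat = self.net(X, M, phase='test')
        return y_hat.numpy()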
import copy
import itertools
import logging
import math
import numpy as np
from SS_dataset import SSIterator
logger = logging.getLogger(__name__)
def add_random_variables_to_batch(state, rng, batch, prev_batch, evaluate_mode):
"""
This is a helper function, which adds random variables to a batch.
    We do it this way because we want to avoid Theano's random sampling, both to speed things up and to
    avoid known Theano issues with sampling inside scan loops.
The random variable 'ran_var_gaussian_constutterance' is sampled from a standard Gaussian distribution,
which remains constant during each utterance (i.e. between a pair of end-of-utterance tokens).
The random variable 'ran_var_uniform_constutterance' is sampled from a uniform distribution [0, 1],
which remains constant during each utterance (i.e. between a pair of end-of-utterance tokens).
When not in evaluate mode, the random vector 'ran_decoder_drop_mask' is also sampled.
This variable represents the input tokens which are replaced by unk when given to
the decoder RNN. It is required for the noise addition trick used by Bowman et al. (2015).
"""
# If none return none
if not batch:
return batch
# Variables to store random vector sampled at the beginning of each utterance
Ran_Var_Gaussian_ConstUtterance = np.zeros((batch['x'].shape[0], batch['x'].shape[1], state['latent_gaussian_per_utterance_dim']), dtype='float32')
Ran_Var_Uniform_ConstUtterance = np.zeros((batch['x'].shape[0], batch['x'].shape[1], state['latent_piecewise_per_utterance_dim']), dtype='float32')
# Go through each sample, find end-of-utterance indices and sample random variables
for idx in range(batch['x'].shape[1]):
# Find end-of-utterance indices
        eos_indices = np.where(batch['x'][:, idx] == state['eos_sym'])  # dataset target API: numpy.where
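# The function above is truncated at this point in the dump. As a small,
# self-contained illustration of the numpy.where pattern it relies on (the toy
# token matrix and eos_sym value below are assumptions, not taken from the
# original code):
def _demo_find_eos_indices():
    eos_sym = 1
    # Toy batch of shape (sequence_length, batch_size), as in batch['x'].
    x = np.array([[5, 7],
                  [1, 2],
                  [3, 1],
                  [1, 4]])
    # Positions of the end-of-utterance token in the first sample.
    eos_indices = np.where(x[:, 0] == eos_sym)[0]
    return eos_indices  # array([1, 3])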
from __future__ import division
from misc import data, eps, A_to_au, fs_to_au, call_name
import textwrap
import numpy as np
class State(object):
""" Class for BO states
:param integer ndim: Dimension of space
:param integer nat: Number of atoms
"""
def __init__(self, ndim, nat):
# Initialize variables
self.energy = 0.
self.energy_old = 0.
self.force = np.zeros((nat, ndim))
self.coef = 0. + 0.j
self.multiplicity = 1
class Molecule(object):
""" Class for a molecule object including State objects
:param string geometry: A string containing atomic positions and velocities
:param integer ndim: Dimension of space
:param integer nstates: Number of BO states
:param boolean l_qmmm: Use the QM/MM scheme
:param integer natoms_mm: Number of atoms in the MM region
    :param integer ndof: Degrees of freedom (if l_model is False, the molecular DoF is used)
:param string unit_pos: Unit of atomic positions
:param string unit_vel: Unit of atomic velocities
:param double charge: Total charge of the system
:param boolean l_model: Is the system a model system?
"""
def __init__(self, geometry, ndim=3, nstates=3, l_qmmm=False, natoms_mm=None, ndof=None, \
unit_pos='angs', unit_vel='au', charge=0., l_model=False):
# Save name of Molecule class
self.mol_type = self.__class__.__name__
# Initialize input values
self.ndim = ndim
self.nst = nstates
self.l_model = l_model
# Initialize geometry
self.pos = []
self.vel = []
self.mass = []
self.symbols = []
self.read_geometry(geometry, unit_pos, unit_vel)
# Initialize QM/MM method
self.l_qmmm = l_qmmm
self.nat_mm = natoms_mm
if (self.l_qmmm):
if (self.nat_mm == None):
raise ValueError (f"( {self.mol_type}.{call_name()} ) Number of atoms in MM region is essential for QMMM! {self.nat_mm}")
self.nat_qm = self.nat - self.nat_mm
else:
if (self.nat_mm != None):
raise ValueError (f"( {self.mol_type}.{call_name()} ) Number of atoms in MM region is not necessary! {self.nat_mm}")
self.nat_qm = self.nat
# Initialize system charge and number of electrons
if (not self.l_model):
self.charge = charge
self.get_nr_electrons()
else:
self.charge = 0.
self.nelec = 0
# Initialize degrees of freedom
if (self.l_model):
if (ndof == None):
self.ndof = self.nat * self.ndim
else:
self.ndof = ndof
else:
if (ndof == None):
if (self.nat == 1):
raise ValueError (f"( {self.mol_type}.{call_name()} ) Too small number of atoms! {self.nat}")
elif (self.nat == 2):
# Diatomic molecules
self.ndof = 1
else:
# Non-linear molecules
self.ndof = self.ndim * self.nat - self.ndim * (self.ndim + 1) / 2
else:
self.ndof = ndof
# Initialize BO states
self.states = []
for ist in range(self.nst):
self.states.append(State(self.ndim, self.nat))
# Initialize couplings
self.nacme = np.zeros((self.nst, self.nst))
self.nacme_old = np.zeros((self.nst, self.nst))
self.socme = np.zeros((self.nst, self.nst), dtype=np.complex_)
self.socme_old = np.zeros((self.nst, self.nst), dtype=np.complex_)
# self.laser_coup_me = np.zeros((self.nst, self.nst))
# Initialize other properties
self.nac = np.zeros((self.nst, self.nst, self.nat_qm, self.ndim))
self.nac_old = np.zeros((self.nst, self.nst, self.nat_qm, self.ndim))
self.rho = np.zeros((self.nst, self.nst), dtype=np.complex_)
# self.laser_coup = np.zeros((self.nst, self.nst, self.nat_qm, self.ndim))
# self.laser_coup_old = np.zeros((self.nst, self.nst, self.nat_qm, self.ndim))
self.ekin = 0.
self.ekin_qm = 0.
self.epot = 0.
self.etot = 0.
self.l_nacme = False
# Initialize point charges for QM/MM calculations
if (self.l_qmmm):
self.mm_charge = np.zeros(self.nat_mm)
def read_geometry(self, geometry, unit_pos, unit_vel):
""" Routine to read the geometry in extended xyz format.\n
Example:\n\n
geometry = '''\n
2\n
Hydrogen\n
H 0.0 0.0 0.0 0.0 0.0 0.0\n
H 0.0 0.0 0.8 0.0 0.0 0.0\n
'''\n
self.read_geometry(geometry)
:param string geometry: Cartesian coordinates for position and initial velocity in the extended xyz format
        :param string unit_pos: Unit of position (angs = angstrom, au = atomic unit [bohr])
        :param string unit_vel: Unit of velocity (au = atomic unit, angs/ps = angstrom per ps, angs/fs = angstrom per fs)
"""
f = geometry.split('\n')
# Read the number of atoms
l_read_nr_atoms = False
count_line = 0
for line_number, line in enumerate(f):
llength = len(line.split())
if (not l_read_nr_atoms and llength == 0):
# Skip the blank lines
continue
elif (count_line == 0 and llength == 1):
# Read the number of atoms
l_read_nr_atoms = True
self.nat = int(line.split()[0])
count_line += 1
elif (count_line == 1):
# Skip the comment line
count_line += 1
else:
# Read the positions and velocities
if (len(line.split()) == 0):
break
assert (len(line.split()) == (1 + 2 * self.ndim))
self.symbols.append(line.split()[0])
self.mass.append(data[line.split()[0]])
self.pos.append(list(map(float, line.split()[1:(self.ndim + 1)])))
self.vel.append(list(map(float, line.split()[(self.ndim + 1):])))
count_line += 1
assert (self.nat == count_line - 2)
self.symbols = np.array(self.symbols)
self.mass = np.array(self.mass)
# Conversion unit
if (unit_pos == 'au'):
fac_pos = 1.
elif (unit_pos == 'angs'):
fac_pos = A_to_au
else:
raise ValueError (f"( {self.mol_type}.{call_name()} ) Invalid unit for position! {unit_pos}")
self.pos = np.array(self.pos) * fac_pos
if (unit_vel == 'au'):
fac_vel = 1.
elif (unit_vel == 'angs/ps'):
fac_vel = A_to_au / (1000.0 * fs_to_au)
elif (unit_vel == 'angs/fs'):
fac_vel = A_to_au / fs_to_au
else:
raise ValueError (f"( {self.mol_type}.{call_name()} ) Invalid unit for velocity! {unit_vel}")
self.vel = np.array(self.vel) * fac_vel
def adjust_nac(self):
""" Adjust phase of nonadiabatic couplings
"""
for ist in range(self.nst):
for jst in range(ist, self.nst):
                snac_old = np.sqrt(np.sum(self.nac_old[ist, jst] ** 2))
                snac = np.sqrt(np.sum(self.nac[ist, jst] ** 2))
                if (np.sqrt(snac * snac_old) < eps):
                    ovlp = 1.
                else:
                    dot_nac = np.sum(self.nac_old[ist, jst] * self.nac[ist, jst])
                    ovlp = dot_nac / snac / snac_old
if (ovlp < 0.):
self.nac[ist, jst] = - self.nac[ist, jst]
self.nac[jst, ist] = - self.nac[jst, ist]
def get_nacme(self):
""" Get NACME from nonadiabatic couplings and laser couplings
"""
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
self.nacme[ist, jst] = np.sum(self.nac[ist, jst] * self.vel[0:self.nat_qm])
# self.laser_coup_me [ist, jst] = np.sum(self.laser_coup[ist, jst])
# self.nacme[ist, jst] += self.laser_coup_me [ist, jst]
self.nacme[jst, ist] = - self.nacme[ist, jst]
def update_kinetic(self):
""" Get kinetic energy
"""
self.ekin = np.sum(0.5 * self.mass * np.sum(self.vel ** 2, axis=1))
if (self.l_qmmm):
# Calculate the kinetic energy for QM atoms
self.ekin_qm = np.sum(0.5 * self.mass[0:self.nat_qm] * np.sum(self.vel[0:self.nat_qm] ** 2, axis=1))
else:
self.ekin_qm = self.ekin
def reset_bo(self, calc_coupling):
""" Reset BO energies, forces and nonadiabatic couplings
:param boolean calc_coupling: Check whether the dynamics includes coupling calculation
"""
for states in self.states:
states.energy = 0.
states.force = np.zeros((self.nat, self.ndim))
if (calc_coupling):
if (self.l_nacme):
self.nacme = np.zeros((self.nst, self.nst))
# self.laser_coup_me = np.zeros((self.nst,self.nst))
else:
self.nac = np.zeros((self.nst, self.nst, self.nat_qm, self.ndim))
# self.laser_coup = np.zeros((self.nst, self.nst, self.nat_qm, self.ndim))
def backup_bo(self):
""" Backup BO energies and nonadiabatic couplings
"""
for states in self.states:
states.energy_old = states.energy
        self.nac_old = np.copy(self.nac)  # dataset target API: numpy.copy
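# Minimal usage sketch (not part of the original module) showing how a Molecule
# is constructed from an extended xyz string, following the example given in the
# read_geometry docstring; the geometry values below are illustrative.
def _example_build_h2():
    geometry = """
    2
    Hydrogen
    H  0.0  0.0  0.0   0.0  0.0  0.0
    H  0.0  0.0  0.8   0.0  0.0  0.0
    """
    return Molecule(geometry, ndim=3, nstates=2, unit_pos='angs', unit_vel='au')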
# Copyright (c) 2003-2015 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy
import treecorr
import os
import fitsio
from test_helper import get_script_name
def test_binnedcorr3():
import math
# Test some basic properties of the base class
def check_arrays(nnn):
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
numpy.testing.assert_almost_equal(nnn.ubin_size * nnn.nubins, nnn.max_u-nnn.min_u)
numpy.testing.assert_almost_equal(nnn.vbin_size * nnn.nvbins, nnn.max_v-nnn.min_v)
#print('logr = ',nnn.logr1d)
numpy.testing.assert_equal(nnn.logr1d.shape, (nnn.nbins,) )
numpy.testing.assert_almost_equal(nnn.logr1d[0], math.log(nnn.min_sep) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr1d[-1], math.log(nnn.max_sep) - 0.5*nnn.bin_size)
numpy.testing.assert_equal(nnn.logr.shape, (nnn.nbins, nnn.nubins, nnn.nvbins) )
numpy.testing.assert_almost_equal(nnn.logr[:,0,0], nnn.logr1d)
numpy.testing.assert_almost_equal(nnn.logr[:,-1,-1], nnn.logr1d)
assert len(nnn.logr) == nnn.nbins
#print('u = ',nnn.u1d)
numpy.testing.assert_equal(nnn.u1d.shape, (nnn.nubins,) )
numpy.testing.assert_almost_equal(nnn.u1d[0], nnn.min_u + 0.5*nnn.ubin_size)
numpy.testing.assert_almost_equal(nnn.u1d[-1], nnn.max_u - 0.5*nnn.ubin_size)
numpy.testing.assert_equal(nnn.u.shape, (nnn.nbins, nnn.nubins, nnn.nvbins) )
numpy.testing.assert_almost_equal(nnn.u[0,:,0], nnn.u1d)
numpy.testing.assert_almost_equal(nnn.u[-1,:,-1], nnn.u1d)
#print('v = ',nnn.v1d)
numpy.testing.assert_equal(nnn.v1d.shape, (nnn.nvbins,) )
numpy.testing.assert_almost_equal(nnn.v1d[0], nnn.min_v + 0.5*nnn.vbin_size)
numpy.testing.assert_almost_equal(nnn.v1d[-1], nnn.max_v - 0.5*nnn.vbin_size)
numpy.testing.assert_equal(nnn.v.shape, (nnn.nbins, nnn.nubins, nnn.nvbins) )
numpy.testing.assert_almost_equal(nnn.v[0,0,:], nnn.v1d)
numpy.testing.assert_almost_equal(nnn.v[-1,-1,:], nnn.v1d)
def check_defaultuv(nnn):
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == numpy.ceil(1./nnn.bin_size)
assert nnn.min_v == -1.
assert nnn.max_v == 1.
assert nnn.nvbins == 2.*numpy.ceil(1./nnn.bin_size)
# Check the different ways to set up the binning:
# Omit bin_size
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, n for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20,
min_u=0.2, max_u=0.9, nubins=12,
min_v=-0.2, max_v=0.2, nvbins=4)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 12
assert nnn.min_v == -0.2
assert nnn.max_v == 0.2
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit min_sep
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify max, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1,
max_u=0.9, nubins=3, ubin_size=0.05,
max_v=0.2, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert nnn.ubin_size == 0.05
assert nnn.max_u == 0.9
assert nnn.nubins == 3
assert nnn.vbin_size == 0.05
assert nnn.max_v == 0.2
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit max_sep
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.min_sep == 5.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1,
min_u=0.7, nubins=4, ubin_size=0.05,
min_v=-0.2, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.bin_size == 0.1
assert nnn.nbins == 20
assert nnn.min_u == 0.7
assert nnn.ubin_size == 0.05
assert nnn.nubins == 4
assert nnn.min_v == -0.2
assert nnn.vbin_size == 0.05
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit nbins
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep >= 20. # Expanded a bit.
assert nnn.max_sep < 20. * numpy.exp(nnn.bin_size)
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep >= 20.
assert nnn.max_sep < 20. * numpy.exp(nnn.bin_size)
assert nnn.bin_size == 0.1
assert nnn.min_u <= 0.2
assert nnn.min_u >= 0.2 - nnn.ubin_size
assert nnn.max_u == 0.9
assert nnn.ubin_size == 0.03
assert nnn.min_v <= -0.2
assert nnn.min_v >= -0.2 - nnn.vbin_size
assert nnn.max_v >= 0.2
assert nnn.min_v <= 0.2 + nnn.vbin_size
assert nnn.vbin_size == 0.07
check_arrays(nnn)
# Check the use of sep_units
# radians
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5.)
numpy.testing.assert_almost_equal(nnn._max_sep, 20.)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# arcsec
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/3600)
numpy.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/3600)
assert nnn.nbins == 20
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
# Note that logr is in the separation units, not radians.
numpy.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# arcmin
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/60)
numpy.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/60)
assert nnn.nbins == 20
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
numpy.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# degrees
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180)
numpy.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180)
assert nnn.nbins == 20
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
numpy.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# hours
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/12)
numpy.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/12)
assert nnn.nbins == 20
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
numpy.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# Check bin_slop
# Start with default behavior
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.1)
numpy.testing.assert_almost_equal(nnn.bu, 0.03)
numpy.testing.assert_almost_equal(nnn.bv, 0.07)
# Explicitly set bin_slop=1.0 does the same thing.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_slop=1.0,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.1)
numpy.testing.assert_almost_equal(nnn.bu, 0.03)
numpy.testing.assert_almost_equal(nnn.bv, 0.07)
# Use a smaller bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_slop=0.2,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.2
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.02)
numpy.testing.assert_almost_equal(nnn.bu, 0.006)
numpy.testing.assert_almost_equal(nnn.bv, 0.014)
# Use bin_slop == 0
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_slop=0.0,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.0
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.0)
numpy.testing.assert_almost_equal(nnn.bu, 0.0)
numpy.testing.assert_almost_equal(nnn.bv, 0.0)
# Bigger bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_slop=2.0,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 2.0
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.2)
numpy.testing.assert_almost_equal(nnn.bu, 0.06)
numpy.testing.assert_almost_equal(nnn.bv, 0.14)
# With bin_size > 0.1, explicit bin_slop=1.0 is accepted.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.4, bin_slop=1.0,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.4
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.4)
numpy.testing.assert_almost_equal(nnn.bu, 0.03)
numpy.testing.assert_almost_equal(nnn.bv, 0.07)
# But implicit bin_slop is reduced so that b = 0.1
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.4,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.4
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.1)
numpy.testing.assert_almost_equal(nnn.bu, 0.03)
numpy.testing.assert_almost_equal(nnn.bv, 0.07)
numpy.testing.assert_almost_equal(nnn.bin_slop, 0.25)
# Separately for each of the three parameters
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.05,
min_u=0.2, max_u=0.9, ubin_size=0.3,
min_v=-0.2, max_v=0.2, vbin_size=0.17)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.05
assert nnn.ubin_size == 0.3
assert nnn.vbin_size == 0.17
numpy.testing.assert_almost_equal(nnn.b, 0.05)
numpy.testing.assert_almost_equal(nnn.bu, 0.1)
numpy.testing.assert_almost_equal(nnn.bv, 0.1)
numpy.testing.assert_almost_equal(nnn.bin_slop, 1.0) # The stored bin_slop is just for lnr
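# Small helper (not part of TreeCorr; a sketch summarising the behaviour asserted
# above) giving the effective b implied by bin_size and bin_slop for the log-r
# binning: with an explicit bin_slop, b = bin_slop * bin_size; with the default,
# bin_slop is 1.0 for bin_size <= 0.1 and is reduced so that b stays at 0.1 for
# larger bin sizes.
def expected_b(bin_size, bin_slop=None):
    if bin_slop is None:
        bin_slop = 1.0 if bin_size <= 0.1 else 0.1 / bin_size
    return bin_slop * bin_size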
def is_ccw(x1,y1, x2,y2, x3,y3):
# Calculate the cross product of 1->2 with 1->3
x2 -= x1
x3 -= x1
y2 -= y1
y3 -= y1
return x2*y3-x3*y2 > 0.
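# Small sanity example (not part of the original test suite) illustrating is_ccw:
# the triangle (0,0) -> (1,0) -> (0,1) winds counter-clockwise, while visiting
# the same vertices in the opposite order does not.
def test_is_ccw_example():
    assert is_ccw(0., 0., 1., 0., 0., 1.)
    assert not is_ccw(0., 0., 0., 1., 1., 0.)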
def test_direct_count_auto():
# If the catalogs are small enough, we can do a direct count of the number of triangles
    # to see if it comes out right.  This should exactly match the treecorr code if bin_slop=0.
ngal = 100
s = 10.
numpy.random.seed(8675309)
x = numpy.random.normal(0,s, (ngal,) )
y = numpy.random.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = -0.83
max_v = 0.59
nvbins = 20
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0., verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = numpy.log(min_sep)
log_max_sep = numpy.log(max_sep)
true_ntri = numpy.zeros( (nbins, nubins, nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = numpy.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
dik = numpy.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)
djk = numpy.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
ccw = True
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk;
ccw = is_ccw(x[i],y[i],x[j],y[j],x[k],y[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik;
ccw = is_ccw(x[j],y[j],x[i],y[i],x[k],y[k])
else:
d3 = djk; d2 = dij; d1 = dik;
ccw = is_ccw(x[j],y[j],x[k],y[k],x[i],y[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk;
ccw = is_ccw(x[i],y[i],x[k],y[k],x[j],y[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij;
ccw = is_ccw(x[k],y[k],x[i],y[i],x[j],y[j])
else:
d3 = djk; d2 = dik; d1 = dij;
ccw = is_ccw(x[k],y[k],x[j],y[j],x[i],y[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if not ccw:
v = -v
kr = int(numpy.floor( (numpy.log(r)-log_min_sep) / bin_size ))
ku = int(numpy.floor( (u-min_u) / ubin_size ))
kv = int(numpy.floor( (v-min_v) / vbin_size ))
if kr < 0: continue
if kr >= nbins: continue
if ku < 0: continue
if ku >= nubins: continue
if kv < 0: continue
if kv >= nvbins: continue
true_ntri[kr,ku,kv] += 1
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# Repeat with binslop not precisely 0, since the code flow is different for bin_slop == 0.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=1.e-16, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=1.e-16, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# This should be equivalent to processing a cross correlation with each catalog being
# the same thing.
ddd.clear()
ddd.process(cat,cat,cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
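# Helper sketch (not part of the original tests) making explicit the triangle
# parametrisation used in the loops above: with side lengths sorted so that
# d1 >= d2 >= d3, triangles are binned by r = d2, u = d3/d2 and
# v = +/-(d1-d2)/d3, where the sign encodes the winding direction.
def triangle_ruv(d1, d2, d3, ccw=True):
    assert d1 >= d2 >= d3 > 0.
    r = d2
    u = d3 / d2
    v = (d1 - d2) / d3
    if not ccw:
        v = -v
    return r, u, v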
def test_direct_count_cross():
# If the catalogs are small enough, we can do a direct count of the number of triangles
    # to see if it comes out right.  This should exactly match the treecorr code if bin_slop=0.
ngal = 100
s = 10.
numpy.random.seed(8675309)
x1 = numpy.random.normal(0,s, (ngal,) )
y1 = numpy.random.normal(0,s, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1)
x2 = numpy.random.normal(0,s, (ngal,) )
y2 = numpy.random.normal(0,s, (ngal,) )
cat2 = treecorr.Catalog(x=x2, y=y2)
x3 = numpy.random.normal(0,s, (ngal,) )
y3 = numpy.random.normal(0,s, (ngal,) )
cat3 = treecorr.Catalog(x=x3, y=y3)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = -0.83
max_v = 0.59
nvbins = 20
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0., verbose=1)
ddd.process(cat1, cat2, cat3)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = numpy.log(min_sep)
log_max_sep = numpy.log(max_sep)
true_ntri = numpy.zeros( (nbins, nubins, nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(ngal):
d3 = numpy.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
d2 = numpy.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
d1 = numpy.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if d3 == 0.: continue
if d2 == 0.: continue
if d1 == 0.: continue
if d1 < d2 or d2 < d3: continue;
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
r = d2
u = d3/d2
v = (d1-d2)/d3
if not ccw:
v = -v
kr = int(numpy.floor( (numpy.log(r)-log_min_sep) / bin_size ))
ku = int(numpy.floor( (u-min_u) / ubin_size ))
kv = int(numpy.floor( (v-min_v) / vbin_size ))
if kr < 0: continue
if kr >= nbins: continue
if ku < 0: continue
if ku >= nubins: continue
if kv < 0: continue
if kv >= nvbins: continue
true_ntri[kr,ku,kv] += 1
#print('true_ntri = ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# Repeat with binslop not precisely 0, since the code flow is different for bin_slop == 0.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=1.e-16, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('binslop > 0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=1.e-16, verbose=1, max_top=0)
ddd.process(cat1, cat2, cat3)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
def test_direct_partial():
# Test the two ways to only use parts of a catalog:
ngal = 200
s = 10.
numpy.random.seed(8675309)
x1 = numpy.random.normal(0,s, (ngal,) )
y1 = numpy.random.normal(0,s, (ngal,) )
cat1a = treecorr.Catalog(x=x1, y=y1, first_row=28, last_row=144)
x2 = numpy.random.normal(0,s, (ngal,) )
y2 = numpy.random.normal(0,s, (ngal,) )
cat2a = treecorr.Catalog(x=x2, y=y2, first_row=48, last_row=129)
x3 = numpy.random.normal(0,s, (ngal,) )
y3 = numpy.random.normal(0,s, (ngal,) )
cat3a = treecorr.Catalog(x=x3, y=y3, first_row=82, last_row=167)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = -0.83
max_v = 0.59
nvbins = 20
ddda = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0.)
ddda.process(cat1a, cat2a, cat3a)
#print('ddda.ntri = ',ddda.ntri)
log_min_sep = numpy.log(min_sep)
log_max_sep = numpy.log(max_sep)
true_ntri = numpy.zeros( (nbins, nubins, nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(27,144):
for j in range(47,129):
for k in range(81,167):
d3 = numpy.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
d2 = numpy.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
d1 = numpy.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if d3 == 0.: continue
if d2 == 0.: continue
if d1 == 0.: continue
if d1 < d2 or d2 < d3: continue;
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
r = d2
u = d3/d2
v = (d1-d2)/d3
if not ccw:
v = -v
kr = int(numpy.floor( (numpy.log(r)-log_min_sep) / bin_size ))
ku = int(numpy.floor( (u-min_u) / ubin_size ))
kv = int(numpy.floor( (v-min_v) / vbin_size ))
if kr < 0: continue
if kr >= nbins: continue
if ku < 0: continue
if ku >= nubins: continue
if kv < 0: continue
if kv >= nvbins: continue
true_ntri[kr,ku,kv] += 1
#print('true_ntri = ',true_ntri)
#print('diff = ',ddda.ntri - true_ntri)
numpy.testing.assert_array_equal(ddda.ntri, true_ntri)
# Now check that we get the same thing with all the points, but with w=0 for the ones
# we don't want.
w1 = numpy.zeros(ngal)
w1[27:144] = 1.
w2 = numpy.zeros(ngal)
w2[47:129] = 1.
w3 = numpy.zeros(ngal)
w3[81:167] = 1.
cat1b = treecorr.Catalog(x=x1, y=y1, w=w1)
cat2b = treecorr.Catalog(x=x2, y=y2, w=w2)
cat3b = treecorr.Catalog(x=x3, y=y3, w=w3)
dddb = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0.)
dddb.process(cat1b, cat2b, cat3b)
#print('dddb.ntri = ',dddb.ntri)
#print('diff = ',dddb.ntri - true_ntri)
numpy.testing.assert_array_equal(dddb.ntri, true_ntri)
def is_ccw_3d(x1,y1,z1, x2,y2,z2, x3,y3,z3):
# Calculate the cross product of 1->2 with 1->3
x2 -= x1
x3 -= x1
y2 -= y1
y3 -= y1
z2 -= z1
z3 -= z1
# The cross product:
x = y2*z3-y3*z2
y = z2*x3-z3*x2
z = x2*y3-x3*y2
# ccw if the cross product is in the opposite direction of (x1,y1,z1) from (0,0,0)
return x*x1 + y*y1 + z*z1 < 0.
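# Small sanity example (not part of the original test suite) for is_ccw_3d: for
# three points near (1,0,0) the orientation is judged as seen from the origin,
# so swapping the last two vertices flips the result.
def test_is_ccw_3d_example():
    assert is_ccw_3d(1., 0., 0., 1., 0., 1., 1., 1., 0.)
    assert not is_ccw_3d(1., 0., 0., 1., 1., 0., 1., 0., 1.)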
def test_direct_3d_auto():
# This is the same as the above test, but using the 3d correlations
ngal = 100
s = 10.
numpy.random.seed(8675309)
x = numpy.random.normal(312, s, (ngal,) )
y = numpy.random.normal(728, s, (ngal,) )
z = numpy.random.normal(-932, s, (ngal,) )
r = numpy.sqrt( x*x + y*y + z*z )
dec = numpy.arcsin(z/r)
ra = numpy.arctan2(y,x)
cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad')
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = -0.83
max_v = 0.59
nvbins = 20
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0., verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = numpy.log(min_sep)
log_max_sep = numpy.log(max_sep)
true_ntri = numpy.zeros( (nbins, nubins, nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = numpy.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
dik = numpy.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2 + (z[i]-z[k])**2)
djk = numpy.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
ccw = True
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk;
ccw = is_ccw_3d(x[i],y[i],z[i],x[j],y[j],z[j],x[k],y[k],z[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik;
ccw = is_ccw_3d(x[j],y[j],z[j],x[i],y[i],z[i],x[k],y[k],z[k])
else:
d3 = djk; d2 = dij; d1 = dik;
ccw = is_ccw_3d(x[j],y[j],z[j],x[k],y[k],z[k],x[i],y[i],z[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk;
ccw = is_ccw_3d(x[i],y[i],z[i],x[k],y[k],z[k],x[j],y[j],z[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij;
ccw = is_ccw_3d(x[k],y[k],z[k],x[i],y[i],z[i],x[j],y[j],z[j])
else:
d3 = djk; d2 = dik; d1 = dij;
ccw = is_ccw_3d(x[k],y[k],z[k],x[j],y[j],z[j],x[i],y[i],z[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if not ccw:
v = -v
kr = int(numpy.floor( (numpy.log(r)-log_min_sep) / bin_size ))
ku = int(numpy.floor( (u-min_u) / ubin_size ))
kv = int(numpy.floor( (v-min_v) / vbin_size ))
if kr < 0: continue
if kr >= nbins: continue
if ku < 0: continue
if ku >= nubins: continue
if kv < 0: continue
if kv >= nvbins: continue
true_ntri[kr,ku,kv] += 1
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# Repeat with binslop not precisely 0, since the code flow is different for bin_slop == 0.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=1.e-16, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=1.e-16, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# And compare to the cross correlation
ddd.clear()
ddd.process(cat,cat,cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# Also compare to using x,y,z rather than ra,dec,r
cat = treecorr.Catalog(x=x, y=y, z=z)
ddd.process(cat)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
def test_direct_3d_cross():
# This is the same as the above test, but using the 3d correlations
ngal = 100
s = 10.
numpy.random.seed(8675309)
x1 = numpy.random.normal(312, s, (ngal,) )
y1 = numpy.random.normal(728, s, (ngal,) )
z1 = numpy.random.normal(-932, s, (ngal,) )
r1 = numpy.sqrt( x1*x1 + y1*y1 + z1*z1 )
dec1 = numpy.arcsin(z1/r1)
ra1 = numpy.arctan2(y1,x1)
cat1 = treecorr.Catalog(ra=ra1, dec=dec1, r=r1, ra_units='rad', dec_units='rad')
x2 = numpy.random.normal(312, s, (ngal,) )
y2 = numpy.random.normal(728, s, (ngal,) )
z2 = numpy.random.normal(-932, s, (ngal,) )
r2 = numpy.sqrt( x2*x2 + y2*y2 + z2*z2 )
dec2 = numpy.arcsin(z2/r2)
ra2 = numpy.arctan2(y2,x2)
cat2 = treecorr.Catalog(ra=ra2, dec=dec2, r=r2, ra_units='rad', dec_units='rad')
x3 = numpy.random.normal(312, s, (ngal,) )
y3 = numpy.random.normal(728, s, (ngal,) )
z3 = numpy.random.normal(-932, s, (ngal,) )
r3 = numpy.sqrt( x3*x3 + y3*y3 + z3*z3 )
dec3 = numpy.arcsin(z3/r3)
ra3 = numpy.arctan2(y3,x3)
cat3 = treecorr.Catalog(ra=ra3, dec=dec3, r=r3, ra_units='rad', dec_units='rad')
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = -0.83
max_v = 0.59
nvbins = 20
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0., verbose=1)
ddd.process(cat1, cat2, cat3)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = numpy.log(min_sep)
log_max_sep = numpy.log(max_sep)
true_ntri = numpy.zeros( (nbins, nubins, nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(ngal):
d1sq = (x2[j]-x3[k])**2 + (y2[j]-y3[k])**2 + (z2[j]-z3[k])**2
d2sq = (x1[i]-x3[k])**2 + (y1[i]-y3[k])**2 + (z1[i]-z3[k])**2
d3sq = (x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2
d1 = numpy.sqrt(d1sq)
d2 = numpy.sqrt(d2sq)
d3 = numpy.sqrt(d3sq)
if d3 == 0.: continue
if d2 == 0.: continue
if d1 == 0.: continue
if d1 < d2 or d2 < d3: continue;
ccw = is_ccw_3d(x1[i],y1[i],z1[i],x2[j],y2[j],z2[j],x3[k],y3[k],z3[k])
r = d2
u = d3/d2
v = (d1-d2)/d3
if not ccw:
v = -v
kr = int(numpy.floor( (numpy.log(r)-log_min_sep) / bin_size ))
ku = int(numpy.floor( (u-min_u) / ubin_size ))
kv = int(numpy.floor( (v-min_v) / vbin_size ))
if kr < 0: continue
if kr >= nbins: continue
if ku < 0: continue
if ku >= nubins: continue
if kv < 0: continue
if kv >= nvbins: continue
true_ntri[kr,ku,kv] += 1
#print('true_ntri = ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# Repeat with binslop not precisely 0, since the code flow is different for bin_slop == 0.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=1.e-16, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('binslop > 0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=1.e-16, verbose=1, max_top=0)
ddd.process(cat1, cat2, cat3)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
numpy.testing.assert_array_equal(ddd.ntri, true_ntri)
# Also compare to using x,y,z rather than ra,dec,r
cat1 = treecorr.Catalog(x=x1, y=y1, z=z1)
cat2 = treecorr.Catalog(x=x2, y=y2, z=z2)
cat3 = treecorr.Catalog(x=x3, y=y3, z=z3)
ddd.process(cat1, cat2, cat3)
    numpy.testing.assert_array_equal(ddd.ntri, true_ntri)  # dataset target API: numpy.testing.assert_array_equal
# Copyright 2019, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology
# Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export
# licenses, or other export authority as may be required before exporting
# such information to foreign countries or providing access to foreign
# persons.
"""
==============
test_subset.py
==============
Test the subsetter functionality.
"""
import json
import operator
import os
import shutil
import tempfile
import unittest
from os import listdir
from os.path import dirname, join, realpath, isfile, basename
import geopandas as gpd
import importlib_metadata
import netCDF4 as nc
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from jsonschema import validate
from shapely.geometry import Point
from podaac.subsetter import subset
from podaac.subsetter.subset import SERVICE_NAME
from podaac.subsetter import xarray_enhancements as xre
class TestSubsetter(unittest.TestCase):
"""
Unit tests for the L2 subsetter. These tests are all related to the
subsetting functionality itself, and should provide coverage on the
following files:
- podaac.subsetter.subset.py
- podaac.subsetter.xarray_enhancements.py
"""
@classmethod
def setUpClass(cls):
cls.test_dir = dirname(realpath(__file__))
cls.test_data_dir = join(cls.test_dir, 'data')
cls.subset_output_dir = tempfile.mkdtemp(dir=cls.test_data_dir)
cls.test_files = [f for f in listdir(cls.test_data_dir)
if isfile(join(cls.test_data_dir, f)) and f.endswith(".nc")]
cls.history_json_schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://harmony.earthdata.nasa.gov/history.schema.json",
"title": "Data Processing History",
"description": "A history record of processing that produced a given data file. For more information, see: https://wiki.earthdata.nasa.gov/display/TRT/In-File+Provenance+Metadata+-+TRT-42",
"type": ["array", "object"],
"items": {"$ref": "#/definitions/history_record"},
"definitions": {
"history_record": {
"type": "object",
"properties": {
"date_time": {
"description": "A Date/Time stamp in ISO-8601 format, including time-zone, GMT (or Z) preferred",
"type": "string",
"format": "date-time"
},
"derived_from": {
"description": "List of source data files used in the creation of this data file",
"type": ["array", "string"],
"items": {"type": "string"}
},
"program": {
"description": "The name of the program which generated this data file",
"type": "string"
},
"version": {
"description": "The version identification of the program which generated this data file",
"type": "string"
},
"parameters": {
"description": "The list of parameters to the program when generating this data file",
"type": ["array", "string"],
"items": {"type": "string"}
},
"program_ref": {
"description": "A URL reference that defines the program, e.g., a UMM-S reference URL",
"type": "string"
},
"$schema": {
"description": "The URL to this schema",
"type": "string"
}
},
"required": ["date_time", "program"],
"additionalProperties": False
}
}
}
@classmethod
def tearDownClass(cls):
# Remove the temporary directories used to house subset data
shutil.rmtree(cls.subset_output_dir)
def test_subset_variables(self):
"""
Test that all variables present in the original NetCDF file
are present after the subset takes place, and with the same
attributes.
"""
bbox = np.array(((-180, 90), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
in_ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False)
for in_var, out_var in zip(in_ds.data_vars.items(), out_ds.data_vars.items()):
# compare names
assert in_var[0] == out_var[0]
# compare attributes
np.testing.assert_equal(in_var[1].attrs, out_var[1].attrs)
# compare type and dimension names
assert in_var[1].dtype == out_var[1].dtype
assert in_var[1].dims == out_var[1].dims
in_ds.close()
out_ds.close()
def test_subset_bbox(self):
"""
Test that all data present is within the bounding box given,
        and that the correct bounding box is used. This test assumes
that the scanline *is* being cut.
"""
# pylint: disable=too-many-locals
bbox = np.array(((-180, 90), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
lat_var_name, lon_var_name = subset.get_coord_variable_names(out_ds)
lat_var_name = lat_var_name[0]
lon_var_name = lon_var_name[0]
lon_bounds, lat_bounds = subset.convert_bbox(bbox, out_ds, lat_var_name, lon_var_name)
lats = out_ds[lat_var_name].values
lons = out_ds[lon_var_name].values
np.warnings.filterwarnings('ignore')
# Step 1: Get mask of values which aren't in the bounds.
# For lon spatial condition, need to consider the
# lon_min > lon_max case. If that's the case, should do
# an 'or' instead.
oper = operator.and_ if lon_bounds[0] < lon_bounds[1] else operator.or_
# In these two masks, True == valid and False == invalid
lat_truth = np.ma.masked_where((lats >= lat_bounds[0])
& (lats <= lat_bounds[1]), lats).mask
lon_truth = np.ma.masked_where(oper((lons >= lon_bounds[0]),
(lons <= lon_bounds[1])), lons).mask
# combine masks
spatial_mask = np.bitwise_and(lat_truth, lon_truth)
# Create a mask which represents the valid matrix bounds of
# the spatial mask. This is used in the case where a var
# has no _FillValue.
if lon_truth.ndim == 1:
bound_mask = spatial_mask
else:
rows = np.any(spatial_mask, axis=1)
cols = np.any(spatial_mask, axis=0)
bound_mask = np.array([[r & c for c in cols] for r in rows])
# If all the lat/lon values are valid, the file is valid and
# there is no need to check individual variables.
if np.all(spatial_mask):
continue
# Step 2: Get mask of values which are NaN or "_FillValue in
# each variable.
for _, var in out_ds.data_vars.items():
# remove dimension of '1' if necessary
vals = np.squeeze(var.values)
# Get the Fill Value
fill_value = var.attrs.get('_FillValue')
# If _FillValue isn't provided, check that all values
                # are in the valid matrix bounds, then go to the next variable
if fill_value is None:
combined_mask = np.ma.mask_or(spatial_mask, bound_mask)
np.testing.assert_equal(bound_mask, combined_mask)
continue
                # If the shape of this var doesn't match the mask,
# reshape the var so the comparison can be made. Take
# the first index of the unknown dims. This makes
# assumptions about the ordering of the dimensions.
if vals.shape != out_ds[lat_var_name].shape and vals.shape:
slice_list = []
for dim in var.dims:
if dim in out_ds[lat_var_name].dims:
slice_list.append(slice(None))
else:
slice_list.append(slice(0, 1))
vals = np.squeeze(vals[tuple(slice_list)])
# In this mask, False == NaN and True = valid
var_mask = np.invert(np.ma.masked_invalid(vals).mask)
fill_mask = np.invert(np.ma.masked_values(vals, fill_value).mask)
var_mask = np.bitwise_and(var_mask, fill_mask)
# Step 3: Combine the spatial and var mask with 'or'
combined_mask = np.ma.mask_or(var_mask, spatial_mask)
                # Step 4: compare the newly combined mask and the
                # spatial mask created from the lat/lon masks. They
                # should be equal, because 'or'-ing the two masks
                # leaves the out-of-bounds values 'False' as long as
                # the data contains only NaN/fill values at those
                # locations.
np.testing.assert_equal(spatial_mask, combined_mask)
out_ds.close()
@pytest.mark.skip(reason="This is being tested currently. Temporarily skipped.")
def test_subset_no_bbox(self):
"""
Test that the subsetted file is identical to the given file
when a 'full' bounding box is given.
"""
bbox = np.array(((-180, 180), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
# pylint: disable=no-member
in_nc = nc.Dataset(join(self.test_data_dir, file), 'r')
out_nc = nc.Dataset(join(self.subset_output_dir, output_file), 'r')
# Make sure the output dimensions match the input
# dimensions, which means the full file was returned.
for name, dimension in in_nc.dimensions.items():
assert dimension.size == out_nc.dimensions[name].size
in_nc.close()
out_nc.close()
def test_subset_empty_bbox(self):
"""
Test that an empty file is returned when the bounding box
contains no data.
"""
bbox = np.array(((120, 125), (-90, -85)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
empty_dataset = xr.open_dataset(
join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False
)
# Ensure all variables are present but empty.
for variable_name, variable in empty_dataset.data_vars.items():
assert not variable.data
def test_bbox_conversion(self):
"""
Test that the bounding box conversion returns expected
results. Expected results are hand-calculated.
"""
ds_180 = xr.open_dataset(join(self.test_data_dir,
"MODIS_A-JPL-L2P-v2014.0.nc"),
decode_times=False,
decode_coords=False)
ds_360 = xr.open_dataset(join(
self.test_data_dir,
"ascat_20150702_084200_metopa_45145_eps_o_250_2300_ovw.l2.nc"),
decode_times=False,
decode_coords=False)
# Elements in each tuple are:
# ds type, lon_range, expected_result
test_bboxes = [
(ds_180, (-180, 180), (-180, 180)),
(ds_360, (-180, 180), (0, 360)),
(ds_180, (-180, 0), (-180, 0)),
(ds_360, (-180, 0), (180, 360)),
(ds_180, (-80, 80), (-80, 80)),
(ds_360, (-80, 80), (280, 80)),
(ds_180, (0, 180), (0, 180)),
(ds_360, (0, 180), (0, 180)),
(ds_180, (80, -80), (80, -80)),
(ds_360, (80, -80), (80, 280)),
(ds_180, (-80, -80), (-180, 180)),
(ds_360, (-80, -80), (0, 360))
]
lat_var = 'lat'
lon_var = 'lon'
for test_bbox in test_bboxes:
dataset = test_bbox[0]
lon_range = test_bbox[1]
expected_result = test_bbox[2]
actual_result, _ = subset.convert_bbox(np.array([lon_range, [0, 0]]),
dataset, lat_var, lon_var)
np.testing.assert_equal(actual_result, expected_result)
def compare_java(self, java_files, cut):
"""
        Run the L2 subsetter and compare the result to the equivalent
legacy (Java) subsetter result.
Parameters
----------
java_files : list of strings
List of paths to each subsetted Java file.
cut : boolean
            True if the subsetter should cut the scanline and return a compact result.
"""
bbox_map = [("ascat_20150702_084200", ((-180, 0), (-90, 0))),
("ascat_20150702_102400", ((-180, 0), (-90, 0))),
("MODIS_A-JPL", ((65.8, 86.35), (40.1, 50.15))),
("MODIS_T-JPL", ((-78.7, -60.7), (-54.8, -44))),
("VIIRS", ((-172.3, -126.95), (62.3, 70.65))),
("AMSR2-L2B_v08_r38622", ((-180, 0), (-90, 0)))]
for file_str, bbox in bbox_map:
java_file = [file for file in java_files if file_str in file][0]
test_file = [file for file in self.test_files if file_str in file][0]
output_file = "{}_{}".format(self._testMethodName, test_file)
subset.subset(
file_to_subset=join(self.test_data_dir, test_file),
bbox=np.array(bbox),
output_file=join(self.subset_output_dir, output_file),
cut=cut
)
j_ds = xr.open_dataset(join(self.test_data_dir, java_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
py_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
for var_name, var in j_ds.data_vars.items():
# Compare shape
np.testing.assert_equal(var.shape, py_ds[var_name].shape)
# Compare meta
np.testing.assert_equal(var.attrs, py_ds[var_name].attrs)
# Compare data
np.testing.assert_equal(var.values, py_ds[var_name].values)
# Compare meta. History will always be different, so remove
# from the headers for comparison.
del j_ds.attrs['history']
del py_ds.attrs['history']
del py_ds.attrs['history_json']
np.testing.assert_equal(j_ds.attrs, py_ds.attrs)
def test_compare_java_compact(self):
"""
        Tests that the results of the subsetting operation are
equivalent to the Java subsetting result on the same bounding
box. For simplicity the subsetted Java granules have been
manually run and copied into this project. This test DOES
cut the scanline.
"""
java_result_files = [join("java_results", "cut", f) for f in
listdir(join(self.test_data_dir, "java_results", "cut")) if
isfile(join(self.test_data_dir, "java_results", "cut", f))
and f.endswith(".nc")]
self.compare_java(java_result_files, cut=True)
def test_compare_java(self):
"""
        Tests that the results of the subsetting operation are
        equivalent to the Java subsetting result on the same bounding
        box. For simplicity the subsetted Java granules have been
        manually run and copied into this project. This test does NOT
cut the scanline.
"""
java_result_files = [join("java_results", "uncut", f) for f in
listdir(join(self.test_data_dir, "java_results", "uncut")) if
isfile(join(self.test_data_dir, "java_results", "uncut", f))
and f.endswith(".nc")]
self.compare_java(java_result_files, cut=False)
def test_history_metadata_append(self):
"""
Tests that the history metadata header is appended to when it
already exists.
"""
test_file = next(filter(
lambda f: '20180101005944-REMSS-L2P_GHRSST-SSTsubskin-AMSR2-L2B_rt_r29918-v02.0-fv01.0.nc' in f
, self.test_files))
output_file = "{}_{}".format(self._testMethodName, test_file)
subset.subset(
file_to_subset=join(self.test_data_dir, test_file),
bbox=np.array(((-180, 180), (-90.0, 90))),
output_file=join(self.subset_output_dir, output_file)
)
in_nc = xr.open_dataset(join(self.test_data_dir, test_file))
out_nc = xr.open_dataset(join(self.subset_output_dir, output_file))
# Assert that the original granule contains history
assert in_nc.attrs.get('history') is not None
# Assert that input and output files have different history
self.assertNotEqual(in_nc.attrs['history'], out_nc.attrs['history'])
# Assert that last line of history was created by this service
assert SERVICE_NAME in out_nc.attrs['history'].split('\n')[-1]
# Assert that the old history is still in the subsetted granule
assert in_nc.attrs['history'] in out_nc.attrs['history']
def test_history_metadata_create(self):
"""
Tests that the history metadata header is created when it does
not exist. All test granules contain this header already, so
for this test the header will be removed manually from a granule.
"""
test_file = next(filter(
lambda f: '20180101005944-REMSS-L2P_GHRSST-SSTsubskin-AMSR2-L2B_rt_r29918-v02.0-fv01.0.nc' in f
, self.test_files))
output_file = "{}_{}".format(self._testMethodName, test_file)
# Remove the 'history' metadata from the granule
in_nc = xr.open_dataset(join(self.test_data_dir, test_file))
del in_nc.attrs['history']
in_nc.to_netcdf(join(self.subset_output_dir, 'int_{}'.format(output_file)), 'w')
subset.subset(
file_to_subset=join(self.subset_output_dir, "int_{}".format(output_file)),
bbox=np.array(((-180, 180), (-90.0, 90))),
output_file=join(self.subset_output_dir, output_file)
)
out_nc = xr.open_dataset(join(self.subset_output_dir, output_file))
# Assert that the input granule contains no history
assert in_nc.attrs.get('history') is None
# Assert that the history was created by this service
assert SERVICE_NAME in out_nc.attrs['history']
# Assert that the history created by this service is the only
# line present in the history.
assert '\n' not in out_nc.attrs['history']
def test_specified_variables(self):
"""
Test that the variables which are specified when calling the subset
operation are present in the resulting subsetted data file,
and that the variables which are not specified are excluded.
"""
bbox = np.array(((-180, 90), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
in_ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False)
included_variables = set([variable[0] for variable in in_ds.data_vars.items()][::2])
included_variables = list(included_variables)
excluded_variables = list(set(variable[0] for variable in in_ds.data_vars.items())
- set(included_variables))
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file),
variables=included_variables
)
# Get coord variables
lat_var_names, lon_var_names = subset.get_coord_variable_names(in_ds)
lat_var_name = lat_var_names[0]
lon_var_name = lon_var_names[0]
time_var_name = subset.get_time_variable_name(in_ds, in_ds[lat_var_name])
included_variables.append(lat_var_name)
included_variables.append(lon_var_name)
included_variables.append(time_var_name)
included_variables.extend(in_ds.coords.keys())
if lat_var_name in excluded_variables:
excluded_variables.remove(lat_var_name)
if lon_var_name in excluded_variables:
excluded_variables.remove(lon_var_name)
if time_var_name in excluded_variables:
excluded_variables.remove(time_var_name)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False)
out_vars = [out_var for out_var in out_ds.data_vars.keys()]
out_vars.extend(out_ds.coords.keys())
assert set(out_vars) == set(included_variables)
assert set(out_vars).isdisjoint(excluded_variables)
in_ds.close()
out_ds.close()
def test_calculate_chunks(self):
"""
Test that the calculate chunks function in the subset module
correctly calculates and returns the chunks dims dictionary.
"""
rs = np.random.RandomState(0)
dataset = xr.DataArray(
rs.randn(2, 4000, 4001),
dims=['x', 'y', 'z']
).to_dataset(name='foo')
chunk_dict = subset.calculate_chunks(dataset)
assert chunk_dict.get('x') is None
assert chunk_dict.get('y') is None
assert chunk_dict.get('z') == 4000
def test_missing_coord_vars(self):
"""
As of right now, the subsetter expects the data to contain lat
and lon variables. If not present, an error is thrown.
"""
file = 'MODIS_T-JPL-L2P-v2014.0.nc'
ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
# Manually remove var which will cause error when attempting
# to subset.
ds = ds.drop_vars(['lat'])
output_file = '{}_{}'.format('missing_coords', file)
ds.to_netcdf(join(self.subset_output_dir, output_file))
bbox = np.array(((-180, 180), (-90, 90)))
with pytest.raises(ValueError):
subset.subset(
file_to_subset=join(self.subset_output_dir, output_file),
bbox=bbox,
output_file=''
)
def test_data_1D(self):
"""
Test that subsetting a 1-D granule does not result in failure.
"""
merged_jason_filename = 'JA1_GPN_2PeP001_002_20020115_060706_20020115_070316.nc'
output_file = "{}_{}".format(self._testMethodName, merged_jason_filename)
subset.subset(
file_to_subset=join(self.test_data_dir, merged_jason_filename),
bbox=np.array(((-180, 0), (-90, 0))),
output_file=join(self.subset_output_dir, output_file)
)
xr.open_dataset(join(self.subset_output_dir, output_file))
def test_get_coord_variable_names(self):
"""
Test that the expected coord variable names are returned
"""
file = 'MODIS_T-JPL-L2P-v2014.0.nc'
ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
old_lat_var_name = 'lat'
old_lon_var_name = 'lon'
lat_var_name, lon_var_name = subset.get_coord_variable_names(ds)
assert lat_var_name[0] == old_lat_var_name
assert lon_var_name[0] == old_lon_var_name
new_lat_var_name = 'latitude'
new_lon_var_name = 'x'
ds = ds.rename({old_lat_var_name: new_lat_var_name,
old_lon_var_name: new_lon_var_name})
lat_var_name, lon_var_name = subset.get_coord_variable_names(ds)
assert lat_var_name[0] == new_lat_var_name
assert lon_var_name[0] == new_lon_var_name
def test_cannot_get_coord_variable_names(self):
"""
Test that, when given a dataset with coord vars which are not
expected, a ValueError is raised.
"""
file = 'MODIS_T-JPL-L2P-v2014.0.nc'
ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
old_lat_var_name = 'lat'
new_lat_var_name = 'foo'
ds = ds.rename({old_lat_var_name: new_lat_var_name})
# Remove 'coordinates' attribute
for var_name, var in ds.items():
if 'coordinates' in var.attrs:
del var.attrs['coordinates']
self.assertRaises(ValueError, subset.get_coord_variable_names, ds)
def test_get_spatial_bounds(self):
"""
Test that the get_spatial_bounds function works as expected.
The get_spatial_bounds function should return lat/lon min/max
which is masked and scaled for both variables. The values
should also be adjusted for -180,180/-90,90 coordinate types
"""
ascat_filename = 'ascat_20150702_084200_metopa_45145_eps_o_250_2300_ovw.l2.nc'
ghrsst_filename = '20190927000500-JPL-L2P_GHRSST-SSTskin-MODIS_A-D-v02.0-fv01.0.nc'
ascat_dataset = xr.open_dataset(
join(self.test_data_dir, ascat_filename),
decode_times=False,
decode_coords=False,
mask_and_scale=False
)
ghrsst_dataset = xr.open_dataset(
join(self.test_data_dir, ghrsst_filename),
decode_times=False,
decode_coords=False,
mask_and_scale=False
)
# ascat longitude is 0 to 360, ghrsst MODIS A is -180 to 180
# Both have metadata for valid_min
# Manually calculated spatial bounds
ascat_expected_lat_min = -89.4
ascat_expected_lat_max = 89.2
ascat_expected_lon_min = -180.0
ascat_expected_lon_max = 180.0
ghrsst_expected_lat_min = -77.2
ghrsst_expected_lat_max = -53.6
ghrsst_expected_lon_min = -170.5
ghrsst_expected_lon_max = -101.7
min_lon, max_lon, min_lat, max_lat = subset.get_spatial_bounds(
dataset=ascat_dataset,
lat_var_names=['lat'],
lon_var_names=['lon']
).flatten()
assert np.isclose(min_lat, ascat_expected_lat_min)
assert np.isclose(max_lat, ascat_expected_lat_max)
assert np.isclose(min_lon, ascat_expected_lon_min)
assert np.isclose(max_lon, ascat_expected_lon_max)
# Remove the label from the dataset coordinate variables indicating the valid_min.
del ascat_dataset['lat'].attrs['valid_min']
del ascat_dataset['lon'].attrs['valid_min']
min_lon, max_lon, min_lat, max_lat = subset.get_spatial_bounds(
dataset=ascat_dataset,
lat_var_names=['lat'],
lon_var_names=['lon']
).flatten()
assert np.isclose(min_lat, ascat_expected_lat_min)
assert np.isclose(max_lat, ascat_expected_lat_max)
assert np.isclose(min_lon, ascat_expected_lon_min)
assert np.isclose(max_lon, ascat_expected_lon_max)
# Repeat test, but with GHRSST granule
min_lon, max_lon, min_lat, max_lat = subset.get_spatial_bounds(
dataset=ghrsst_dataset,
lat_var_names=['lat'],
lon_var_names=['lon']
).flatten()
assert np.isclose(min_lat, ghrsst_expected_lat_min)
assert np.isclose(max_lat, ghrsst_expected_lat_max)
assert np.isclose(min_lon, ghrsst_expected_lon_min)
assert np.isclose(max_lon, ghrsst_expected_lon_max)
# Remove the label from the dataset coordinate variables indicating the valid_min.
del ghrsst_dataset['lat'].attrs['valid_min']
del ghrsst_dataset['lon'].attrs['valid_min']
min_lon, max_lon, min_lat, max_lat = subset.get_spatial_bounds(
dataset=ghrsst_dataset,
lat_var_names=['lat'],
lon_var_names=['lon']
).flatten()
assert np.isclose(min_lat, ghrsst_expected_lat_min)
assert np.isclose(max_lat, ghrsst_expected_lat_max)
assert np.isclose(min_lon, ghrsst_expected_lon_min)
assert np.isclose(max_lon, ghrsst_expected_lon_max)
def test_shapefile_subset(self):
"""
Test that using a shapefile to subset data instead of a bbox
works as expected
"""
shapefile = 'test.shp'
ascat_filename = 'ascat_20150702_084200_metopa_45145_eps_o_250_2300_ovw.l2.nc'
output_filename = f'{self._testMethodName}_{ascat_filename}'
shapefile_file_path = join(self.test_data_dir, 'test_shapefile_subset', shapefile)
ascat_file_path = join(self.test_data_dir, ascat_filename)
output_file_path = join(self.subset_output_dir, output_filename)
subset.subset(
file_to_subset=ascat_file_path,
bbox=None,
output_file=output_file_path,
shapefile=shapefile_file_path
)
# Check that each point of data is within the shapefile
shapefile_df = gpd.read_file(shapefile_file_path)
with xr.open_dataset(output_file_path) as result_dataset:
def in_shape(lon, lat):
if np.isnan(lon) or np.isnan(lat):
return
point = Point(lon, lat)
point_in_shapefile = shapefile_df.contains(point)
assert point_in_shapefile[0]
in_shape_vec =
|
np.vectorize(in_shape)
|
numpy.vectorize
|
import cPickle
import numpy as np
from matplotlib import pyplot as plt
Results = cPickle.load(open("Results.p","rb"))
rewards = {}
for traj in Results:
for ep in traj:
ao, ac, r = ep
for key in r:
if key not in rewards:
rewards[key] = [np.sum(r[key])]
else:
rewards[key].append(
|
np.sum(r[key])
|
numpy.sum
|
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
from sklearn.metrics.pairwise import paired_cosine_distances
from .base import Similarity
def split_chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
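# Illustrative usage of split_chunks (a hedged sketch, not part of the original
# module): chunking a five-element list with n=2 yields successive slices.
# >>> list(split_chunks(['a', 'b', 'c', 'd', 'e'], 2))
# [['a', 'b'], ['c', 'd'], ['e']]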
class UniversalSentenceEncoderSimilarity(Similarity):
BATCH_SIZE = 5
def __init__(self):
self.model = hub.load('https://tfhub.dev/google/universal-sentence-encoder-multilingual/3')
def score(self, dataset: pd.DataFrame) -> pd.DataFrame:
sentences1 = dataset['ementa1'].tolist()
sentences2 = dataset['ementa2'].tolist()
sentences = list(set(sentences1 + sentences2))
embeddings = self.__embeddings(sentences)
emb_dict = {sent: emb for sent, emb in zip(sentences, embeddings)}
embeddings1 = [emb_dict[sent] for sent in sentences1]
embeddings2 = [emb_dict[sent] for sent in sentences2]
cosine_scores = 1 - paired_cosine_distances(embeddings1, embeddings2)
dataset['score'] = cosine_scores
return dataset
def __embeddings(self, documents):
chunks = split_chunks(documents, self.BATCH_SIZE)
embeddings = None
for chunk in chunks:
documents_embed = self.model(chunk)
documents_embed = documents_embed.numpy()
if embeddings is None:
embeddings = documents_embed
else:
embeddings =
|
np.concatenate((embeddings, documents_embed), axis=0)
|
numpy.concatenate
|
"""Figure plotting utilities for figures in Omholt and Kirkwood (2021)."""
import os
import numpy as np
from matplotlib import pyplot as plt
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
def plot_fig_1(
t_m_captivity,
t_m_wild,
t_m_hyp_wt,
captivity_mean,
captivity_std,
wild_mean,
hyp_wt_mean,
hyp_wt_std,
):
captivity_x = np.arange(0, t_m_captivity)
wild_x = np.arange(0, t_m_wild)
fig, ax = plt.subplots(figsize=(6, 6))
ax.plot(captivity_x, captivity_mean, 'r-')
ax.fill_between(
range(t_m_captivity),
captivity_mean - 3.0 * captivity_std,
captivity_mean + 3.0 * captivity_std,
color='pink',
alpha=0.5,
)
ax.plot(wild_x, wild_mean, 'b-')
ax.fill_between(
range(t_m_hyp_wt),
hyp_wt_mean - 3.0 * hyp_wt_std,
hyp_wt_mean + 3.0 * hyp_wt_std,
color='lightblue',
alpha=0.5,
)
# Plotting wild data (extraction from Austad (1989))
X_C = np.genfromtxt(f'{ROOT_DIR}/data/austad_1989/captivity_x.txt')
Y_C = np.genfromtxt(f'{ROOT_DIR}/data/austad_1989/captivity_y.txt')
ax.plot(X_C, Y_C, 'ro', markersize=4)
X_W = [0, 5 * 100 / 30, 10 * 100 / 30, 15 * 100 / 30, 20 * 100 / 30, 30 * 100 / 30]
Y_W = [1.0, 0.5498, 0.268, 0.148, 0.05, 0.0]
ax.plot(X_W, Y_W, 'bo', markersize=4)
afont = {'fontname': 'Arial'}
ax.set_xlabel('Time', fontsize=12, **afont)
ax.set_ylabel('Cohort survivorship', fontsize=12, **afont)
x_size = 10
plt.rc('xtick', labelsize=x_size)
y_size = 10
plt.rc('ytick', labelsize=y_size)
fig.tight_layout()
figure = plt.gcf()
figure.set_size_inches(3.42, 3.42)
figure.savefig(f'{ROOT_DIR}/figures/PNAS_fig1_Frontinella.pdf', dpi=1200, bbox_inches='tight')
def plot_fig_2(t_m, mean_diff, std_diff, repetition_count, save_pdf=True):
C = np.arange(0, t_m, 1, dtype=int)
fig1, ax = plt.subplots(figsize=(6, 6))
(l1,) = ax.plot(C, mean_diff[0])
ax.fill_between(
range(t_m),
mean_diff[0] - 3.0 * std_diff[0] / np.sqrt(repetition_count),
mean_diff[0] + 3.0 * std_diff[0] / np.sqrt(repetition_count),
alpha=0.5,
)
(l2,) = ax.plot(C, mean_diff[1])
ax.fill_between(
range(t_m),
mean_diff[1] - 3.0 * std_diff[1] / np.sqrt(repetition_count),
mean_diff[1] + 3.0 * std_diff[1] / np.sqrt(repetition_count),
alpha=0.5,
)
(l3,) = ax.plot(C, mean_diff[2])
ax.fill_between(
range(t_m),
mean_diff[2] - 3.0 * std_diff[2] / np.sqrt(repetition_count),
mean_diff[2] + 3.0 * std_diff[2] / np.sqrt(repetition_count),
alpha=0.5,
)
(l4,) = ax.plot(C, mean_diff[3])
ax.fill_between(
range(t_m),
mean_diff[3] - 3.0 * std_diff[3] / np.sqrt(repetition_count),
mean_diff[3] + 3.0 * std_diff[3] / np.sqrt(repetition_count),
alpha=0.5,
)
ax.legend(
(l1, l2, l3, l4),
(r'$\epsilon$=0.01', r'$\epsilon$=0.02', r'$\epsilon$=0.03', r'$\epsilon$=0.04'),
)
afont = {'fontname': 'Arial'}
ax.set_xlabel('Time', fontsize=12, **afont)
ax.set_ylabel("# of mutant - wild type individuals", fontsize=12, **afont)
x_size = 10
plt.rc('xtick', labelsize=x_size)
y_size = 10
plt.rc('ytick', labelsize=y_size)
fig1.tight_layout()
figure = plt.gcf()
figure.set_size_inches(3.42, 3.42)
if save_pdf:
plt.savefig(f'{ROOT_DIR}/figures/PNAS_fig2_Frontinella.pdf', dpi=1200, bbox_inches='tight')
def plot_fig_3(fitness_stats_wt, fitness_stats_mut):
r0_wt_mean = fitness_stats_wt['r0_mean']
r0_wt_sem = fitness_stats_wt['r0_sem']
r_wt_mean = fitness_stats_wt['r_mean']
r_wt_sem = fitness_stats_wt['r_sem']
r0_mut_mean = fitness_stats_mut['r0_mean']
r0_mut_sem = fitness_stats_mut['r0_sem']
r_mut_mean = fitness_stats_mut['r_mean']
r_mut_sem = fitness_stats_mut['r_sem']
y1_pos = np.array([0, 3, 6, 9])
y2_pos = np.array([1, 4, 7, 10])
y3_pos = np.array([2, 5, 8, 11])
y4_pos = np.array([12, 15, 18, 21])
y5_pos = np.array([13, 16, 19, 22])
y6_pos = np.array([14, 17, 20])
y7_pos = [11]
dummy_R0 = np.zeros(4)
dummy_r = np.zeros(3)
fig, ax1 = plt.subplots(figsize=(6, 6))
ax1.bar(
y1_pos,
r0_wt_mean,
width=1.0,
yerr=r0_wt_sem,
align='center',
alpha=0.4,
ecolor='black',
capsize=3,
color='C0',
)
ax1.bar(
y2_pos,
r0_mut_mean,
width=1.0,
yerr=r0_mut_sem,
align='center',
alpha=0.8,
ecolor='black',
capsize=3,
color='C0',
)
ax1.bar(
y3_pos, dummy_R0, width=0.3, color='w'
) # The width spec does not seem to work. width only refers to the relative width in the slot.
ax1.set_ylabel('R0', fontsize=14)
ax1.set_ylim(0.92 * r0_wt_mean[0], 1.005 * r0_wt_mean[0])
ax2 = ax1.twinx()
ax2.bar(
y4_pos,
r_wt_mean,
width=1.0,
yerr=r_wt_sem,
align='center',
alpha=0.4,
ecolor='black',
capsize=3,
color='C3',
)
ax2.bar(
y5_pos,
r_mut_mean,
width=1.0,
yerr=r_mut_sem,
align='center',
alpha=0.8,
ecolor='black',
capsize=3,
color='C3',
)
ax2.bar(y6_pos, dummy_r, width=0.3, color='w')
ax2.set_ylabel('r', fontsize=14)
ax2.set_ylim(0.95 * r_wt_mean[0], 1.0045 * r_wt_mean[0])
ax2.set_xticks(y7_pos)
xticks = [r'wt($\epsilon$) vs mut($\epsilon$)']
ax1.set_xticklabels(xticks, fontsize=13)
fig.tight_layout()
figure = plt.gcf()
figure.set_size_inches(3.42, 3.42)
plt.savefig(f'{ROOT_DIR}/figures/PNAS_fig3_Frontinella.pdf', dpi=1200, bbox_inches='tight')
def plot_fig_4(
t_m_cap_f,
t_m_cap_m,
t_m_wild_f,
t_m_wild_m,
mean_cap_f,
mean_cap_m,
mean_wild_f,
mean_wild_m,
):
fig, ax = plt.subplots(figsize=(6, 6))
# Plotting captive females (extraction from Kawasaki et al (2008))
X_C_F = np.genfromtxt(f'{ROOT_DIR}/data/kawasaki_2008/captivity_females_x.txt')
Y_C_F = np.genfromtxt(f'{ROOT_DIR}/data/kawasaki_2008/captivity_females_y.txt')
ax.plot(X_C_F, Y_C_F, 'ro', markersize=4)
C1 = np.arange(0, t_m_cap_f)
ax.plot(C1, mean_cap_f, 'r-')
# Plotting captive males (extraction from Kawasaki et al (2008))
X_C_M = np.genfromtxt(f'{ROOT_DIR}/data/kawasaki_2008/captivity_males_x.txt')
Y_C_M = np.genfromtxt(f'{ROOT_DIR}/data/kawasaki_2008/captivity_males_y.txt')
ax.plot(X_C_M, Y_C_M, 'bo', markersize=4)
C2 = np.arange(0, t_m_cap_m)
ax.plot(C2, mean_cap_m, 'b-')
# Plotting wild females (extraction from Kawasaki et al (2008))
X_W_F = np.genfromtxt(f'{ROOT_DIR}/data/kawasaki_2008/wild_females_x.txt')
Y_W_F = np.genfromtxt(f'{ROOT_DIR}/data/kawasaki_2008/wild_females_y.txt')
ax.plot(X_W_F, Y_W_F, 'ro', markersize=4)
C3 = np.arange(0, t_m_wild_f)
ax.plot(C3, mean_wild_f, 'r-')
# Plotting wild males (extraction from Kawasaki et al (2008))
X_W_M = np.genfromtxt(f'{ROOT_DIR}/data/kawasaki_2008/wild_males_x.txt')
Y_W_M =
|
np.genfromtxt(f'{ROOT_DIR}/data/kawasaki_2008/wild_males_y.txt')
|
numpy.genfromtxt
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test desimodel.focalplane.
"""
import unittest
import numpy as np
from ..focalplane import *
from .. import io
from astropy.table import Table
class TestFocalplane(unittest.TestCase):
"""Test desimodel.focalplane.
"""
def test_random_offsets(self):
"""Test generating random positioner offsets.
"""
dx, dy = generate_random_centroid_offsets(1.0)
self.assertEqual(dx.shape, dy.shape)
dr = np.sqrt(dx**2 + dy**2)
self.assertAlmostEqual(np.sqrt(np.average(dr**2)), 1.0)
self.assertLess(np.max(dr), 7)
def test_check_radec(self):
"""Test the RA, Dec bounds checking.
"""
F = FocalPlane()
with self.assertRaises(ValueError):
F._check_radec(365.0, 0.0)
with self.assertRaises(ValueError):
F._check_radec(0.0, 100.0)
def test_set_tele_pointing(self):
"""Test setting the RA, Dec by hand.
"""
F = FocalPlane()
self.assertEqual(F.ra, 0.0)
self.assertEqual(F.dec, 0.0)
F.set_tele_pointing(180.0, 45.0)
self.assertEqual(F.ra, 180.0)
self.assertEqual(F.dec, 45.0)
def test_get_radius(self):
"""Tests converting x, y coordinates on the focal plane to a radius in degrees and mm
"""
ps = io.load_platescale()
for i in np.linspace(0, len(ps)-1, 10).astype(int):
degree = get_radius_deg(0, ps['radius'][i])
self.assertAlmostEqual(degree, ps['theta'][i])
radius = get_radius_mm(ps['theta'][i])
self.assertAlmostEqual(radius, ps['radius'][i])
#- Rotational invariance
d1 = get_radius_deg(0, ps['radius'][10])
d2 = get_radius_deg(ps['radius'][10], 0)
d3 = get_radius_deg(0, -ps['radius'][10])
d4 = get_radius_deg(-ps['radius'][10], 0)
self.assertAlmostEqual(d1, d2)
self.assertAlmostEqual(d1, d3)
self.assertAlmostEqual(d1, d4)
#- lists and arrays should also work
degrees = get_radius_deg([0,0,0,0], ps['radius'][0:4])
self.assertTrue(np.allclose(degrees, ps['theta'][0:4]))
radius = get_radius_mm(ps['theta'][100:110])
self.assertTrue(np.allclose(radius, ps['radius'][100:110]))
def test_FocalPlane(self):
n = 20
x, y = np.random.uniform(-100, 100, size=(2,n))
f = FocalPlane(ra=100, dec=20)
ra, dec = f.xy2radec(x, y)
xx, yy = f.radec2xy(ra, dec)
self.assertLess(np.max(np.abs(xx-x)), 1e-4)
self.assertLess(np.max(np.abs(yy-y)), 1e-4)
def test_xy2qs(self):
x, y = qs2xy(0.0, 100); self.assertAlmostEqual(y, 0.0)
x, y = qs2xy(90.0, 100); self.assertAlmostEqual(x, 0.0)
x, y = qs2xy(180.0, 100); self.assertAlmostEqual(y, 0.0)
x, y = qs2xy(270.0, 100); self.assertAlmostEqual(x, 0.0)
q, s = xy2qs(0.0, 100.0); self.assertAlmostEqual(q, 90.0)
q, s = xy2qs(100.0, 0.0); self.assertAlmostEqual(q, 0.0)
q, s = xy2qs(-100.0, 100.0); self.assertAlmostEqual(q, 135.0)
n = 100
s = 410*np.sqrt(np.random.uniform(0, 1, size=n))
q = np.random.uniform(0, 360, size=n)
x, y = qs2xy(q, s)
qq, ss = xy2qs(x, y)
xx, yy = qs2xy(qq, ss)
self.assertLess(np.max(np.abs(xx-x)), 1e-4)
self.assertLess(np.max(np.abs(yy-y)), 1e-4)
self.assertLess(np.max(np.abs(ss-s)), 1e-4)
dq = (qq-q) % 360
dq[dq>180] -= 360
self.assertLess(np.max(np.abs(dq)), 1e-4)
def test_xy2radec_roundtrip(self):
"""Tests the consistency between the conversion functions
radec2xy and xy2radec. Also tests the accuracy of the xy2radec
on particular cases.
"""
n = 100
r = 410 * np.sqrt(np.random.uniform(0, 1, size=n))
theta = np.random.uniform(0, 2*np.pi, size=n)
x = r*np.cos(theta)
y = r*np.sin(theta)
test_telra = [0.0, 0.0, 90.0, 30.0]
test_teldec = [0.0, 90.0, 0.0, -30.0]
test_telra = np.concatenate([test_telra, np.random.uniform(0, 360, size=5)])
test_teldec = np.concatenate([test_teldec, np.random.uniform(-90, 90, size=5)])
for telra, teldec in zip(test_telra, test_teldec):
ra, dec = xy2radec(telra, teldec, x, y)
xx, yy = radec2xy(telra, teldec, ra, dec)
dx = xx-x
dy = yy-y
self.assertLess(np.max(np.abs(dx)), 1e-4) #- 0.1 um
self.assertLess(np.max(np.abs(dy)), 1e-4) #- 0.1 um
def test_xy2radec_orientation(self):
"""Test that +x = -RA and +y = +dec"""
n = 5
x = np.linspace(-400, 400, n)
y = np.zeros_like(x)
ii = np.arange(n, dtype=int)
for teldec in [-30, 0, 30]:
ra, dec = xy2radec(0.0, teldec, x, y)
self.assertTrue(np.all(np.argsort((ra+180) % 360) == ii[-1::-1]))
ra, dec = xy2radec(359.9, teldec, x, y)
self.assertTrue(np.all(np.argsort((ra+180) % 360) == ii[-1::-1]))
ra, dec = xy2radec(30.0, teldec, x, y)
self.assertTrue(np.all(np.argsort(ra) == ii[-1::-1]))
y =
|
np.linspace(-400, 400, n)
|
numpy.linspace
|
#!/usr/bin/env python3
"""
The plotting wrappers that add functionality to various `~matplotlib.axes.Axes`
methods. "Wrapped" `~matplotlib.axes.Axes` methods accept the additional
arguments documented in the wrapper function.
"""
# NOTE: Two possible workflows are 1) make horizontal functions use wrapped
# vertical functions, then flip things around inside apply_cycle or by
# creating undocumented 'plot', 'scatter', etc. methods in Axes that flip
# arguments around by reading an 'orientation' key or 2) make separately
# wrapped chains of horizontal functions and vertical functions whose 'extra'
# wrappers jointly refer to a hidden helper function and create documented
# 'plotx', 'scatterx', etc. that flip arguments around before sending to
# superclass 'plot', 'scatter', etc. Opted for the latter approach.
import functools
import inspect
import re
import sys
from numbers import Integral
import matplotlib.artist as martist
import matplotlib.axes as maxes
import matplotlib.cm as mcm
import matplotlib.collections as mcollections
import matplotlib.colors as mcolors
import matplotlib.container as mcontainer
import matplotlib.contour as mcontour
import matplotlib.font_manager as mfonts
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.patheffects as mpatheffects
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import numpy as np
import numpy.ma as ma
from .. import colors as pcolors
from .. import constructor
from .. import ticker as pticker
from ..config import rc
from ..internals import ic # noqa: F401
from ..internals import (
_dummy_context,
_getattr_flexible,
_not_none,
_pop_props,
_state_context,
docstring,
warnings,
)
from ..utils import edges, edges2d, to_rgb, to_xyz, units
try:
from cartopy.crs import PlateCarree
except ModuleNotFoundError:
PlateCarree = object
__all__ = [
'default_latlon',
'default_transform',
'standardize_1d',
'standardize_2d',
'indicate_error',
'apply_cmap',
'apply_cycle',
'colorbar_extras',
'legend_extras',
'text_extras',
'vlines_extras',
'hlines_extras',
'scatter_extras',
'scatterx_extras',
'bar_extras',
'barh_extras',
'fill_between_extras',
'fill_betweenx_extras',
'boxplot_extras',
'violinplot_extras',
]
# Positional args that can be passed as out-of-order keywords. Used by standardize_1d
# NOTE: The 'barh' interpretation represents a breaking change from the default
# (y, width, height, left) behavior. Want to have consistent interpretation
# of vertical or horizontal bar 'width' with 'width' key or 3rd positional arg.
# Internal hist() func uses positional arguments when calling bar() so this is fine.
# An illustrative translation sketch follows these mappings.
KEYWORD_TO_POSITIONAL_INSERT = {
'fill_between': ('x', 'y1', 'y2'),
'fill_betweenx': ('y', 'x1', 'x2'),
'vlines': ('x', 'ymin', 'ymax'),
'hlines': ('y', 'xmin', 'xmax'),
'bar': ('x', 'height'),
'barh': ('y', 'height'),
'parametric': ('x', 'y', 'values'),
'boxplot': ('positions',), # use as x-coordinates during wrapper processing
'violinplot': ('positions',),
}
KEYWORD_TO_POSITIONAL_APPEND = {
'bar': ('width', 'bottom'),
'barh': ('width', 'left'),
}
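# Illustrative sketch (an assumed usage example, not part of the original
# module): standardize_1d consults the mappings above to move recognized
# keywords into the positional argument list before dispatching. A call such as
# ax.bar(x=[0, 1, 2], height=[2, 3, 1], width=0.8) is roughly rearranged as:
#   args, kwargs = [], {'x': [0, 1, 2], 'height': [2, 3, 1], 'width': 0.8}
#   for idx, key in enumerate(KEYWORD_TO_POSITIONAL_INSERT['bar']):  # ('x', 'height')
#       if key in kwargs:
#           args.insert(idx, kwargs.pop(key))      # args -> [x, height]
#   for key in KEYWORD_TO_POSITIONAL_APPEND['bar']:                  # ('width', 'bottom')
#       if key in kwargs:
#           args.append(kwargs.pop(key))           # args -> [x, height, width]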
# Consistent keywords for cmap plots. Used by apply_cmap to pass correct plural
# or singular form to matplotlib function.
STYLE_ARGS_TRANSLATE = {
'contour': ('colors', 'linewidths', 'linestyles'),
'tricontour': ('colors', 'linewidths', 'linestyles'),
'pcolor': ('edgecolors', 'linewidth', 'linestyle'),
'pcolormesh': ('edgecolors', 'linewidth', 'linestyle'),
'pcolorfast': ('edgecolors', 'linewidth', 'linestyle'),
'tripcolor': ('edgecolors', 'linewidth', 'linestyle'),
'parametric': ('color', 'linewidth', 'linestyle'),
'hexbin': ('edgecolors', 'linewidths', 'linestyles'),
'hist2d': ('edgecolors', 'linewidths', 'linestyles'),
'barbs': ('barbcolor', 'linewidth', 'linestyle'),
'quiver': ('color', 'linewidth', 'linestyle'), # applied to arrow *outline*
'streamplot': ('color', 'linewidth', 'linestyle'),
'spy': ('color', 'linewidth', 'linestyle'),
'matshow': ('color', 'linewidth', 'linestyle'),
}
docstring.snippets['axes.autoformat'] = """
data : dict-like, optional
A dict-like dataset container (e.g., `~pandas.DataFrame` or `~xarray.DataArray`).
If passed, positional arguments must be valid `data` keys and the arrays used for
plotting are retrieved with ``data[key]``. This is a native `matplotlib feature \
<https://matplotlib.org/stable/gallery/misc/keyword_plotting.html>`__
previously restricted to just `~matplotlib.axes.Axes.plot`
and `~matplotlib.axes.Axes.scatter`.
autoformat : bool, optional
Whether *x* axis labels, *y* axis labels, axis formatters, axes titles,
legend labels, and colorbar labels are automatically configured when
a `~pandas.Series`, `~pandas.DataFrame` or `~xarray.DataArray` is passed
to the plotting command. Default is :rc:`autoformat`.
"""
docstring.snippets['axes.cmap_norm'] = """
cmap : colormap spec, optional
The colormap specifier, passed to the `~proplot.constructor.Colormap`
constructor.
cmap_kw : dict-like, optional
Passed to `~proplot.constructor.Colormap`.
norm : normalizer spec, optional
The colormap normalizer, used to warp data before passing it
to `~proplot.colors.DiscreteNorm`. This is passed to the
`~proplot.constructor.Norm` constructor.
norm_kw : dict-like, optional
Passed to `~proplot.constructor.Norm`.
extend : {{'neither', 'min', 'max', 'both'}}, optional
Whether to assign unique colors to out-of-bounds data and draw
"extensions" (triangles, by default) on the colorbar.
"""
docstring.snippets['axes.levels_values'] = """
N
Shorthand for `levels`.
levels : int or list of float, optional
The number of level edges or a list of level edges. If the former,
`locator` is used to generate this many level edges at "nice" intervals.
If the latter, the levels should be monotonically increasing or
decreasing (note that decreasing levels will only work with ``pcolor``
plots, not ``contour`` plots). Default is :rc:`image.levels`.
values : int or list of float, optional
The number of level centers or a list of level centers. If the former,
`locator` is used to generate this many level centers at "nice" intervals.
If the latter, levels are inferred using `~proplot.utils.edges`.
This will override any `levels` input.
discrete : bool, optional
If ``False``, the `~proplot.colors.DiscreteNorm` is not applied to the
colormap when ``levels=N`` or ``levels=array_of_values`` are not
explicitly requested. Instead, the number of levels in the colormap will be
roughly controlled by :rcraw:`image.lut`. This has a similar effect
to using `levels=large_number` but it may improve rendering speed.
By default, this is ``False`` only for `~matplotlib.axes.Axes.imshow`,
`~matplotlib.axes.Axes.matshow`, `~matplotlib.axes.Axes.spy`,
`~matplotlib.axes.Axes.hexbin`, and `~matplotlib.axes.Axes.hist2d` plots.
"""
docstring.snippets['axes.vmin_vmax'] = """
vmin, vmax : float, optional
Used to determine level locations if `levels` or `values` is an integer.
Actual levels may not fall exactly on `vmin` and `vmax`, but the minimum
level will be no smaller than `vmin` and the maximum level will be
no larger than `vmax`. If `vmin` or `vmax` are not provided, the
minimum and maximum data values are used.
"""
docstring.snippets['axes.auto_levels'] = """
inbounds : bool, optional
If ``True`` (the default), when automatically selecting levels in the presence
of hard *x* and *y* axis limits (i.e., when `~matplotlib.axes.Axes.set_xlim`
or `~matplotlib.axes.Axes.set_ylim` have been called previously), only the
in-bounds data is sampled. Default is :rc:`image.inbounds`.
locator : locator-spec, optional
The locator used to determine level locations if `levels` or `values`
is an integer and `vmin` and `vmax` were not provided. Passed to the
`~proplot.constructor.Locator` constructor. Default is
`~matplotlib.ticker.MaxNLocator` with ``levels`` integer levels.
locator_kw : dict-like, optional
Passed to `~proplot.constructor.Locator`.
symmetric : bool, optional
If ``True``, automatically generated levels are symmetric
about zero.
positive : bool, optional
If ``True``, automatically generated levels are positive
with a minimum at zero.
negative : bool, optional
If ``True``, automatically generated levels are negative
with a maximum at zero.
nozero : bool, optional
If ``True``, ``0`` is removed from the level list. This is
mainly useful for `~matplotlib.axes.Axes.contour` plots.
"""
_lines_docstring = """
Support overlaying and stacking successive columns of data and support
different colors for "negative" and "positive" lines.
Important
---------
This function wraps `~matplotlib.axes.Axes.{prefix}lines`.
Parameters
----------
*args : ({y}1,), ({x}, {y}1), or ({x}, {y}1, {y}2)
The *{x}* and *{y}* coordinates. If `{x}` is not provided, it will be
inferred from `{y}1`. If `{y}1` and `{y}2` are provided, this function will
draw lines between these points. If `{y}1` or `{y}2` are 2D, this
function is called with each column. The default value for `{y}2` is ``0``.
stack, stacked : bool, optional
Whether to "stack" successive columns of the `{y}1` array. If this is
``True`` and `{y}2` was provided, it will be ignored.
negpos : bool, optional
Whether to color lines greater than zero with `poscolor` and lines less
than zero with `negcolor`.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive lines. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
color, colors : color-spec or list thereof, optional
The line color(s).
linestyle, linestyles : linestyle-spec or list thereof, optional
The line style(s).
lw, linewidth, linewidths : linewidth-spec or list thereof, optional
The line width(s).
See also
--------
standardize_1d
apply_cycle
"""
docstring.snippets['axes.vlines'] = _lines_docstring.format(
x='x', y='y', prefix='v', orientation='vertical',
)
docstring.snippets['axes.hlines'] = _lines_docstring.format(
x='y', y='x', prefix='h', orientation='horizontal',
)
_scatter_docstring = """
Support `apply_cmap` features and support style keywords that are
consistent with `~{package}.axes.Axes.plot{suffix}` keywords.
Important
---------
This function wraps `~{package}.axes.Axes.scatter{suffix}`.
Parameters
----------
*args : {y} or {x}, {y}
The input *{y}* or *{x}* and *{y}* coordinates. If only *{y}* is provided,
*{x}* will be inferred from *{y}*.
s, size, markersize : float or list of float, optional
The marker size(s). The units are optionally scaled by
`smin` and `smax`.
smin, smax : float, optional
The minimum and maximum marker size in units ``points^2`` used to scale
`s`. If not provided, the marker sizes are equivalent to the values in `s`.
c, color, markercolor : color-spec or list thereof, or array, optional
The marker fill color(s). If this is an array of scalar values, colors
will be generated using the colormap `cmap` and normalizer `norm`.
%(axes.vmin_vmax)s
%(axes.cmap_norm)s
%(axes.levels_values)s
%(axes.auto_levels)s
lw, linewidth, linewidths, markeredgewidth, markeredgewidths \
: float or list thereof, optional
The marker edge width.
edgecolors, markeredgecolor, markeredgecolors \
: color-spec or list thereof, optional
The marker edge color.
Other parameters
----------------
**kwargs
Passed to `~{package}.axes.Axes.scatter{suffix}`.
See also
--------
{package}.axes.Axes.bar{suffix}
standardize_1d
indicate_error
apply_cycle
"""
docstring.snippets['axes.scatter'] = docstring.add_snippets(
_scatter_docstring.format(x='x', y='y', suffix='', package='matplotlib')
)
docstring.snippets['axes.scatterx'] = docstring.add_snippets(
_scatter_docstring.format(x='y', y='x', suffix='x', package='proplot')
)
_fill_between_docstring = """
Support overlaying and stacking successive columns of data and support
different colors for "negative" and "positive" regions.
Important
---------
This function wraps `~matplotlib.axes.Axes.fill_between{suffix}` and
`~proplot.axes.Axes.area{suffix}`.
Parameters
----------
*args : ({y}1,), ({x}, {y}1), or ({x}, {y}1, {y}2)
The *{x}* and *{y}* coordinates. If `{x}` is not provided, it will be
inferred from `{y}1`. If `{y}1` and `{y}2` are provided, this function will
shade between these points. If `{y}1` or `{y}2` are 2D, this function
is called with each column. The default value for `{y}2` is ``0``.
stack, stacked : bool, optional
Whether to "stack" successive columns of the `{y}1` array. If this is
``True`` and `{y}2` was provided, it will be ignored.
negpos : bool, optional
Whether to shade where ``{y}1 >= {y}2`` with `poscolor` and where ``{y}1 < {y}2``
with `negcolor`. For example, to shade positive values red and negative values
blue, simply use ``ax.fill_between{suffix}({x}, {y}, negpos=True)``.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive shaded regions. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
where : ndarray, optional
Boolean ndarray mask for points you want to shade. See `this example \
<https://matplotlib.org/stable/gallery/pyplots/whats_new_98_4_fill_between.html>`__.
lw, linewidth : float, optional
The edge width for the area patches.
edgecolor : color-spec, optional
The edge color for the area patches.
Other parameters
----------------
**kwargs
Passed to `~matplotlib.axes.Axes.fill_between`.
See also
--------
matplotlib.axes.Axes.fill_between{suffix}
proplot.axes.Axes.area{suffix}
standardize_1d
apply_cycle
"""
docstring.snippets['axes.fill_between'] = _fill_between_docstring.format(
x='x', y='y', suffix='',
)
docstring.snippets['axes.fill_betweenx'] = _fill_between_docstring.format(
x='y', y='x', suffix='x',
)
_bar_docstring = """
Support grouping and stacking successive columns of data, specifying bar widths
relative to coordinate spacing, and using different colors for "negative" and
"positive" bar heights.
Important
---------
This function wraps `~matplotlib.axes.Axes.bar{suffix}`.
Parameters
----------
{x}, height, width, {bottom} : float or list of float, optional
The dimensions of the bars. If the *{x}* coordinates are not provided,
they are set to ``np.arange(0, len(height))``. The units for width
are *relative* by default.
absolute_width : bool, optional
Whether to make the units for width *absolute*. This restores
the default matplotlib behavior.
stack, stacked : bool, optional
Whether to stack columns of the input array or plot the bars
side-by-side in groups.
negpos : bool, optional
Whether to shade bars greater than zero with `poscolor` and bars less
than zero with `negcolor`.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive bars. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
lw, linewidth : float, optional
The edge width for the bar patches.
edgecolor : color-spec, optional
The edge color for the bar patches.
Other parameters
----------------
**kwargs
Passed to `~matplotlib.axes.Axes.bar{suffix}`.
See also
--------
matplotlib.axes.Axes.bar{suffix}
standardize_1d
indicate_error
apply_cycle
"""
docstring.snippets['axes.bar'] = _bar_docstring.format(
x='x', bottom='bottom', suffix='',
)
docstring.snippets['axes.barh'] = _bar_docstring.format(
x='y', bottom='left', suffix='h',
)
def _load_objects():
"""
Delay loading expensive modules. We just want to detect if *input
arrays* belong to these types -- and if this is the case, it means the
module has already been imported! So, we only try loading these classes
within autoformat calls. This saves >~500ms of import time.
"""
global DataArray, DataFrame, Series, Index, ndarray
ndarray = np.ndarray
DataArray = getattr(sys.modules.get('xarray', None), 'DataArray', ndarray)
DataFrame = getattr(sys.modules.get('pandas', None), 'DataFrame', ndarray)
Series = getattr(sys.modules.get('pandas', None), 'Series', ndarray)
Index = getattr(sys.modules.get('pandas', None), 'Index', ndarray)
_load_objects()
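# Illustrative note (a hedged sketch, not part of the original module): when
# xarray has never been imported, DataArray above is simply np.ndarray, so
# isinstance(np.arange(3), DataArray) is True; once xarray has been imported
# and _load_objects() runs again, the same check returns False for plain
# ndarrays while genuine DataArray inputs are detected.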
def _is_number(data):
"""
Test whether input is numeric array rather than datetime or strings.
"""
return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)
def _is_string(data):
"""
Test whether input is array of strings.
"""
return len(data) and isinstance(_to_ndarray(data).flat[0], str)
def _to_arraylike(data):
"""
Convert list of lists to array-like type.
"""
_load_objects()
if data is None:
raise ValueError('Cannot convert None data.')
if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):
data = np.asarray(data)
if not np.iterable(data):
data = np.atleast_1d(data)
return data
def _to_ndarray(data):
"""
Convert arbitrary input to ndarray cleanly. Returns a masked
array if input is a masked array.
"""
return np.atleast_1d(getattr(data, 'values', data))
def _mask_array(mask, *args):
"""
Apply the mask to the input arrays. Values matching ``False`` are
set to `np.nan`.
"""
invalid = ~mask # True if invalid
args_masked = []
for arg in args:
if arg.size > 1 and arg.shape != invalid.shape:
raise ValueError('Shape mismatch between mask and array.')
arg_masked = arg.astype(np.float64)
if arg.size == 1:
pass
elif invalid.size == 1:
arg_masked = np.nan if invalid.item() else arg_masked
elif arg.size > 1:
arg_masked[invalid] = np.nan
args_masked.append(arg_masked)
return args_masked[0] if len(args_masked) == 1 else args_masked
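# Illustrative example (a hedged sketch, not part of the original module):
# positions where the mask is False are replaced with NaN after casting the
# array to float, e.g.
# >>> _mask_array(np.array([True, False, True]), np.array([1, 2, 3]))
# array([ 1., nan,  3.])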
def default_latlon(self, *args, latlon=True, **kwargs):
"""
Make ``latlon=True`` the default for `~proplot.axes.BasemapAxes` plots.
This means you no longer have to pass ``latlon=True`` if your data
coordinates are longitude and latitude.
Important
---------
This function wraps {methods} for `~proplot.axes.BasemapAxes`.
"""
method = kwargs.pop('_method')
return method(self, *args, latlon=latlon, **kwargs)
def default_transform(self, *args, transform=None, **kwargs):
"""
Make ``transform=cartopy.crs.PlateCarree()`` the default for
`~proplot.axes.CartopyAxes` plots. This means you no longer have to
pass ``transform=cartopy.crs.PlateCarree()`` if your data
coordinates are longitude and latitude.
Important
---------
This function wraps {methods} for `~proplot.axes.CartopyAxes`.
"""
# Apply default transform
# TODO: Do some cartopy methods reset backgroundpatch or outlinepatch?
# Deleted comment reported this issue
method = kwargs.pop('_method')
if transform is None:
transform = PlateCarree()
return method(self, *args, transform=transform, **kwargs)
def _basemap_redirect(self, *args, **kwargs):
"""
Decorator that calls the basemap version of the function of the
same name. This must be applied as the innermost decorator.
"""
method = kwargs.pop('_method')
name = method.__name__
if getattr(self, 'name', None) == 'proplot_basemap':
return getattr(self.projection, name)(*args, ax=self, **kwargs)
else:
return method(self, *args, **kwargs)
def _basemap_norecurse(self, *args, called_from_basemap=False, **kwargs):
"""
Decorator to prevent recursion in basemap method overrides.
See `this post <https://stackoverflow.com/a/37675810/4970632>`__.
"""
method = kwargs.pop('_method')
name = method.__name__
if called_from_basemap:
return getattr(maxes.Axes, name)(self, *args, **kwargs)
else:
return method(self, *args, called_from_basemap=True, **kwargs)
def _get_data(data, *args):
"""
Try to convert positional `key` arguments to `data[key]`. If argument is string
it could be a valid positional argument like `fmt` so do not raise error.
"""
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, str):
try:
array = data[arg]
except KeyError:
pass
else:
args[i] = array
return args
def _get_label(obj):
"""
Return a valid non-placeholder artist label from the artist or a tuple of
artists destined for a legend. Prefer final artist (drawn last and on top).
"""
# NOTE: BarContainer and StemContainer are instances of tuple
while not hasattr(obj, 'get_label') and isinstance(obj, tuple) and len(obj) > 1:
obj = obj[-1]
label = getattr(obj, 'get_label', lambda: None)()
return label if label and label[:1] != '_' else None
def _get_labels(data, axis=0, always=True):
"""
Return the array-like "labels" along axis `axis` from an array-like
object. These might be an xarray `DataArray` or pandas `Index`. If
`always` is ``False`` we return ``None`` for simple ndarray input.
"""
# NOTE: Previously inferred 'axis 1' metadata of 1D variable using the
# data values metadata but that is incorrect. The paradigm for 1D plots
# is we have row coordinates representing x, data values representing y,
# and column coordinates representing individual series.
if axis not in (0, 1, 2):
raise ValueError(f'Invalid axis {axis}.')
labels = None
_load_objects()
if isinstance(data, ndarray):
if not always:
pass
elif axis < data.ndim:
labels = np.arange(data.shape[axis])
else: # requesting 'axis 1' on a 1D array
labels = np.array([0])
# Xarray object
# NOTE: Even if coords not present .coords[dim] auto-generates indices
elif isinstance(data, DataArray):
if axis < data.ndim:
labels = data.coords[data.dims[axis]]
elif not always:
pass
else:
labels = np.array([0])
# Pandas object
elif isinstance(data, (DataFrame, Series, Index)):
if axis == 0 and isinstance(data, (DataFrame, Series)):
labels = data.index
elif axis == 1 and isinstance(data, (DataFrame,)):
labels = data.columns
elif not always:
pass
else: # beyond dimensionality
labels = np.array([0])
# Everything else
# NOTE: We ensure data is at least 1D in _to_arraylike so this covers everything
else:
raise ValueError(f'Unrecognized array type {type(data)}.')
return labels
def _get_title(data, units=True):
"""
Return the "title" associated with an array-like object with metadata. This
might be a pandas `DataFrame` `name` or a name constructed from xarray `DataArray`
attributes. In the latter case we search for `long_name` and `standard_name`,
preferring the former, and append `(units)` if `units` is ``True``. If no
names are available but units are available we just use the units string.
"""
title = None
_load_objects()
if isinstance(data, ndarray):
pass
# Xarray object with possible long_name, standard_name, and units attributes.
# Output depends on if units is True
elif isinstance(data, DataArray):
title = getattr(data, 'name', None)
for key in ('standard_name', 'long_name'):
title = data.attrs.get(key, title)
if units:
units = data.attrs.get('units', None)
if title and units:
title = f'{title} ({units})'
elif units:
title = units
# Pandas object. Note DataFrame has no native name attribute but user can add one
# See: https://github.com/pandas-dev/pandas/issues/447
elif isinstance(data, (DataFrame, Series, Index)):
title = getattr(data, 'name', None) or None
# Standardize result
if title is not None:
title = str(title).strip()
return title
def _parse_string_coords(*args, which='x', **kwargs):
"""
Convert string arrays and lists to index coordinates.
"""
# NOTE: Why FixedLocator and not IndexLocator? The latter requires plotting
# lines or else error is raised... very strange.
# NOTE: Why IndexFormatter and not FixedFormatter? The former ensures labels
# correspond to indices while the latter can mysteriously truncate labels.
res = []
for arg in args:
arg = _to_arraylike(arg)
if _is_string(arg) and arg.ndim > 1:
raise ValueError('Non-1D string coordinate input is unsupported.')
if not _is_string(arg):
res.append(arg)
continue
idx = np.arange(len(arg))
kwargs.setdefault(which + 'locator', mticker.FixedLocator(idx))
kwargs.setdefault(which + 'formatter', pticker._IndexFormatter(_to_ndarray(arg))) # noqa: E501
kwargs.setdefault(which + 'minorlocator', mticker.NullLocator())
res.append(idx)
return *res, kwargs
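# Illustrative example (a hedged sketch, not part of the original module):
# string coordinates are swapped for integer indices while locator/formatter
# defaults are injected into the keyword arguments, e.g.
#   idx, kw = _parse_string_coords(['a', 'b', 'c'], which='x')
# leaves idx == array([0, 1, 2]) and kw containing 'xlocator', 'xformatter',
# and 'xminorlocator' entries built from FixedLocator, _IndexFormatter, and
# NullLocator.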
def _auto_format_1d(
self, x, *ys, name='plot', autoformat=False,
label=None, values=None, labels=None, **kwargs
):
"""
Try to retrieve default coordinates from array-like objects and apply default
formatting. Also update the keyword arguments.
"""
# Parse input
projection = hasattr(self, 'projection')
parametric = name in ('parametric',)
scatter = name in ('scatter',)
hist = name in ('hist',)
box = name in ('boxplot', 'violinplot')
pie = name in ('pie',)
vert = kwargs.get('vert', True) and kwargs.get('orientation', None) != 'horizontal'
vert = vert and name not in ('plotx', 'scatterx', 'fill_betweenx', 'barh')
stem = name in ('stem',)
nocycle = name in ('stem', 'hexbin', 'hist2d', 'parametric')
labels = _not_none(
label=label,
values=values,
labels=labels,
colorbar_kw_values=kwargs.get('colorbar_kw', {}).pop('values', None),
legend_kw_labels=kwargs.get('legend_kw', {}).pop('labels', None),
)
# Retrieve the x coords
# NOTE: Allow for "ragged array" input to boxplot and violinplot.
# NOTE: Where columns represent distributions, like for box and violin plots or
# where we use 'means' or 'medians', columns coords (axis 1) are 'x' coords.
# Otherwise, columns represent e.g. lines, and row coords (axis 0) are 'x' coords
dists = box or any(kwargs.get(s) for s in ('mean', 'means', 'median', 'medians'))
ragged = any(getattr(y, 'dtype', None) == 'object' for y in ys)
xaxis = 1 if dists and not ragged else 0
if x is None and not hist:
x = _get_labels(ys[0], axis=xaxis) # infer from rows or columns
# Default legend or colorbar labels and title. We want default legend
# labels if this is an object with 'title' metadata and/or coords are string
# WARNING: Confusing terminology differences here -- for box and violin plots
# 'labels' refer to indices along x axis. Get interpreted that way down the line.
if autoformat and not stem:
# The inferred labels and title
title = None
if labels is not None:
title = _get_title(labels)
else:
yaxis = xaxis if box or pie else xaxis + 1
labels = _get_labels(ys[0], axis=yaxis, always=False)
title = _get_title(labels) # e.g. if labels is a Series
if labels is None:
pass
elif not title and not any(isinstance(_, str) for _ in labels):
labels = None
# Apply the title
if title:
kwargs.setdefault('colorbar_kw', {}).setdefault('title', title)
if not nocycle:
kwargs.setdefault('legend_kw', {}).setdefault('title', title)
# Apply the labels
if labels is not None:
if not nocycle:
kwargs['labels'] = _to_ndarray(labels)
elif parametric:
values, colorbar_kw = _parse_string_coords(labels, which='')
kwargs['values'] = _to_ndarray(values)
kwargs.setdefault('colorbar_kw', {}).update(colorbar_kw)
# The basic x and y settings
if not projection:
# Apply label
# NOTE: Do not overwrite existing labels!
sx, sy = 'xy' if vert else 'yx'
sy = sx if hist else sy # histogram 'y' values end up along 'x' axis
kw_format = {}
if autoformat: # 'y' axis
title = _get_title(ys[0])
if title and not getattr(self, f'get_{sy}label')():
kw_format[sy + 'label'] = title
if autoformat and not hist: # 'x' axis
title = _get_title(x)
if title and not getattr(self, f'get_{sx}label')():
kw_format[sx + 'label'] = title
# Handle string-type coordinates
if not pie and not hist:
x, kw_format = _parse_string_coords(x, which=sx, **kw_format)
if not hist and not box and not pie:
*ys, kw_format = _parse_string_coords(*ys, which=sy, **kw_format)
if not hist and not scatter and not parametric and x.ndim == 1 and x.size > 1 and x[1] < x[0]: # noqa: E501
kw_format[sx + 'reverse'] = True # auto reverse
# Apply
if kw_format:
self.format(**kw_format)
# Finally strip metadata
# WARNING: Most methods that accept 2D arrays use columns of data, but when
# pandas DataFrame specifically is passed to hist, boxplot, or violinplot, rows
# of data assumed! Converting to ndarray necessary.
return _to_ndarray(x), *map(_to_ndarray, ys), kwargs
def _basemap_1d(x, *ys, projection=None):
"""
Fix basemap geographic 1D data arrays.
"""
xmin, xmax = projection.lonmin, projection.lonmax
x_orig, ys_orig = x, ys
ys = []
for y_orig in ys_orig:
x, y = _fix_span(*_fix_coords(x_orig, y_orig), xmin, xmax)
ys.append(y)
return x, *ys
def _fix_coords(x, y):
"""
Ensure longitudes are monotonic and make `~numpy.ndarray` copies so the
contents can be modified. Ignores 2D coordinate arrays.
"""
if x.ndim != 1 or all(x < x[0]): # skip 2D arrays and monotonic backwards data
return x, y
lon1 = x[0]
filter_ = x < lon1
while filter_.sum():
filter_ = x < lon1
x[filter_] += 360
return x, y
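# Illustrative example (a hedged sketch, not part of the original module):
# longitudes that wrap past the first point are shifted up by 360 until the
# array is monotonic, e.g. x = [350, 355, 0, 5] becomes [350, 355, 360, 365]
# while y is returned unchanged.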
def _fix_span(x, y, xmin, xmax):
"""
Ensure data for basemap plots is restricted between the minimum and
maximum longitude of the projection. Input is the ``x`` and ``y``
coordinates. The ``y`` coordinates are rolled along the rightmost axis.
"""
if x.ndim != 1:
return x, y
# Roll in same direction if some points on right-edge extend
# more than 360 above min longitude; *they* should be on left side
lonroll = np.where(x > xmin + 360)[0] # tuple of ids
if lonroll.size: # non-empty
roll = x.size - lonroll.min()
x = np.roll(x, roll)
y = np.roll(y, roll, axis=-1)
x[:roll] -= 360 # make monotonic
# Set NaN where data not in range xmin, xmax. Must be done
# for regional smaller projections or get weird side-effects due
# to having valid data way outside of the map boundaries
y = y.copy()
if x.size - 1 == y.shape[-1]: # test western/eastern grid cell edges
y[..., (x[1:] < xmin) | (x[:-1] > xmax)] = np.nan
elif x.size == y.shape[-1]: # test the centers and pad by one for safety
where = np.where((x < xmin) | (x > xmax))[0]
y[..., where[1:-1]] = np.nan
return x, y
@docstring.add_snippets
def standardize_1d(self, *args, data=None, autoformat=None, **kwargs):
"""
Interpret positional arguments for all "1D" plotting commands so the syntax
is consistent. The arguments are standardized as follows:
* If a 2D array is passed, the corresponding plot command is called for
each column of data (except for ``boxplot`` and ``violinplot``, in which
case each column is interpreted as a distribution).
* If *x* and *y* or *latitude* and *longitude* coordinates were not provided,
and a `~pandas.DataFrame` or `~xarray.DataArray`, we try to infer them from
the metadata. Otherwise, ``np.arange(0, data.shape[0])`` is used.
Important
---------
This function wraps {methods}
Parameters
----------
%(axes.autoformat)s
See also
--------
apply_cycle
indicate_error
"""
method = kwargs.pop('_method')
name = method.__name__
bar = name in ('bar', 'barh')
box = name in ('boxplot', 'violinplot')
hist = name in ('hist',)
parametric = name in ('parametric',)
onecoord = name in ('hist',)
twocoords = name in ('vlines', 'hlines', 'fill_between', 'fill_betweenx')
allowempty = name in ('fill', 'plot', 'plotx',)
autoformat = _not_none(autoformat, rc['autoformat'])
# Find and translate input args
args = list(args)
keys = KEYWORD_TO_POSITIONAL_INSERT.get(name, {})
for idx, key in enumerate(keys):
if key in kwargs:
args.insert(idx, kwargs.pop(key))
if data is not None:
args = _get_data(data, *args)
if not args:
if allowempty:
return [] # match matplotlib behavior
else:
raise TypeError('Positional arguments are required.')
# Translate between 'orientation' and 'vert' for flexibility
# NOTE: Users should only pass these to hist, boxplot, or violinplot. To change
# the bar plot orientation users should use 'bar' and 'barh'. Internally,
# matplotlib has a single central bar function whose behavior is configured
# by the 'orientation' key, so critical not to strip the argument here.
vert = kwargs.pop('vert', None)
orientation = kwargs.pop('orientation', None)
if orientation is not None:
vert = _not_none(vert=vert, orientation=(orientation == 'vertical'))
if orientation not in (None, 'horizontal', 'vertical'):
raise ValueError("Orientation must be either 'horizontal' or 'vertical'.")
if vert is None:
pass
elif box:
kwargs['vert'] = vert
elif bar or hist:
kwargs['orientation'] = 'vertical' if vert else 'horizontal' # used internally
else:
raise TypeError("Unexpected keyword argument(s) 'vert' and 'orientation'.")
# Parse positional args
if parametric and len(args) == 3: # allow positional values
kwargs['values'] = args.pop(2)
if parametric and 'c' in kwargs: # handle aliases
kwargs['values'] = kwargs.pop('c')
if onecoord or len(args) == 1: # allow hist() positional bins
x, ys, args = None, args[:1], args[1:]
elif twocoords:
x, ys, args = args[0], args[1:3], args[3:]
else:
x, ys, args = args[0], args[1:2], args[2:]
if x is not None:
x = _to_arraylike(x)
ys = tuple(map(_to_arraylike, ys))
# Append remaining positional args
# NOTE: This is currently just used for bar and barh. More convenient to pass
# 'width' as positional so that matplotlib native 'barh' sees it as 'height'.
keys = KEYWORD_TO_POSITIONAL_APPEND.get(name, {})
for key in keys:
if key in kwargs:
args.append(kwargs.pop(key))
# Automatic formatting and coordinates
# NOTE: For 'hist' the 'x' coordinate remains None then is ignored in apply_cycle.
x, *ys, kwargs = _auto_format_1d(
self, x, *ys, name=name, autoformat=autoformat, **kwargs
)
# Ensure data is monotonic and falls within map bounds
if getattr(self, 'name', None) == 'proplot_basemap' and kwargs.get('latlon', None):
x, *ys = _basemap_1d(x, *ys, projection=self.projection)
# Call function
if box:
kwargs.setdefault('positions', x) # *this* is how 'x' is passed to boxplot
return method(self, x, *ys, *args, **kwargs)
def _auto_format_2d(self, x, y, *zs, name=None, order='C', autoformat=False, **kwargs):
"""
Try to retrieve default coordinates from array-like objects and apply default
formatting. Also apply optional transpose and update the keyword arguments.
"""
# Retrieve coordinates
allow1d = name in ('barbs', 'quiver') # these also allow 1D data
projection = hasattr(self, 'projection')
if x is None and y is None:
z = zs[0]
if z.ndim == 1:
x = _get_labels(z, axis=0)
y = np.zeros(z.shape) # default barb() and quiver() behavior in matplotlib
else:
x = _get_labels(z, axis=1)
y = _get_labels(z, axis=0)
if order == 'F':
x, y = y, x
# Check coordinate and data shapes
shapes = tuple(z.shape for z in zs)
if any(len(_) != 2 and not (allow1d and len(_) == 1) for _ in shapes):
raise ValueError(f'Data arrays must be 2d, but got shapes {shapes}.')
shapes = set(shapes)
if len(shapes) > 1:
raise ValueError(f'Data arrays must have same shape, but got shapes {shapes}.')
if any(_.ndim not in (1, 2) for _ in (x, y)):
raise ValueError('x and y coordinates must be 1d or 2d.')
if x.ndim != y.ndim:
raise ValueError('x and y coordinates must have same dimensionality.')
if order == 'F': # TODO: double check this
x, y = x.T, y.T # in case they are 2-dimensional
zs = tuple(z.T for z in zs)
# The labels and XY axis settings
if not projection:
# Apply labels
# NOTE: Do not overwrite existing labels!
kw_format = {}
if autoformat:
for s, d in zip('xy', (x, y)):
title = _get_title(d)
if title and not getattr(self, f'get_{s}label')():
kw_format[s + 'label'] = title
# Handle string-type coordinates
x, kw_format = _parse_string_coords(x, which='x', **kw_format)
y, kw_format = _parse_string_coords(y, which='y', **kw_format)
for s, d in zip('xy', (x, y)):
if d.size > 1 and d.ndim == 1 and _to_ndarray(d)[1] < _to_ndarray(d)[0]:
kw_format[s + 'reverse'] = True
# Apply formatting
if kw_format:
self.format(**kw_format)
# Default colorbar label
# WARNING: This will fail for any funcs wrapped by standardize_2d but not
# wrapped by apply_cmap. So far there are none.
if autoformat:
kwargs.setdefault('colorbar_kw', {})
title = _get_title(zs[0])
        if title:
kwargs['colorbar_kw'].setdefault('label', title)
# Finally strip metadata
return _to_ndarray(x), _to_ndarray(y), *map(_to_ndarray, zs), kwargs
def _add_poles(y, z):
"""
Add data points on the poles as the average of highest latitude data.
"""
# Get means
with np.errstate(all='ignore'):
p1 = z[0, :].mean() # pole 1, make sure is not 0D DataArray!
p2 = z[-1, :].mean() # pole 2
    if hasattr(p1, 'item'):
        p1 = p1.item()  # happens with DataArrays
    if hasattr(p2, 'item'):
        p2 = p2.item()
# Concatenate
ps = (-90, 90) if (y[0] < y[-1]) else (90, -90)
z1 = np.repeat(p1, z.shape[1])[None, :]
z2 = np.repeat(p2, z.shape[1])[None, :]
y = ma.concatenate((ps[:1], y, ps[1:]))
z = ma.concatenate((z1, z, z2), axis=0)
return y, z
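# Illustrative sketch (hypothetical helper): the padding performed by _add_poles,
# re-implemented with plain numpy for a small latitude x longitude array.
def _example_add_poles_logic():
    import numpy as np
    y = np.array([-60.0, 0.0, 60.0])  # latitudes, south to north
    z = np.array([[1.0, 3.0], [5.0, 7.0], [9.0, 11.0]])
    p_south, p_north = z[0, :].mean(), z[-1, :].mean()  # nearest-latitude means
    y_pad = np.concatenate(([-90.0], y, [90.0]))
    z_pad = np.concatenate(
        (np.full((1, z.shape[1]), p_south), z, np.full((1, z.shape[1]), p_north)), axis=0
    )
    assert y_pad.shape == (5,) and z_pad[0, 0] == 2.0 and z_pad[-1, 0] == 10.0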
def _enforce_centers(x, y, z):
"""
Enforce that coordinates are centers. Convert from edges if possible.
"""
xlen, ylen = x.shape[-1], y.shape[0]
if z.ndim == 2 and z.shape[1] == xlen - 1 and z.shape[0] == ylen - 1:
# Get centers given edges
if all(z.ndim == 1 and z.size > 1 and _is_number(z) for z in (x, y)):
x = 0.5 * (x[1:] + x[:-1])
y = 0.5 * (y[1:] + y[:-1])
else:
if (
x.ndim == 2 and x.shape[0] > 1 and x.shape[1] > 1
and _is_number(x)
):
x = 0.25 * (x[:-1, :-1] + x[:-1, 1:] + x[1:, :-1] + x[1:, 1:])
if (
y.ndim == 2 and y.shape[0] > 1 and y.shape[1] > 1
and _is_number(y)
):
y = 0.25 * (y[:-1, :-1] + y[:-1, 1:] + y[1:, :-1] + y[1:, 1:])
elif z.shape[-1] != xlen or z.shape[0] != ylen:
# Helpful error message
raise ValueError(
f'Input shapes x {x.shape} and y {y.shape} '
f'must match z centers {z.shape} '
f'or z borders {tuple(i+1 for i in z.shape)}.'
)
return x, y
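# Illustrative sketch (hypothetical helper): the midpoint rule used above to convert
# 1D edge coordinates into centers.
def _example_edges_to_centers():
    import numpy as np
    x_edges = np.array([0.0, 1.0, 2.0, 3.0])
    x_centers = 0.5 * (x_edges[1:] + x_edges[:-1])
    assert np.allclose(x_centers, [0.5, 1.5, 2.5])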
def _enforce_edges(x, y, z):
"""
Enforce that coordinates are edges. Convert from centers if possible.
"""
xlen, ylen = x.shape[-1], y.shape[0]
if z.ndim == 2 and z.shape[1] == xlen and z.shape[0] == ylen:
# Get edges given centers
if all(z.ndim == 1 and z.size > 1 and _is_number(z) for z in (x, y)):
x = edges(x)
y = edges(y)
else:
if (
x.ndim == 2 and x.shape[0] > 1 and x.shape[1] > 1
and _is_number(x)
):
x = edges2d(x)
if (
y.ndim == 2 and y.shape[0] > 1 and y.shape[1] > 1
and _is_number(y)
):
y = edges2d(y)
elif z.shape[-1] != xlen - 1 or z.shape[0] != ylen - 1:
# Helpful error message
raise ValueError(
f'Input shapes x {x.shape} and y {y.shape} must match '
f'array centers {z.shape} or '
f'array borders {tuple(i + 1 for i in z.shape)}.'
)
return x, y
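# Illustrative sketch (hypothetical helper): a rough stand-in for the center-to-edge
# conversion delegated to proplot.utils.edges above (assumed midpoint rule with
# extrapolated endpoints; not the library's actual implementation).
def _example_centers_to_edges():
    import numpy as np
    centers = np.array([0.5, 1.5, 2.5])
    inner = 0.5 * (centers[1:] + centers[:-1])
    first = centers[0] - 0.5 * (centers[1] - centers[0])
    last = centers[-1] + 0.5 * (centers[-1] - centers[-2])
    assert np.allclose(np.concatenate(([first], inner, [last])), [0.0, 1.0, 2.0, 3.0])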
def _cartopy_2d(x, y, *zs, globe=False):
"""
Fix cartopy 2D geographic data arrays.
"""
# Fix coordinates
x, y = _fix_coords(x, y)
# Fix data
x_orig, y_orig, zs_orig = x, y, zs
zs = []
for z_orig in zs_orig:
# Bail for 2D coordinates
if not globe or x_orig.ndim > 1 or y_orig.ndim > 1:
zs.append(z_orig)
continue
# Fix holes over poles by *interpolating* there
y, z = _add_poles(y_orig, z_orig)
# Fix seams by ensuring circular coverage (cartopy can plot over map edges)
if x_orig[0] % 360 != (x_orig[-1] + 360) % 360:
x = ma.concatenate((x_orig, [x_orig[0] + 360]))
z = ma.concatenate((z, z[:, :1]), axis=1)
zs.append(z)
return x, y, *zs
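# Illustrative sketch (hypothetical helper): the "circular coverage" seam fix used
# above, shown with plain numpy arrays instead of masked arrays.
def _example_global_seam_fix():
    import numpy as np
    x = np.array([0.0, 90.0, 180.0, 270.0])  # first longitude != last + 360
    z = np.arange(8.0).reshape(2, 4)
    x_fixed = np.concatenate((x, [x[0] + 360]))
    z_fixed = np.concatenate((z, z[:, :1]), axis=1)  # repeat the first column
    assert x_fixed[-1] == 360.0 and z_fixed.shape == (2, 5)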
def _basemap_2d(x, y, *zs, globe=False, projection=None):
"""
Fix basemap 2D geographic data arrays.
"""
# Fix coordinates
x, y = _fix_coords(x, y)
# Fix data
xmin, xmax = projection.lonmin, projection.lonmax
x_orig, y_orig, zs_orig = x, y, zs
zs = []
for z_orig in zs_orig:
# Ensure data is within map bounds
x, z_orig = _fix_span(x_orig, z_orig, xmin, xmax)
# Bail for 2D coordinates
if not globe or x_orig.ndim > 1 or y_orig.ndim > 1:
zs.append(z_orig)
continue
# Fix holes over poles by *interpolating* there
y, z = _add_poles(y_orig, z_orig)
# Fix seams at map boundary
if x[0] == xmin and x.size - 1 == z.shape[1]: # scenario 1
# Edges (e.g. pcolor) fit perfectly against seams. Size is unchanged.
pass
elif x.size - 1 == z.shape[1]: # scenario 2
# Edges (e.g. pcolor) do not fit perfectly. Size augmented by 1.
x = ma.append(xmin, x)
x[-1] = xmin + 360
z = ma.concatenate((z[:, -1:], z), axis=1)
elif x.size == z.shape[1]: # scenario 3
# Centers (e.g. contour) must be interpolated to edge. Size augmented by 2.
xi = np.array([x[-1], x[0] + 360])
if xi[0] == xi[1]: # impossible to interpolate
pass
else:
zq = ma.concatenate((z[:, -1:], z[:, :1]), axis=1)
xq = xmin + 360
zq = (zq[:, :1] * (xi[1] - xq) + zq[:, 1:] * (xq - xi[0])) / (xi[1] - xi[0]) # noqa: E501
x = ma.concatenate(([xmin], x, [xmin + 360]))
z = ma.concatenate((zq, z, zq), axis=1)
else:
raise ValueError('Unexpected shapes of coordinates or data arrays.')
zs.append(z)
# Convert coordinates
if x.ndim == 1 and y.ndim == 1:
x, y = np.meshgrid(x, y)
x, y = projection(x, y)
return x, y, *zs
@docstring.add_snippets
def standardize_2d(
self, *args, data=None, autoformat=None, order='C', globe=False, **kwargs
):
"""
Interpret positional arguments for all "2D" plotting commands so the syntax is
consistent. The arguments are standardized as follows:
* If *x* and *y* or *latitude* and *longitude* coordinates were not
provided, and a `~pandas.DataFrame` or `~xarray.DataArray` is passed, we
try to infer them from the metadata. Otherwise, ``np.arange(0, data.shape[0])``
and ``np.arange(0, data.shape[1])`` are used.
* For ``pcolor`` and ``pcolormesh``, coordinate *edges* are calculated
if *centers* were provided. This uses the `~proplot.utils.edges` and
    `~proplot.utils.edges2d` functions. For all other methods, coordinate
*centers* are calculated if *edges* were provided.
Important
---------
This function wraps {methods}
Parameters
----------
%(axes.autoformat)s
order : {{'C', 'F'}}, optional
If ``'C'``, arrays should be shaped ``(y, x)``. If ``'F'``, arrays
should be shaped ``(x, y)``. Default is ``'C'``.
globe : bool, optional
Whether to ensure global coverage for `~proplot.axes.GeoAxes` plots.
Default is ``False``. When set to ``True`` this does the following:
#. Interpolates input data to the North and South poles by setting the data
values at the poles to the mean from latitudes nearest each pole.
#. Makes meridional coverage "circular", i.e. the last longitude coordinate
equals the first longitude coordinate plus 360\N{DEGREE SIGN}.
#. For `~proplot.axes.BasemapAxes`, 1D longitude vectors are also cycled to
fit within the map edges. For example, if the projection central longitude
is 90\N{DEGREE SIGN}, the data is shifted so that it spans
-90\N{DEGREE SIGN} to 270\N{DEGREE SIGN}.
See also
--------
apply_cmap
proplot.utils.edges
proplot.utils.edges2d
"""
method = kwargs.pop('_method')
name = method.__name__
pcolor = name in ('pcolor', 'pcolormesh', 'pcolorfast')
allow1d = name in ('barbs', 'quiver') # these also allow 1D data
autoformat = _not_none(autoformat, rc['autoformat'])
# Find and translate input args
if data is not None:
args = _get_data(data, *args)
if not args:
raise TypeError('Positional arguments are required.')
# Parse input args
if len(args) > 2:
x, y, *args = args
else:
x = y = None
if x is not None:
x = _to_arraylike(x)
if y is not None:
y = _to_arraylike(y)
zs = tuple(map(_to_arraylike, args))
# Automatic formatting
x, y, *zs, kwargs = _auto_format_2d(
self, x, y, *zs, name=name, order=order, autoformat=autoformat, **kwargs
)
# Standardize coordinates
if pcolor:
x, y = _enforce_edges(x, y, zs[0])
else:
x, y = _enforce_centers(x, y, zs[0])
# Cartopy projection axes
if (
not allow1d and getattr(self, 'name', None) == 'proplot_cartopy'
and isinstance(kwargs.get('transform', None), PlateCarree)
):
x, y, *zs = _cartopy_2d(x, y, *zs, globe=globe)
# Basemap projection axes
elif (
not allow1d and getattr(self, 'name', None) == 'proplot_basemap'
and kwargs.get('latlon', None)
):
x, y, *zs = _basemap_2d(x, y, *zs, globe=globe, projection=self.projection)
kwargs['latlon'] = False
# Call function
return method(self, x, y, *zs, **kwargs)
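# Illustrative usage sketch (hypothetical calls, assuming the documented behavior above):
#   data = np.random.rand(20, 30)
#   ax.pcolormesh(data)                        # integer coordinates inferred, centers -> edges
#   ax.contourf(x_edges, y_edges, data)        # edges -> centers for contouring
#   ax.pcolormesh(lon, lat, data, globe=True)  # GeoAxes: poles filled and seam closed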
def _get_error_data(
data, y, errdata=None, stds=None, pctiles=None,
stds_default=None, pctiles_default=None,
reduced=True, absolute=False, label=False,
):
"""
Return values that can be passed to the `~matplotlib.axes.Axes.errorbar`
`xerr` and `yerr` keyword args.
"""
# Parse stds arguments
# NOTE: Have to guard against "truth value of an array is ambiguous" errors
if stds is True:
stds = stds_default
elif stds is False or stds is None:
stds = None
else:
stds = np.atleast_1d(stds)
if stds.size == 1:
stds = sorted((-stds.item(), stds.item()))
elif stds.size != 2:
raise ValueError('Expected scalar or length-2 stdev specification.')
# Parse pctiles arguments
if pctiles is True:
pctiles = pctiles_default
elif pctiles is False or pctiles is None:
pctiles = None
else:
pctiles = np.atleast_1d(pctiles)
if pctiles.size == 1:
delta = (100 - pctiles.item()) / 2.0
pctiles = sorted((delta, 100 - delta))
elif pctiles.size != 2:
raise ValueError('Expected scalar or length-2 pctiles specification.')
# Incompatible settings
if stds is not None and pctiles is not None:
warnings._warn_proplot(
'You passed both a standard deviation range and a percentile range for '
'drawing error indicators. Using the former.'
)
pctiles = None
if not reduced and (stds is not None or pctiles is not None):
raise ValueError(
'To automatically compute standard deviations or percentiles on columns '
'of data you must pass means=True or medians=True.'
)
if reduced and errdata is not None:
stds = pctiles = None
warnings._warn_proplot(
'You explicitly provided the error bounds but also requested '
'automatically calculating means or medians on data columns. '
'It may make more sense to use the "stds" or "pctiles" keyword args '
'and have *proplot* calculate the error bounds.'
)
# Compute error data in format that can be passed to maxes.Axes.errorbar()
# NOTE: Include option to pass symmetric deviation from central points
if errdata is not None:
# Manual error data
if y.ndim != 1:
raise ValueError(
'errdata with 2D y coordinates is not yet supported.'
)
label_default = 'uncertainty'
err = _to_ndarray(errdata)
if (
err.ndim not in (1, 2)
or err.shape[-1] != y.size
or err.ndim == 2 and err.shape[0] != 2
):
raise ValueError(
f'errdata must have shape (2, {y.shape[-1]}), but got {err.shape}.'
)
if err.ndim == 1:
abserr = err
err = np.empty((2, err.size))
err[0, :] = y - abserr # translated back to absolute deviations below
err[1, :] = y + abserr
elif stds is not None:
# Standard deviations
label_default = fr'{abs(stds[1])}$\sigma$ range'
err = y + np.nanstd(data, axis=0)[None, :] * _to_ndarray(stds)[:, None]
elif pctiles is not None:
# Percentiles
label_default = f'{pctiles[1] - pctiles[0]}% range'
err = np.nanpercentile(data, pctiles, axis=0)
else:
raise ValueError('You must provide error bounds.')
# Return label possibly
if label is True:
label = label_default
elif not label:
label = None
# Make relative data for maxes.Axes.errorbar() ingestion
if not absolute:
err = err - y
err[0, :] *= -1 # absolute deviations from central points
# Return data with legend entry
return err, label
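# Illustrative sketch (hypothetical helper): the shape of the error bounds computed
# above for a +/-2 standard-deviation range, using plain numpy.
def _example_error_bounds_from_stds():
    import numpy as np
    rng = np.random.RandomState(0)
    data = rng.normal(size=(100, 5))          # 100 samples for each of 5 columns
    y = data.mean(axis=0)                     # central estimate per column
    stds = np.array([-2.0, 2.0])
    err = y + np.nanstd(data, axis=0)[None, :] * stds[:, None]  # absolute bounds, (2, 5)
    rel = err - y
    rel[0, :] *= -1                           # deviations below/above the centers
    assert err.shape == (2, 5) and np.all(rel >= 0)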
def indicate_error(
self, *args,
mean=None, means=None, median=None, medians=None,
barstd=None, barstds=None, barpctile=None, barpctiles=None, bardata=None,
boxstd=None, boxstds=None, boxpctile=None, boxpctiles=None, boxdata=None,
shadestd=None, shadestds=None, shadepctile=None, shadepctiles=None, shadedata=None,
fadestd=None, fadestds=None, fadepctile=None, fadepctiles=None, fadedata=None,
boxmarker=None, boxmarkercolor='white',
boxcolor=None, barcolor=None, shadecolor=None, fadecolor=None,
shadelabel=False, fadelabel=False, shadealpha=0.4, fadealpha=0.2,
boxlinewidth=None, boxlw=None, barlinewidth=None, barlw=None, capsize=None,
boxzorder=2.5, barzorder=2.5, shadezorder=1.5, fadezorder=1.5,
**kwargs
):
"""
Support on-the-fly error bars and error shading. Use the input error data or
optionally interpret columns of data as distributions, pass the column
means or medians to the relevant plotting command, and draw error
indications from the specified standard deviation or percentile range.
Important
---------
This function wraps {methods}
Parameters
----------
*args
The input data.
mean, means : bool, optional
Whether to plot the means of each column in the input data. If no other
arguments specified, this also sets ``barstd=True`` (and ``boxstd=True``
for violin plots).
median, medians : bool, optional
Whether to plot the medians of each column in the input data. If no other
arguments specified, this also sets ``barstd=True`` (and ``boxstd=True``
for violin plots).
barstd, barstds : float, (float, float), or bool, optional
Standard deviation multiples for *thin error bars* with optional whiskers
(i.e. caps). If scalar, then +/- that number is used. If ``True``, the
default of +/-3 standard deviations is used. This argument is only valid
if `means` or `medians` is ``True``.
barpctile, barpctiles : float, (float, float) or bool, optional
As with `barstd`, but instead using *percentiles* for the error bars. The
percentiles are calculated with `numpy.percentile`. If scalar, that width
surrounding the 50th percentile is used (e.g. ``90`` shows the 5th to 95th
percentiles). If ``True``, the default percentile range of 0 to 100 is
used. This argument is only valid if `means` or `medians` is ``True``.
bardata : 2 x N array or 1D array, optional
If shape is 2 x N these are the lower and upper bounds for the thin error bars.
If array is 1D these are the absolute, symmetric deviations from the central
points. This should be used if `means` and `medians` are both ``False`` (i.e.
you did not provide dataset columns from which statistical properties can be
calculated automatically).
boxstd, boxstds, boxpctile, boxpctiles, boxdata : optional
As with `barstd`, `barpctile`, and `bardata`, but for *thicker error bars*
representing a smaller interval than the thin error bars. If `boxstds` is
``True``, the default standard deviation range of +/-1 is used. If `boxpctiles`
is ``True``, the default percentile range of 25 to 75 is used (i.e. the
interquartile range). When "boxes" and "bars" are combined, this has the effect
of drawing miniature box-and-whisker plots.
shadestd, shadestds, shadepctile, shadepctiles, shadedata : optional
As with `barstd`, `barpctile`, and `bardata`, but using *shading* to indicate
the error range. If `shadestds` is ``True``, the default standard deviation
range of +/-2 is used. If `shadepctiles` is ``True``, the default
percentile range of 10 to 90 is used. Shading is generally useful for
`~matplotlib.axes.Axes.plot` plots.
fadestd, fadestds, fadepctile, fadepctiles, fadedata : optional
As with `shadestd`, `shadepctile`, and `shadedata`, but for an additional,
more faded, *secondary* shaded region. If `fadestds` is ``True``, the default
standard deviation range of +/-3 is used. If `fadepctiles` is ``True``,
the default percentile range of 0 to 100 is used.
barcolor, boxcolor, shadecolor, fadecolor : color-spec, optional
Colors for the different error indicators. For error bars, the default is
``'k'``. For shading, the default behavior is to inherit color from the
primary `~matplotlib.artist.Artist`.
shadelabel, fadelabel : bool or str, optional
Labels for the shaded regions to be used as separate legend entries. To toggle
labels "on" and apply a *default* label, use e.g. ``shadelabel=True``. To apply
a *custom* label, use e.g. ``shadelabel='label'``. Otherwise, the shading is
drawn underneath the line and/or marker in the legend entry.
barlinewidth, boxlinewidth, barlw, boxlw : float, optional
Line widths for the thin and thick error bars, in points. The defaults
are ``barlw=0.8`` and ``boxlw=4 * barlw``.
boxmarker : bool, optional
Whether to draw a small marker in the middle of the box denoting the mean or
median position. Ignored if `boxes` is ``False``.
boxmarkercolor : color-spec, optional
Color for the `boxmarker` marker. Default is ``'w'``.
capsize : float, optional
The cap size for thin error bars in points.
barzorder, boxzorder, shadezorder, fadezorder : float, optional
The "zorder" for the thin error bars, thick error bars, and shading.
Returns
-------
h, err1, err2, ...
The original plot object and the error bar or shading objects.
"""
method = kwargs.pop('_method')
name = method.__name__
bar = name in ('bar',)
flip = name in ('barh', 'plotx', 'scatterx') or kwargs.get('vert') is False
plot = name in ('plot', 'scatter')
violin = name in ('violinplot',)
means = _not_none(mean=mean, means=means)
medians = _not_none(median=median, medians=medians)
barstds = _not_none(barstd=barstd, barstds=barstds)
boxstds = _not_none(boxstd=boxstd, boxstds=boxstds)
shadestds = _not_none(shadestd=shadestd, shadestds=shadestds)
fadestds = _not_none(fadestd=fadestd, fadestds=fadestds)
barpctiles = _not_none(barpctile=barpctile, barpctiles=barpctiles)
boxpctiles = _not_none(boxpctile=boxpctile, boxpctiles=boxpctiles)
shadepctiles = _not_none(shadepctile=shadepctile, shadepctiles=shadepctiles)
fadepctiles = _not_none(fadepctile=fadepctile, fadepctiles=fadepctiles)
bars = any(_ is not None for _ in (bardata, barstds, barpctiles))
boxes = any(_ is not None for _ in (boxdata, boxstds, boxpctiles))
shade = any(_ is not None for _ in (shadedata, shadestds, shadepctiles))
fade = any(_ is not None for _ in (fadedata, fadestds, fadepctiles))
if means and medians:
        warnings._warn_proplot('Cannot have both means=True and medians=True. Using the former.')  # noqa: E501
# Get means or medians while preserving metadata for autoformat
# TODO: Permit 3D array with error dimension coming first
# NOTE: Previously went to great pains to preserve metadata but now retrieval
# of default legend handles moved to _auto_format_1d so can strip.
x, y, *args = args
data = y
if means or medians:
if data.ndim != 2:
raise ValueError(f'Expected 2D array for means=True. Got {data.ndim}D.')
if not any((bars, boxes, shade, fade)):
bars = barstds = True
if violin:
boxes = boxstds = True
if means:
y = np.nanmean(data, axis=0)
elif medians:
y = np.nanpercentile(data, 50, axis=0)
# Parse keyword args and apply defaults
# NOTE: Should not use plot() 'linewidth' for bar elements
# NOTE: violinplot_extras passes some invalid keyword args with expectation
# that indicate_error pops them and uses them for error bars.
getter = kwargs.pop if violin else kwargs.get if bar else lambda *args: None
boxmarker = _not_none(boxmarker, True if violin else False)
capsize = _not_none(capsize, 3.0)
linewidth = _not_none(getter('linewidth', None), getter('lw', None), 1.0)
barlinewidth = _not_none(barlinewidth=barlinewidth, barlw=barlw, default=linewidth)
boxlinewidth = _not_none(boxlinewidth=boxlinewidth, boxlw=boxlw, default=4 * barlinewidth) # noqa: E501
edgecolor = _not_none(getter('edgecolor', None), 'k')
barcolor = _not_none(barcolor, edgecolor)
boxcolor = _not_none(boxcolor, barcolor)
shadecolor_infer = shadecolor is None
fadecolor_infer = fadecolor is None
shadecolor = _not_none(shadecolor, kwargs.get('color'), kwargs.get('facecolor'), edgecolor) # noqa: E501
fadecolor = _not_none(fadecolor, shadecolor)
# Draw dark and light shading
getter = kwargs.pop if plot else kwargs.get
eobjs = []
fill = self.fill_betweenx if flip else self.fill_between
if fade:
edata, label = _get_error_data(
data, y, errdata=fadedata, stds=fadestds, pctiles=fadepctiles,
stds_default=(-3, 3), pctiles_default=(0, 100), absolute=True,
reduced=means or medians, label=fadelabel,
)
eobj = fill(
x, *edata, linewidth=0, label=label,
color=fadecolor, alpha=fadealpha, zorder=fadezorder,
)
eobjs.append(eobj)
if shade:
edata, label = _get_error_data(
data, y, errdata=shadedata, stds=shadestds, pctiles=shadepctiles,
stds_default=(-2, 2), pctiles_default=(10, 90), absolute=True,
reduced=means or medians, label=shadelabel,
)
eobj = fill(
x, *edata, linewidth=0, label=label,
color=shadecolor, alpha=shadealpha, zorder=shadezorder,
)
eobjs.append(eobj)
# Draw thin error bars and thick error boxes
sy = 'x' if flip else 'y' # yerr
ex, ey = (y, x) if flip else (x, y)
if boxes:
edata, _ = _get_error_data(
data, y, errdata=boxdata, stds=boxstds, pctiles=boxpctiles,
stds_default=(-1, 1), pctiles_default=(25, 75),
reduced=means or medians,
)
if boxmarker:
self.scatter(
ex, ey, s=boxlinewidth, marker='o', color=boxmarkercolor, zorder=5
)
eobj = self.errorbar(
ex, ey, color=boxcolor, linewidth=boxlinewidth, linestyle='none',
capsize=0, zorder=boxzorder, **{sy + 'err': edata}
)
eobjs.append(eobj)
if bars: # now impossible to make thin bar width different from cap width!
edata, _ = _get_error_data(
data, y, errdata=bardata, stds=barstds, pctiles=barpctiles,
stds_default=(-3, 3), pctiles_default=(0, 100),
reduced=means or medians,
)
eobj = self.errorbar(
ex, ey, color=barcolor, linewidth=barlinewidth, linestyle='none',
markeredgecolor=barcolor, markeredgewidth=barlinewidth,
capsize=capsize, zorder=barzorder, **{sy + 'err': edata}
)
eobjs.append(eobj)
# Call main function
# NOTE: Provide error objects for inclusion in legend, but *only* provide
# the shading. Never want legend entries for error bars.
xy = (x, data) if violin else (x, y)
kwargs.setdefault('_errobjs', eobjs[:int(shade + fade)])
res = obj = method(self, *xy, *args, **kwargs)
    # Apply inferred colors to objects
i = 0
if isinstance(res, tuple): # pull out patch from e.g. BarContainers
obj = res[0]
for b, infer in zip((fade, shade), (fadecolor_infer, shadecolor_infer)):
if not b or not infer:
continue
if hasattr(obj, 'get_facecolor'):
color = obj.get_facecolor()
elif hasattr(obj, 'get_color'):
color = obj.get_color()
else:
color = None
if color is not None:
eobjs[i].set_facecolor(color)
i += 1
# Return objects
# NOTE: For now 'errobjs' can only be returned with 1D y coordinates
    # NOTE: Avoid expanding matplotlib collections that are list subclasses here
if not eobjs:
return res
elif isinstance(res, tuple) and not isinstance(res, mcontainer.Container):
return ((*res, *eobjs),) # for plot()
else:
return (res, *eobjs)
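# Illustrative usage sketch (hypothetical call, following the parameters documented above):
#   data = np.random.rand(20, 8)   # 20 samples for each of 8 x locations
#   ax.plot(np.arange(8), data, means=True, barstds=True, shadestds=True)
# plots the column means with +/-3 sigma error bars and +/-2 sigma shading by default.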
def _apply_plot(self, *args, cmap=None, values=None, **kwargs):
"""
    Apply plot or plotx command.
"""
# Deprecated functionality
if cmap is not None:
warnings._warn_proplot(
'Drawing "parametric" plots with ax.plot(x, y, values=values, cmap=cmap) '
'is deprecated and will be removed in the next major release. Please use '
'ax.parametric(x, y, values, cmap=cmap) instead.'
)
return self.parametric(*args, cmap=cmap, values=values, **kwargs)
# Plot line(s)
method = kwargs.pop('_method')
name = method.__name__
sx = 'y' if 'x' in name else 'x' # i.e. plotx
objs = []
args = list(args)
while args:
# Support e.g. x1, y1, fmt, x2, y2, fmt2 input
# NOTE: Copied from _process_plot_var_args.__call__ to avoid relying
# on public API. ProPlot already supports passing extra positional
# arguments beyond x, y so can feed (x1, y1, fmt) through wrappers.
# Instead represent (x2, y2, fmt, ...) as successive calls to plot().
iargs, args = args[:2], args[2:]
if args and isinstance(args[0], str):
iargs.append(args[0])
args = args[1:]
# Call function
iobjs = method(self, *iargs, values=values, **kwargs)
# Add sticky edges
# NOTE: Skip edges when error bars present or caps are flush against axes edge
lines = all(isinstance(obj, mlines.Line2D) for obj in iobjs)
if lines and not getattr(self, '_no_sticky_edges', False):
for obj in iobjs:
data = getattr(obj, 'get_' + sx + 'data')()
if not data.size:
continue
convert = getattr(self, 'convert_' + sx + 'units')
edges = getattr(obj.sticky_edges, sx)
edges.append(convert(min(data)))
edges.append(convert(max(data)))
objs.extend(iobjs)
return tuple(objs)
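# Illustrative note: the while-loop above lets one call supply several (x, y, fmt)
# triplets, e.g. a hypothetical ax.plot(x1, y1, 'k-', x2, y2, 'r--'), which is
# dispatched as successive calls to the wrapped plot command.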
def _plot_extras(self, *args, **kwargs):
"""
Pre-processing for `plot`.
"""
return _apply_plot(self, *args, **kwargs)
def _plotx_extras(self, *args, **kwargs):
"""
Pre-processing for `plotx`.
"""
# NOTE: The 'horizontal' orientation will be inferred by downstream
# wrappers using the function name.
return _apply_plot(self, *args, **kwargs)
def _stem_extras(
self, *args, linefmt=None, basefmt=None, markerfmt=None, **kwargs
):
"""
Make `use_line_collection` the default to suppress warning message.
"""
# Set default colors
# NOTE: 'fmt' strings can only be 2 to 3 characters and include color shorthands
# like 'r' or cycle colors like 'C0'. Cannot use full color names.
# NOTE: Matplotlib defaults try to make a 'reddish' color the base and 'bluish'
# color the stems. To make this more robust we temporarily replace the cycler
# with a negcolor/poscolor cycler.
method = kwargs.pop('_method')
fmts = (linefmt, basefmt, markerfmt)
if not any(isinstance(_, str) and re.match(r'\AC[0-9]', _) for _ in fmts):
cycle = constructor.Cycle((rc['negcolor'], rc['poscolor']), name='_neg_pos')
context = rc.context({'axes.prop_cycle': cycle})
else:
context = _dummy_context()
# Add stem lines with bluish stem color and reddish base color
with context:
kwargs['linefmt'] = linefmt = _not_none(linefmt, 'C0-')
kwargs['basefmt'] = _not_none(basefmt, 'C1-')
kwargs['markerfmt'] = _not_none(markerfmt, linefmt[:-1] + 'o')
kwargs.setdefault('use_line_collection', True)
try:
return method(self, *args, **kwargs)
except TypeError:
del kwargs['use_line_collection'] # older version
return method(self, *args, **kwargs)
def _parametric_extras(self, x, y, c=None, *, values=None, interp=0, **kwargs):
"""
Interpolate the array.
"""
# Parse input
# NOTE: Critical to put this here instead of parametric() so that the
# interpolated 'values' are used to select colormap levels in apply_cmap.
method = kwargs.pop('_method')
c = _not_none(c=c, values=values)
if c is None:
raise ValueError('Values must be provided.')
c = _to_ndarray(c)
ndim = tuple(_.ndim for _ in (x, y, c))
size = tuple(_.size for _ in (x, y, c))
if any(_ != 1 for _ in ndim):
raise ValueError(f'Input coordinates must be 1D. Instead got dimensions {ndim}.') # noqa: E501
if any(_ != size[0] for _ in size):
raise ValueError(f'Input coordinates must have identical size. Instead got sizes {size}.') # noqa: E501
# Interpolate values to allow for smooth gradations between values
# (interp=False) or color switchover halfway between points
# (interp=True). Then optionally interpolate the colormap values.
# NOTE: The 'extras' wrapper handles input before ingestion by other wrapper
# functions. *This* method is analogous to a native matplotlib method.
if interp > 0:
x_orig, y_orig, v_orig = x, y, c
x, y, c = [], [], []
for j in range(x_orig.shape[0] - 1):
idx = slice(None)
if j + 1 < x_orig.shape[0] - 1:
idx = slice(None, -1)
x.extend(np.linspace(x_orig[j], x_orig[j + 1], interp + 2)[idx].flat)
y.extend(np.linspace(y_orig[j], y_orig[j + 1], interp + 2)[idx].flat)
c.extend(np.linspace(v_orig[j], v_orig[j + 1], interp + 2)[idx].flat) # noqa: E501
x, y, c = np.array(x), np.array(y), np.array(c)
return method(self, x, y, values=c, **kwargs)
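# Illustrative sketch (hypothetical helper): the densification performed above for
# interp=1, which inserts one extra point between each pair of coordinates.
def _example_parametric_interp():
    import numpy as np
    x_orig = np.array([0.0, 1.0, 2.0])
    interp = 1
    x = []
    for j in range(x_orig.shape[0] - 1):
        idx = slice(None, -1) if j + 1 < x_orig.shape[0] - 1 else slice(None)
        x.extend(np.linspace(x_orig[j], x_orig[j + 1], interp + 2)[idx].flat)
    assert np.allclose(x, [0.0, 0.5, 1.0, 1.5, 2.0])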
def _check_negpos(name, **kwargs):
"""
    Issue warnings if we are ignoring arguments for "negpos" plotting commands.
"""
for key, arg in kwargs.items():
if arg is None:
continue
warnings._warn_proplot(
f'{name}() argument {key}={arg!r} is incompatible with '
'negpos=True. Ignoring.'
)
def _apply_lines(
self, *args,
stack=None, stacked=None,
negpos=False, negcolor=None, poscolor=None,
color=None, colors=None,
linestyle=None, linestyles=None,
lw=None, linewidth=None, linewidths=None,
**kwargs
):
"""
Apply hlines or vlines command. Support default "minima" at zero.
"""
# Parse input arguments
method = kwargs.pop('_method')
name = method.__name__
stack = _not_none(stack=stack, stacked=stacked)
colors = _not_none(color=color, colors=colors)
linestyles = _not_none(linestyle=linestyle, linestyles=linestyles)
linewidths = _not_none(lw=lw, linewidth=linewidth, linewidths=linewidths)
args = list(args)
if len(args) > 3:
raise ValueError(f'Expected 1-3 positional args, got {len(args)}.')
if len(args) == 3 and stack:
warnings._warn_proplot(
f'{name}() cannot have three positional arguments with stack=True. '
'Ignoring second argument.'
)
if len(args) == 2: # empty possible
args.insert(1, np.array([0.0])) # default base
# Support "negative" and "positive" lines
x, y1, y2, *args = args # standardized
if not negpos:
# Plot basic lines
kwargs['stack'] = stack
if colors is not None:
kwargs['colors'] = colors
result = method(self, x, y1, y2, *args, **kwargs)
objs = (result,)
else:
# Plot negative and positive colors
_check_negpos(name, stack=stack, colors=colors)
y1neg, y2neg = _mask_array(y2 < y1, y1, y2)
color = _not_none(negcolor, rc['negcolor'])
negobj = method(self, x, y1neg, y2neg, color=color, **kwargs)
y1pos, y2pos = _mask_array(y2 >= y1, y1, y2)
color = _not_none(poscolor, rc['poscolor'])
posobj = method(self, x, y1pos, y2pos, color=color, **kwargs)
objs = result = (negobj, posobj)
# Apply formatting unavailable in matplotlib
for obj in objs:
if linewidths is not None:
obj.set_linewidth(linewidths) # LineCollection setters
if linestyles is not None:
obj.set_linestyle(linestyles)
return result
@docstring.add_snippets
def vlines_extras(self, *args, **kwargs):
"""
%(axes.vlines)s
"""
return _apply_lines(self, *args, **kwargs)
@docstring.add_snippets
def hlines_extras(self, *args, **kwargs):
"""
%(axes.hlines)s
"""
# NOTE: The 'horizontal' orientation will be inferred by downstream
# wrappers using the function name.
return _apply_lines(self, *args, **kwargs)
def _apply_scatter(
self, *args,
vmin=None, vmax=None, smin=None, smax=None,
cmap=None, cmap_kw=None, norm=None, norm_kw=None,
extend='neither', levels=None, N=None, values=None,
locator=None, locator_kw=None, discrete=None,
symmetric=False, positive=False, negative=False, nozero=False, inbounds=None,
**kwargs
):
"""
Apply scatter or scatterx markers. Permit up to 4 positional arguments
including `s` and `c`.
"""
# Manage input arguments
method = kwargs.pop('_method')
props = _pop_props(kwargs, 'lines')
c = props.pop('color', None)
s = props.pop('markersize', None)
args = list(args)
if len(args) > 4:
raise ValueError(f'Expected 1-4 positional arguments, got {len(args)}.')
if len(args) == 4:
c = _not_none(c_positional=args.pop(-1), c=c)
if len(args) == 3:
s = _not_none(s_positional=args.pop(-1), s=s)
# Get colormap
cmap_kw = cmap_kw or {}
if cmap is not None:
cmap_kw.setdefault('luminance', 90) # matches to_listed behavior
cmap = constructor.Colormap(cmap, **cmap_kw)
# Get normalizer and levels
ticks = None
carray = np.atleast_1d(c)
discrete = _not_none(
getattr(self, '_image_discrete', None),
discrete,
rc['image.discrete'],
True
)
if (
discrete and np.issubdtype(carray.dtype, np.number)
and not (carray.ndim == 2 and carray.shape[1] in (3, 4))
):
carray = carray.ravel()
levels = _not_none(levels=levels, N=N)
norm, cmap, _, ticks = _build_discrete_norm(
self, carray, # sample data for getting suitable levels
levels=levels, values=values,
cmap=cmap, norm=norm, norm_kw=norm_kw, extend=extend,
vmin=vmin, vmax=vmax, locator=locator, locator_kw=locator_kw,
symmetric=symmetric, positive=positive, negative=negative,
nozero=nozero, inbounds=inbounds,
)
# Fix 2D arguments but still support scatter(x_vector, y_2d) usage
# NOTE: numpy.ravel() preserves masked arrays
# NOTE: Since we are flattening vectors the coordinate metadata is meaningless,
# so converting to ndarray and stripping metadata is no problem.
if len(args) == 2 and all(_to_ndarray(arg).squeeze().ndim > 1 for arg in args):
args = tuple(np.ravel(arg) for arg in args)
# Scale s array
if np.iterable(s) and (smin is not None or smax is not None):
smin_true, smax_true = min(s), max(s)
if smin is None:
smin = smin_true
if smax is None:
smax = smax_true
s = smin + (smax - smin) * (np.array(s) - smin_true) / (smax_true - smin_true)
# Call function
obj = objs = method(self, *args, c=c, s=s, cmap=cmap, norm=norm, **props, **kwargs)
if not isinstance(objs, tuple):
objs = (obj,)
for iobj in objs:
iobj._colorbar_extend = extend
iobj._colorbar_ticks = ticks
return obj
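# Illustrative sketch (hypothetical helper): the smin/smax rescaling of marker sizes
# applied above, with plain numpy.
def _example_scatter_size_scaling():
    import numpy as np
    s = np.array([1.0, 2.0, 3.0, 4.0])
    smin, smax = 10.0, 100.0
    smin_true, smax_true = s.min(), s.max()
    scaled = smin + (smax - smin) * (s - smin_true) / (smax_true - smin_true)
    assert np.isclose(scaled[0], 10.0) and np.isclose(scaled[-1], 100.0)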
@docstring.add_snippets
def scatter_extras(self, *args, **kwargs):
"""
%(axes.scatter)s
"""
return _apply_scatter(self, *args, **kwargs)
@docstring.add_snippets
def scatterx_extras(self, *args, **kwargs):
"""
%(axes.scatterx)s
"""
# NOTE: The 'horizontal' orientation will be inferred by downstream
# wrappers using the function name.
return _apply_scatter(self, *args, **kwargs)
def _apply_fill_between(
self, *args, where=None, negpos=None, negcolor=None, poscolor=None,
lw=None, linewidth=None, color=None, facecolor=None,
stack=None, stacked=None, **kwargs
):
"""
Apply `fill_between` or `fill_betweenx` shading. Permit up to 4
positional arguments including `where`.
"""
# Parse input arguments
method = kwargs.pop('_method')
name = method.__name__
sx = 'y' if 'x' in name else 'x' # i.e. fill_betweenx
sy = 'x' if sx == 'y' else 'y'
stack = _not_none(stack=stack, stacked=stacked)
color = _not_none(color=color, facecolor=facecolor)
linewidth = _not_none(lw=lw, linewidth=linewidth, default=0)
args = list(args)
if len(args) > 4:
raise ValueError(f'Expected 1-4 positional args, got {len(args)}.')
if len(args) == 4:
where = _not_none(where_positional=args.pop(3), where=where)
if len(args) == 3 and stack:
warnings._warn_proplot(
f'{name}() cannot have three positional arguments with stack=True. '
'Ignoring second argument.'
)
if len(args) == 2: # empty possible
args.insert(1, np.array([0.0])) # default base
# Draw patches with default edge width zero
x, y1, y2 = args
kwargs['linewidth'] = linewidth
if not negpos:
# Plot basic patches
kwargs.update({'where': where, 'stack': stack})
if color is not None:
kwargs['color'] = color
result = method(self, x, y1, y2, **kwargs)
objs = (result,)
else:
# Plot negative and positive patches
if y1.ndim > 1 or y2.ndim > 1:
raise ValueError(f'{name}() arguments with negpos=True must be 1D.')
kwargs.setdefault('interpolate', True)
_check_negpos(name, where=where, stack=stack, color=color)
color = _not_none(negcolor, rc['negcolor'])
negobj = method(self, x, y1, y2, where=(y2 < y1), facecolor=color, **kwargs)
color = _not_none(poscolor, rc['poscolor'])
posobj = method(self, x, y1, y2, where=(y2 >= y1), facecolor=color, **kwargs)
result = objs = (posobj, negobj) # may be tuple of tuples due to apply_cycle
# Add sticky edges in x-direction, and sticky edges in y-direction
# *only* if one of the y limits is scalar. This should satisfy most users.
# NOTE: Could also retrieve data from PolyCollection but that's tricky.
# NOTE: Standardize function guarantees ndarray input by now
if not getattr(self, '_no_sticky_edges', False):
xsides = (np.min(x), np.max(x))
ysides = []
for y in (y1, y2):
if y.size == 1:
ysides.append(y.item())
objs = tuple(obj for _ in objs for obj in (_ if isinstance(_, tuple) else (_,)))
for obj in objs:
for s, sides in zip((sx, sy), (xsides, ysides)):
convert = getattr(self, 'convert_' + s + 'units')
edges = getattr(obj.sticky_edges, s)
edges.extend(convert(sides))
return result
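# Illustrative note: with negpos=True the single call above becomes two calls,
#   method(self, x, y1, y2, where=(y2 < y1), facecolor=rc['negcolor'], ...)
#   method(self, x, y1, y2, where=(y2 >= y1), facecolor=rc['poscolor'], ...)
# so that regions below and above the baseline are shaded with the 'negcolor'
# and 'poscolor' settings respectively.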
@docstring.add_snippets
def fill_between_extras(self, *args, **kwargs):
"""
%(axes.fill_between)s
"""
return _apply_fill_between(self, *args, **kwargs)
@docstring.add_snippets
def fill_betweenx_extras(self, *args, **kwargs):
"""
%(axes.fill_betweenx)s
"""
# NOTE: The 'horizontal' orientation will be inferred by downstream
# wrappers using the function name.
return _apply_fill_between(self, *args, **kwargs)
def _convert_bar_width(x, width=1, ncols=1):
"""
Convert bar plot widths from relative to coordinate spacing. Relative
widths are much more convenient for users.
"""
# WARNING: This will fail for non-numeric non-datetime64 singleton
# datatypes but this is good enough for vast majority of cases.
x_test = np.atleast_1d(_to_ndarray(x))
if len(x_test) >= 2:
x_step = x_test[1:] - x_test[:-1]
x_step = np.concatenate((x_step, x_step[-1:]))
elif x_test.dtype == np.datetime64:
x_step = np.timedelta64(1, 'D')
else:
x_step = np.array(0.5)
if np.issubdtype(x_test.dtype, np.datetime64):
# Avoid integer timedelta truncation
x_step = x_step.astype('timedelta64[ns]')
return width * x_step / ncols
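# Illustrative sketch (hypothetical helper): converting a relative width of 0.8 to
# coordinate units for evenly spaced x with step 2, following the logic above.
def _example_convert_bar_width():
    import numpy as np
    x = np.array([0.0, 2.0, 4.0, 6.0])
    steps = x[1:] - x[:-1]
    x_step = np.concatenate((steps, steps[-1:]))  # repeat the last step
    width = 0.8 * x_step / 1                      # ncols=1
    assert np.allclose(width, 1.6)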
def _apply_bar(
self, *args, stack=None, stacked=None,
lw=None, linewidth=None, color=None, facecolor=None, edgecolor='black',
negpos=False, negcolor=None, poscolor=None, absolute_width=False, **kwargs
):
"""
Apply bar or barh command. Support default "minima" at zero.
"""
# Parse args
    # TODO: Stacked feature is implemented in `apply_cycle`, but it makes more
    # sense to document it here. Figure out a way to move it here?
method = kwargs.pop('_method')
name = method.__name__
stack = _not_none(stack=stack, stacked=stacked)
color = _not_none(color=color, facecolor=facecolor)
linewidth = _not_none(lw=lw, linewidth=linewidth, default=rc['patch.linewidth'])
kwargs.update({'linewidth': linewidth, 'edgecolor': edgecolor})
args = list(args)
if len(args) > 4:
raise ValueError(f'Expected 1-4 positional args, got {len(args)}.')
if len(args) == 4 and stack:
warnings._warn_proplot(
f'{name}() cannot have four positional arguments with stack=True. '
'Ignoring fourth argument.' # i.e. ignore default 'bottom'
)
if len(args) == 2:
args.append(np.array([0.8])) # default width
if len(args) == 3:
args.append(
|
np.array([0.0])
|
numpy.array
|
###############################################################################
# ferre.py: module for interacting with <NAME>'s FERRE code
###############################################################################
import os, os.path
import copy
import subprocess
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy
from functools import wraps
import tempfile
import apogee.tools.path as appath
from apogee.tools import paramIndx,_ELEM_SYMBOL
from apogee.tools.read import modelspecOnApStarWavegrid
import apogee.spec.window as apwindow
import apogee.spec.cannon as cannon
from apogee.modelspec import specFitInput, _chi2
try:
import apogee.util.emcee
except ImportError:
pass
def paramArrayInputDecorator(startIndx):
"""Decorator to parse spectral input parameters given as arrays,
assumes the arguments are: something,somethingelse,teff,logg,metals,am,nm,cm,vmicro=,
startindx is the index in arguments where the teff,logg,... sequence starts"""
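    # Illustrative note (hypothetical call): with startIndx=2, a scalar call such as
    #   func(spec, specerr, 4750., 2.5, 0., 0., 0., 0.)
    # is rewritten so that each of the six stellar parameters becomes a length-1
    # numpy array (e.g. teff -> numpy.array([4750.])) before being passed on.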
def wrapper(func):
@wraps(func)
def scalar_wrapper(*args,**kwargs):
if numpy.array(args[startIndx]).shape == ():
newargs= ()
for ii in range(startIndx):
newargs= newargs+(args[ii],)
for ii in range(6):
newargs= newargs+(numpy.array([args[ii+startIndx]]),)
for ii in range(len(args)-6-startIndx):
newargs= newargs+(args[ii+startIndx+6],)
args= newargs
if not kwargs.get('vm',None) is None:
kwargs['vm']=
|
numpy.array([kwargs['vm']])
|
numpy.array
|
import os
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split
ATTRS_FILE = "datasets/lfw_attributes.txt" # http://www.cs.columbia.edu/CAVE/databases/pubfig/download/lfw_attributes.txt
IMAGES_DIR = "datasets/lfw-deepfunneled" # http://vis-www.cs.umass.edu/lfw/lfw-deepfunneled.tgz
RAW_IMAGES_DIR = "datasets/lfw" # http://vis-www.cs.umass.edu/lfw/lfw.tgz
def load_dataset(use_raw=False, dx=80, dy=80, dimx=45, dimy=45):
"""
Loads the `Labeled Faces in the Wild` dataset with
train/test split and attributes into memory.
Args:
use_raw (bool, optional):
Flag for using raw data or not. If unspecified,
defaults to `False`.
dx (int, optional):
x co-ordinate to crop the images. If unspecified,
defaults to 80.
dy (int, optional):
y co-ordinate to crop the images. If unspecified,
defaults to 80.
dimx (int, optional):
Width dim of the images. If unspecified, defaults
to 45.
dimy (int, optional):
Height dim of the images. If unspecified, defaults
to 45.
Returns:
numpy.ndarray:
Training data for the model.
numpy.ndarray:
Testing data for the model.
list:
Shape of images in the training set.
pandas.DataFrame:
Dataframe consisting of attribute data of people in the dataset.
"""
# read attrs
df_attrs = pd.read_csv(ATTRS_FILE, sep='\t', skiprows=1)
df_attrs.columns = list(df_attrs.columns)[1: ] + ["NaN"]
df_attrs = df_attrs.drop("NaN", axis=1)
imgs_with_attrs = set(map(tuple, df_attrs[["person", "imagenum"]].values))
# read photos
X = []
photo_ids = []
image_dir = RAW_IMAGES_DIR if use_raw else IMAGES_DIR
folders = os.listdir(image_dir)
for folder in tqdm(folders, total=len(folders), desc='Preprocessing', leave=False):
files = os.listdir(os.path.join(image_dir, folder))
for file in files:
if not os.path.isfile(os.path.join(image_dir, folder, file)) or not file.endswith(".jpg"):
continue
# preprocess image
img = cv2.imread(os.path.join(image_dir, folder, file))
img = img[dy:-dy, dx:-dx]
img = cv2.resize(img, (dimx, dimy))
# parse person
fname = os.path.split(file)[-1]
fname_splitted = fname[:-4].replace('_', ' ').split()
person_id = ' '.join(fname_splitted[:-1])
photo_number = int(fname_splitted[-1])
if (person_id, photo_number) in imgs_with_attrs:
X.append(img)
photo_ids.append({'person': person_id, 'imagenum': photo_number})
photo_ids = pd.DataFrame(photo_ids)
X =
|
np.stack(X)
|
numpy.stack
|
import os
import requests
import numpy as np
# Data retrieval
fname = []
for j in range(3):
fname.append('steinmetz_part%d.npz'%j)
url = ["https://osf.io/agvxh/download"]
url.append("https://osf.io/uv3mw/download")
url.append("https://osf.io/ehmw2/download")
for j in range(len(url)):
if not os.path.isfile(fname[j]):
try:
r = requests.get(url[j])
except requests.ConnectionError:
print("!!! Failed to download data !!!")
else:
if r.status_code != requests.codes.ok:
print("!!! Failed to download data !!!")
else:
with open(fname[j], "wb") as fid:
fid.write(r.content)
# Data loading
alldat =
|
np.array([])
|
numpy.array
|
import numpy as np
import random
from sklearn import mixture
from sklearn import svm
def createSyntheticDataSet(nbClasses, nbInst, dictionnary, dictionnaryProbability):
varNorm = 0.1
listClass = []
for i in range(0, nbClasses):
arr = np.zeros((1, nbClasses))
arr[0][i] = 1
listClass.append(arr)
dataForLearnModel = np.zeros((0, nbClasses))
classToFit = np.zeros(0)
for i in range(0, nbClasses):
for j in range(0, 1000):
point = listClass[i] + np.random.normal(0, varNorm, nbClasses)
dataForLearnModel = np.append(dataForLearnModel, point, axis=0)
classToFit = np.append(classToFit, i)
# g = mixture.GMM(n_components=nbClasses)
# g.fit(dataForLearnModel, classToFit)
sequence =
|
np.zeros((0, nbClasses))
|
numpy.zeros
|
from keras.models import Sequential
from keras.models import model_from_json
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.utils import np_utils
import keras
import numpy as np
import json
model = None
## Loads model weights.
with open('03.MNIST_cnn_model_LeNet5.config', 'r') as text_file:
json_config = text_file.read()
model = Sequential()
model = model_from_json(json_config)
model.load_weights('03.MNIST_cnn_model_LeNet5.weights')
print(model.summary())
# Compile model.
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
## Read image
num_images = 30
test_images = np.array([])
for i in range(num_images):
img = load_img('02-04.images/numbers_' + str(i) +'.jpg', color_mode='grayscale', target_size=(28, 28))
# img = img.resize((28, 28)) # Resize image.
    img_arr = np.expand_dims(img_to_array(img) / 255, axis=0)
if test_images.shape[0] == 0:
test_images = img_arr
else:
test_images =
|
np.append(test_images, img_arr, axis=0)
|
numpy.append
|
"""Functions for plotting data."""
import scipy.interpolate
import pandas as pd
from pathlib import Path
import numpy as np
from datetime import date, datetime, timedelta
from tempfile import NamedTemporaryFile
import matplotlib.pyplot as plt
from matplotlib.image import imread
import matplotlib.ticker as ticker
import matplotlib.pylab as pl
import matplotlib.dates as mdates
from .scores import *
from .__init__ import figures_dir, data_dir
import os
def save_figures(name):
global figures_dir
figures_dir = Path(figures_dir)
(figures_dir / name).parent.mkdir(parents=True, exist_ok=True) # Create all necessary parent dirs
plt.savefig(figures_dir / (name + '.svg'),
bbox_inches = 'tight',
dpi=300)
def plotdifferencescdfpdf(Scoreboard, model_target, quiet=False):
model_targets = ['Case', 'Death']
if model_target not in model_targets:
raise ValueError("Invalid sim type. Expected one of: %s" % model_targets)
if model_target == 'Case':
titlelabel= 'Weekly Incidental Cases'
elif model_target == 'Death':
titlelabel= 'Cumulative Deaths'
plt.figure(figsize=(4, 2.5), dpi=180, facecolor='w', edgecolor='k')
plt.hist(Scoreboard['prange']-Scoreboard['sumpdf'],bins=50)
plt.xlabel("Difference between integrated pdf and given cdf")
plt.title('US COVID-19 ' + titlelabel)
if not quiet:
print('===========================')
print('Maximum % conversion error:')
print(100*max(Scoreboard['prange']-Scoreboard['sumpdf']))
save_figures(model_target+'_diffcdfpdf')
def plotUSCumDeaths(US_deaths) -> None:
plt.figure(figsize=(4, 2.5), dpi=180, facecolor='w', edgecolor='k')
plt.plot(US_deaths.DateObserved,US_deaths.Deaths)
plt.xticks(rotation=45)
plt.title('US Cumulative Deaths')
plt.ylabel('Deaths')
save_figures('USDeaths')
def plotUSIncCases(US_cases) -> None:
plt.figure(figsize=(4, 2.5), dpi=180, facecolor='w', edgecolor='k')
plt.plot(US_cases.DateObserved,US_cases.Cases)
plt.xticks(rotation=45)
plt.title('US Weekly Incidental Cases')
plt.ylabel('Cases')
save_figures('USCases')
def perdelta(start, end, delta):
"""Generate a list of datetimes in an
interval for plotting purposes (date labeling)
Args:
start (date): Start date.
end (date): End date.
delta (date): Variable date as interval.
Yields:
Bunch of dates
"""
curr = start
while curr < end:
yield curr
curr += delta
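# Illustrative example: perdelta yields evenly spaced dates, e.g.
#   list(perdelta(date(2020, 3, 1), date(2020, 3, 29), timedelta(weeks=1)))
# -> [date(2020, 3, 1), date(2020, 3, 8), date(2020, 3, 15), date(2020, 3, 22)]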
def numberofteamsincovidhub(FirstForecasts, figures_dir)->None:
fig = plt.figure(num=None, figsize=(6, 4), dpi=120, facecolor='w', edgecolor='k')
FirstForecasts['forecast_date']= pd.to_datetime(FirstForecasts['forecast_date'])
plt.plot(FirstForecasts['forecast_date'],FirstForecasts['cumnumteams'])
plt.xticks(rotation=70)
plt.ylabel('Total Number of Modeling Teams')
plt.xlabel('First Date of Entry')
plt.title('Number of Teams in Covid-19 Forecast Hub Increases')
plt.fmt_xdata = mdates.DateFormatter('%m-%d')
save_figures('numberofmodels')
plt.show(fig)
def plotallscoresdist(Scoreboard, model_target, figsize=(8, 6), interval='Weeks') -> None:
assert interval in ('Weeks', 'Days')
if interval == 'Weeks':
delta = 'deltaW'
else:
delta = 'delta'
model_targets = ['Case', 'Death']
if model_target not in model_targets:
raise ValueError("Invalid sim type. Expected one of: %s" % model_targets)
if model_target == 'Case':
filelabel = 'INCCASE'
titlelabel= 'Weekly Incidental Cases'
elif model_target == 'Death':
filelabel = 'CUMDEATH'
titlelabel= 'Weekly Cumulative Deaths'
minn = min(Scoreboard[delta])
maxx = max(Scoreboard[delta])
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=300, facecolor='w', edgecolor='k')
Scoreboard.plot.scatter(x=delta, y='score', marker='.', ax=ax[0], color='k')
ax[0].set_xlabel('N-%s Forward Forecast' % interval)
ax[0].set_xticks(np.arange(minn, maxx+1, 5.0))
ax[0].xaxis.set_tick_params(rotation=90)
ax[0].set_title(titlelabel + ' Forecasts')
binwidth = 1
Scoreboard[delta].hist(bins=np.arange(minn, maxx + binwidth, binwidth), ax=ax[1])
ax[1].set_title(titlelabel + ' Forecasts')
ax[1].set_xlabel('N-%s Forward Forecast' % interval)
ax[1].set_ylabel('Number of forecasts made')
ax[1].set_xticks(np.arange(minn, maxx+1, 5.0))
ax[1].xaxis.set_tick_params(rotation=90)
ax[1].grid(b=None)
plt.tight_layout()
save_figures(filelabel+'_x-%s_Forward_Forecast_And_Hist' % interval)
def plotlongitudinal(Actual, Scoreboard, scoretype, WeeksAhead, curmodlist) -> None:
"""Plots select model predictions against actual data longitudinally
Args:
Actual (pd.DataFrame): The actual data
Scoreboard (pd.DataFrame): The scoreboard dataframe
scoretype (str): "Cases" or "Deaths"
WeeksAhead (int): Forecasts from how many weeks ahead
curmodlist (list): List of name of the model whose forecast will be shown
Returns:
None
"""
Scoreboardx = Scoreboard[Scoreboard['deltaW']==WeeksAhead].copy()
Scoreboardx.sort_values('target_end_date',inplace=True)
Scoreboardx.reset_index(inplace=True)
plt.figure(num=None, figsize=(6, 6), dpi=300, facecolor='w', edgecolor='k')
models = Scoreboardx['model'].unique()
colors = pl.cm.jet(np.linspace(0,1,len(curmodlist)))
i = 0
for curmod in curmodlist:
dates = Scoreboardx[Scoreboardx['model']==curmod].target_end_date
PE = Scoreboardx[Scoreboardx['model']==curmod].PE
CIlow = Scoreboardx[Scoreboardx['model']==curmod].CILO
CIhi = Scoreboardx[Scoreboardx['model']==curmod].CIHI
modcol = (colors[i].tolist()[0],
colors[i].tolist()[1],
colors[i].tolist()[2])
plt.plot(dates,PE,color=modcol,label=curmod)
plt.fill_between(dates, CIlow, CIhi, color=modcol, alpha=.1)
i = i+1
plt.plot(Actual['DateObserved'], Actual[scoretype],color='k', linewidth=3.0)
plt.ylim([(Actual[scoretype].min())*0.6, (Actual[scoretype].max())*1.4])
plt.ylabel('US Cumulative '+scoretype)
plt.xlabel('Date')
plt.xticks(rotation=45)
#plt.yticks(fontsize=13)
plt.fmt_xdata = mdates.DateFormatter('%m-%d')
plt.title(str(WeeksAhead)+'-week-ahead Forecasts')
plt.legend(loc="upper left", labelspacing=.9)
save_figures(scoretype+'_'+''.join(curmodlist)+'_'+str(WeeksAhead)+'wk')
def plotlongitudinalUNWEIGHTED(Actual, Scoreboard, scoretype, numweeks, numweeks_start=1, max_weeks_ahead=8) -> None:
"""Plots select model predictions against actual data longitudinally
Args:
Actual (pd.DataFrame): The actual data
Scoreboard (pd.DataFrame): The scoreboard dataframe
scoretype (str): "Cases" or "Deaths"
numweeks (int): number of weeks to plot
Returns:
None
"""
scoretypes = ['Cases', 'Deaths']
if scoretype not in scoretypes:
raise ValueError("Invalid sim type. Expected one of: %s" % scoretypes)
if scoretype == 'Cases':
titlelabel= 'weekly incidental cases'
elif scoretype == 'Deaths':
titlelabel= 'weekly deaths'
numweeks += 1
labelp = 'Average Unweighted Forecasts'
colors = pl.cm.jet(np.linspace(0, 1, max_weeks_ahead))
plt.figure(num=None, figsize=(6, 4), dpi=80, facecolor='w', edgecolor='k')
for weeks_ahead in range(numweeks_start, numweeks):
Scoreboardx = Scoreboard[Scoreboard['deltaW']==weeks_ahead].copy()
Scoreboardx.sort_values('target_end_date',inplace=True)
Scoreboardx.reset_index(inplace=True)
MerdfPRED = Scoreboardx.copy()
MerdfPRED = (MerdfPRED.groupby(['target_end_date'],
as_index=False)[['CILO','PE','CIHI']].agg(lambda x:
|
np.median(x)
|
numpy.median
|
# -*- coding: iso-8859-1 -*-
"""
Purpose of this code is to plot the figures from the Discussion section of our paper.
"""
########################
###Import useful libraries
########################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pdb
import cookbook
from matplotlib.pyplot import cm
import cPickle as pickle
########################
###Define useful constants, all in CGS (via http://www.astro.wisc.edu/~dolan/constants.html)
########################
#Unit conversions
km2m=1.e3 #1 km in m
km2cm=1.e5 #1 km in cm
cm2km=1.e-5 #1 cm in km
cm2inch=1./2.54 #1 cm in inches
amu2g=1.66054e-24 #1 amu in g
bar2atm=0.9869 #1 bar in atm
Pascal2bar=1.e-5 #1 Pascal in bar
Pa2bar=1.e-5 #1 Pascal in bar
bar2Pa=1.e5 #1 bar in Pascal
deg2rad=np.pi/180.
bar2barye=1.e6 #1 Bar in Barye (the cgs unit of pressure)
barye2bar=1.e-6 #1 Barye in Bar
micron2m=1.e-6 #1 micron in m
micron2cm=1.e-4 #1 micron in cm
metricton2kg=1000. #1 metric ton in kg
#Fundamental constants
c=2.997924e10 #speed of light, cm/s
h=6.6260755e-27 #planck constant, erg/s
k=1.380658e-16 #boltzmann constant, erg/K
sigma=5.67051e-5 #Stefan-Boltzmann constant, erg/(cm^2 K^4 s)
R_earth=6371.*km2m#radius of earth in m
R_sun=69.63e9 #radius of sun in cm
AU=1.496e13#1AU in cm
#Mean molecular masses
m_co2=44.01*amu2g #co2, in g
m_h2o=18.02*amu2g #h2o, in g
#Mars parameters
g=371. #surface gravity of Mars, cm/s**2, from: http://nssdc.gsfc.nasa.gov/planetary/factsheet/marsfact.html
deg2rad=np.pi/180. #1 degree in radian
########################
###Which plots to generate?
########################
plot_doses_pco2=True #plot the dependence of dose rate on pCO2 in a CO2-H2O atmosphere (fixed temperature) NOTE: may want to do for low-albedo. Both more physically plausible and avoids weird uptick effect
plot_doses_clouds=True #plot the dependence of dose rate on CO2 cloud optical depth in a CO2-H2O atmosphere (fixed temperature)
plot_doses_dust_pco2=True #plot the dependence of dose rate as a function of dust level for different pCO2
plot_doses_dust_clouds=True #plot the dependence of dose rate as a function of dust level for different cloud levels.
plot_doses_pso2_pco2=True #plot the dependence of dose rate as a function of pSO2 for different pCO2
plot_doses_pso2_clouds=True #plot the dependence of dose rate as a function of dust level for different cloud levels.
plot_doses_ph2s_pco2=True #plot the dependence of dose rate as a function of pH2S for different pCO2
plot_doses_ph2s_clouds=True #plot the dependence of dose rate as a function of pH2S for different pCO2
plot_reldoses_pso2=True #plot the ratio between the "bad" dose rate and the "good" dose rate as function of PSO2. Plot for 1) pCO2=0.02, cloud=1000 and 2) pCO2=2 bar, cloud=0.
plot_reldoses_ph2s=True #plot the ratio between the "bad" dose rate and the "good" dose rate as function of PH2S. Plot for 1) pCO2=0.02, cloud=1000 and 2) pCO2=2 bar, cloud=0.
plot_reldoses_dust=True #plot the ratio between the "bad" dose rate and the "good" dose rate as function of dust level. Plot for 1) pCO2=0.02, cloud=1000 and 2) pCO2=2 bar, cloud=0.
########################
###
########################
if plot_reldoses_dust:
"""
    The purpose of this script is to plot the relative dose rates of the "stressor" photoprocess compared to the "eustressor" photoprocess as a function of atmospheric dust loading. We evaluate this for varying dust optical depths. We do this for two cases: 1) pCO2=2 bar, and 2) pCO2=0.02 bar and a tau=1000 cloud deck emplaced at 20.5 km. This is so we can separate out absorption amplification due to cloud decks (relatively flat) and due to Rayleigh scattering (not flat).
#Conditions: T_0=250, A=desert, z=0, no TD XCs, DeltaScaling
"""
########################
###Read in doses
########################
###Read in computed doses
od_list=np.array(['dustod=0.1', 'dustod=1', 'dustod=10']) #list of dust optical depths (500 nm)
od_axis=np.array([1.e-1, 1., 1.e1])
titles_list=np.array([r'pCO$_2$=2 bar, $\tau_{cloud}=0$',r'pCO$_2$=0.02 bar, $\tau_{cloud}=1000$ (unscaled)'])
num_cases=len(titles_list)
num_od=len(od_list)
dose_100_165=np.zeros([num_od,num_cases]) #surface radiance integrated 100-165 nm
dose_200_300=np.zeros([num_od,num_cases]) #surface radiance integrated 200-300 nm
dose_ump_193=np.zeros([num_od,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=193
dose_ump_230=np.zeros([num_od,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=230
dose_ump_254=np.zeros([num_od,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=254
dose_cucn3_254=np.zeros([num_od,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=254
dose_cucn3_300=np.zeros([num_od,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=300
for ind in range(0, num_od):
od=od_list[ind]
dose_100_165[ind,0],dose_200_300[ind,0],dose_ump_193[ind,0],dose_ump_230[ind,0],dose_ump_254[ind,0],dose_cucn3_254[ind,0],dose_cucn3_300[ind,0]=np.genfromtxt('./DoseRates/dose_rates_colddrymars_2bar_250K_z=0_A=desert_noTD_DS_'+od+'.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
dose_100_165[ind,1],dose_200_300[ind,1],dose_ump_193[ind,1],dose_ump_230[ind,1],dose_ump_254[ind,1],dose_cucn3_254[ind,1],dose_cucn3_300[ind,1]=np.genfromtxt('./DoseRates/dose_rates_colddrymars_0.02bar_250K_z=0_A=desert_noTD_DS_'+od+'_co2cloudod=1000_z=20.5_reff=10.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
########################
###Plot results
########################
fig, ax=plt.subplots(num_cases, figsize=(16.5*cm2inch,10.), sharex=True, sharey=False)
markersizeval=3.
colors=cm.rainbow(np.linspace(0,1,6))
for ind2 in range(0, num_cases):
ax[ind2].set_title(titles_list[ind2])
ax[ind2].plot(od_axis, dose_ump_193[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[0], label=r'UMP-193/CuCN3-254')
ax[ind2].plot(od_axis, dose_ump_230[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[1], label=r'UMP-230/CuCN3-254')
ax[ind2].plot(od_axis, dose_ump_254[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[2], label=r'UMP-254/CuCN3-254')
ax[ind2].plot(od_axis, dose_ump_193[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[3], label=r'UMP-193/CuCN3-300')
ax[ind2].plot(od_axis, dose_ump_230[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[4], label=r'UMP-230/CuCN3-300')
ax[ind2].plot(od_axis, dose_ump_254[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[5], label=r'UMP-254/CuCN3-300')
#ax.set_ylim([1.e-2, 1.e4])
ax[ind2].set_yscale('log')
ax[ind2].set_ylabel(r'$\bar{D}_{UMP-X}/\bar{D}_{CuCN3-Y}$')
#ax.set_xlim([100, 500])
ax[num_cases-1].set_xscale('log')
ax[num_cases-1].set_xlabel(r'$\tau_{d}$ (unscaled)', fontsize=12)
plt.tight_layout(rect=(0,0,1., 0.9))
ax[0].legend(bbox_to_anchor=[0, 1.2, 1., 0.7], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
plt.savefig('./Plots/discussion_doses_reldoses_dust.eps', orientation='portrait',papertype='letter', format='eps')
plt.show()
########################
###
########################
if plot_reldoses_ph2s:
"""
The purpose of this script is to plot the relative dose rates of the "stressor" photoprocess compared to the "eustressor" photoprocess as a function of pH2S. We evaluate this for pH2S=2e-9 -- 2e-4 bar. We do this for two cases: 1) pCO2=2 bar, and 2) pCO2=0.02 bar and a tau=1000 cloud deck emplaced at 20.5 km. This is so we can separate out absorption amplification due to cloud decks (relatively flat) and due to Rayleigh scattering (not flat).
#Conditions: T_0=250, A=desert, z=0, no TD XCs, DeltaScaling
"""
########################
###Read in doses
########################
###Read in computed doses
nocloud_list=np.array(['2bar_250K_0so2_1ppbh2s', '2bar_250K_0so2_10ppbh2s','2bar_250K_0so2_100ppbh2s','2bar_250K_0so2_1ppmh2s', '2bar_250K_0so2_10ppmh2s', '2bar_250K_0so2_100ppmh2s']) #list of pH2S for pCO2=2
cloud_list=np.array(['0.02bar_250K_0so2_100ppbh2s','0.02bar_250K_0so2_1ppmh2s','0.02bar_250K_0so2_10ppmh2s', '0.02bar_250K_0so2_100ppmh2s', '0.02bar_250K_0so2_1000ppmh2s', '0.02bar_250K_0so2_10000ppmh2s']) #list of pH2S for pCO2=0.02
ph2s_axis=np.array([2.e-9, 2.e-8, 2.e-7, 2.e-6, 2.e-5, 2.e-4]) #pH2S in bar
titles_list=np.array([r'pCO$_2$=2 bar, $\tau_{cloud}=0$',r'pCO$_2$=0.02 bar, $\tau_{cloud}=1000$ (unscaled)'])
num_cases=len(titles_list)
num_h2s=len(nocloud_list)
dose_100_165=np.zeros([num_h2s,num_cases]) #surface radiance integrated 100-165 nm
dose_200_300=np.zeros([num_h2s,num_cases]) #surface radiance integrated 200-300 nm
dose_ump_193=np.zeros([num_h2s,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=193
dose_ump_230=np.zeros([num_h2s,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=230
dose_ump_254=np.zeros([num_h2s,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=254
dose_cucn3_254=np.zeros([num_h2s,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=254
dose_cucn3_300=np.zeros([num_h2s,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=300
for ind in range(0, num_h2s):
dose_100_165[ind,0],dose_200_300[ind,0],dose_ump_193[ind,0],dose_ump_230[ind,0],dose_ump_254[ind,0],dose_cucn3_254[ind,0],dose_cucn3_300[ind,0]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+nocloud_list[ind]+'_z=0_A=desert_noTD_noDS_noparticles.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
dose_100_165[ind,1],dose_200_300[ind,1],dose_ump_193[ind,1],dose_ump_230[ind,1],dose_ump_254[ind,1],dose_cucn3_254[ind,1],dose_cucn3_300[ind,1]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+cloud_list[ind]+'_z=0_A=desert_noTD_DS_co2cloudod=1000_z=20.5_reff=10.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
########################
###Plot results
########################
fig, ax=plt.subplots(num_cases, figsize=(16.5*cm2inch,10.), sharex=True, sharey=False)
markersizeval=3.
colors=cm.rainbow(np.linspace(0,1,6))
for ind2 in range(0, num_cases):
ax[ind2].set_title(titles_list[ind2])
ax[ind2].plot(ph2s_axis, dose_ump_193[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[0], label=r'UMP-193/CuCN3-254')
ax[ind2].plot(ph2s_axis, dose_ump_230[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[1], label=r'UMP-230/CuCN3-254')
ax[ind2].plot(ph2s_axis, dose_ump_254[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[2], label=r'UMP-254/CuCN3-254')
ax[ind2].plot(ph2s_axis, dose_ump_193[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[3], label=r'UMP-193/CuCN3-300')
ax[ind2].plot(ph2s_axis, dose_ump_230[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[4], label=r'UMP-230/CuCN3-300')
ax[ind2].plot(ph2s_axis, dose_ump_254[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[5], label=r'UMP-254/CuCN3-300')
#ax.set_ylim([1.e-2, 1.e4])
ax[ind2].set_yscale('log')
ax[ind2].set_ylabel(r'$\bar{D}_{UMP-X}/\bar{D}_{CuCN3-Y}$')
#ax.set_xlim([100, 500])
ax[num_cases-1].set_xscale('log')
ax[num_cases-1].set_xlabel(r'pH$_2$S', fontsize=12)
plt.tight_layout(rect=(0,0,1., 0.9))
ax[0].legend(bbox_to_anchor=[0, 1.2, 1., 0.7], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
plt.savefig('./Plots/discussion_doses_reldoses_ph2s.eps', orientation='portrait',papertype='letter', format='eps')
plt.show()
########################
###
########################
if plot_reldoses_pso2:
"""
The purpose of this script is to plot the relative dose rates of the "stressor" photoprocess compared to the "eustressor" photoprocess as a function of pSO2. We evaluate this for pSO2=2e-9 -- 2e-5 bar. We do this for two cases: 1) pCO2=2 bar, and 2) pCO2=0.02 bar and a tau=1000 cloud deck emplaced at 20.5 km. This is so we can separate out absorption amplification due to cloud decks (relatively flat) and due to Rayleigh scattering (not flat).
#Conditions: T_0=250, A=desert, z=0, no TD XCs, DeltaScaling
"""
########################
###Read in doses
########################
###Read in computed doses
nocloud_list=np.array(['2bar_250K_1ppbso2_0h2s','2bar_250K_10ppbso2_0h2s','2bar_250K_100ppbso2_0h2s', '2bar_250K_1ppmso2_0h2s', '2bar_250K_10ppmso2_0h2s']) #list of pSO2 for pCO2=2
cloud_list=np.array(['0.02bar_250K_100ppbso2_0h2s','0.02bar_250K_1ppmso2_0h2s','0.02bar_250K_10ppmso2_0h2s', '0.02bar_250K_100ppmso2_0h2s', '0.02bar_250K_1000ppmso2_0h2s']) #list of pSO2 for pCO2=0.02
pso2_axis=np.array([2.e-9, 2.e-8, 2.e-7, 2.e-6, 2.e-5]) #pSO2 in bar
titles_list=np.array([r'pCO$_2$=2 bar, $\tau_{cloud}=0$',r'pCO$_2$=0.02 bar, $\tau_{cloud}=1000$ (unscaled)'])
num_cases=len(titles_list)
num_so2=len(nocloud_list)
dose_100_165=np.zeros([num_so2,num_cases]) #surface radiance integrated 100-165 nm
dose_200_300=np.zeros([num_so2,num_cases]) #surface radiance integrated 200-300 nm
dose_ump_193=np.zeros([num_so2,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=193
dose_ump_230=np.zeros([num_so2,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=230
dose_ump_254=np.zeros([num_so2,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=254
dose_cucn3_254=np.zeros([num_so2,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=254
dose_cucn3_300=np.zeros([num_so2,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=300
for ind in range(0, num_so2):
dose_100_165[ind,0],dose_200_300[ind,0],dose_ump_193[ind,0],dose_ump_230[ind,0],dose_ump_254[ind,0],dose_cucn3_254[ind,0],dose_cucn3_300[ind,0]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+nocloud_list[ind]+'_z=0_A=desert_noTD_noDS_noparticles.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
dose_100_165[ind,1],dose_200_300[ind,1],dose_ump_193[ind,1],dose_ump_230[ind,1],dose_ump_254[ind,1],dose_cucn3_254[ind,1],dose_cucn3_300[ind,1]=
|
np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+cloud_list[ind]+'_z=0_A=desert_noTD_DS_co2cloudod=1000_z=20.5_reff=10.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
|
numpy.genfromtxt
|
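For reference on the API labeled in this row: numpy.genfromtxt reads numeric columns from delimited text, with skip_header skipping header lines, usecols selecting columns, and unpack=True returning one array per selected column. A minimal, self-contained sketch with made-up data (the column names and values below are illustrative, not taken from the dose-rate files above):
import numpy as np
from io import StringIO

# Two data rows under a one-line header; StringIO stands in for a .dat file on disk.
fake_file = StringIO("dose_a dose_b dose_c\n1.0 2.0 3.0\n4.0 5.0 6.0\n")
dose_a, dose_b, dose_c = np.genfromtxt(fake_file, skip_header=1, usecols=(0, 1, 2), unpack=True)
# dose_a == array([1., 4.]), dose_b == array([2., 5.]), dose_c == array([3., 6.])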
from copy import deepcopy
from typing import Union
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
from hypothesis import given, assume
from numpy.testing import assert_array_equal
from mygrad import Tensor, astensor
from mygrad.tensor_creation.funcs import (
arange,
empty,
empty_like,
eye,
full,
full_like,
geomspace,
identity,
linspace,
logspace,
ones,
ones_like,
zeros,
zeros_like,
)
from tests.custom_strategies import tensors
def check_tensor_array(tensor, array, constant):
assert isinstance(tensor, Tensor)
assert_array_equal(tensor.data, array)
assert tensor.dtype is array.dtype
assert tensor.constant is constant
@given(constant=st.booleans(), dtype=st.sampled_from((np.int32, np.float64)))
def test_all_tensor_creation(constant, dtype):
x = np.array([1, 2, 3])
e = empty((3, 2), dtype=dtype, constant=constant)
assert e.shape == (3, 2)
assert e.constant is constant
e = empty_like(e, dtype=dtype, constant=constant)
assert e.shape == (3, 2)
assert e.constant is constant
check_tensor_array(
eye(3, dtype=dtype, constant=constant), np.eye(3, dtype=dtype), constant
)
check_tensor_array(
identity(3, dtype=dtype, constant=constant),
np.identity(3, dtype=dtype),
constant,
)
check_tensor_array(
ones((4, 5, 6), dtype=dtype, constant=constant),
np.ones((4, 5, 6), dtype=dtype),
constant,
)
check_tensor_array(
ones_like(x, dtype=dtype, constant=constant),
np.ones_like(x, dtype=dtype),
constant,
)
check_tensor_array(
ones_like(Tensor(x), dtype=dtype, constant=constant),
np.ones_like(x, dtype=dtype),
constant,
)
check_tensor_array(
zeros((4, 5, 6), dtype=dtype, constant=constant),
np.zeros((4, 5, 6), dtype=dtype),
constant,
)
check_tensor_array(
zeros_like(x, dtype=dtype, constant=constant),
np.zeros_like(x, dtype=dtype),
constant,
)
check_tensor_array(
zeros_like(Tensor(x), dtype=dtype, constant=constant),
np.zeros_like(x, dtype=dtype),
constant,
)
check_tensor_array(
full((4, 5, 6), 5.0, dtype=dtype, constant=constant),
np.full((4, 5, 6), 5.0, dtype=dtype),
constant,
)
check_tensor_array(
full_like(x, 5.0, dtype=dtype, constant=constant),
np.full_like(x, 5.0, dtype=dtype),
constant,
)
check_tensor_array(
full_like(Tensor(x), 5.0, dtype=dtype, constant=constant),
np.full_like(x, 5.0, dtype=dtype),
constant,
)
check_tensor_array(
arange(3, 7, dtype=dtype, constant=constant),
np.arange(3, 7, dtype=dtype),
constant,
)
check_tensor_array(
linspace(3, 7, dtype=dtype, constant=constant),
np.linspace(3, 7, dtype=dtype),
constant,
)
check_tensor_array(
logspace(3, 7, dtype=dtype, constant=constant),
np.logspace(3, 7, dtype=dtype),
constant,
)
check_tensor_array(
geomspace(3, 7, dtype=dtype, constant=constant),
np.geomspace(3, 7, dtype=dtype),
constant,
)
@given(
t=tensors(dtype=hnp.floating_dtypes(), include_grad=st.booleans()),
in_graph=st.booleans(),
)
def test_astensor_returns_tensor_reference_consistently(t: Tensor, in_graph: bool):
if in_graph:
t = +t
assert astensor(t) is t
assert astensor(t).grad is t.grad
assert astensor(t).creator is t.creator
assert astensor(t, dtype=t.dtype) is t
assert astensor(t, constant=t.constant) is t
assert astensor(t, dtype=t.dtype, constant=t.constant) is t
@given(
t=tensors(dtype=hnp.floating_dtypes(), include_grad=st.booleans()),
in_graph=st.booleans(),
)
def test_astensor_with_incompat_constant_still_passes_array_ref(
t: Tensor, in_graph: bool
):
if in_graph:
t = +t
t2 = astensor(t, constant=not t.constant)
assert t2 is not t
assert t2.data is t.data
assert t2.creator is None
t2 = astensor(t, dtype=t.dtype, constant=not t.constant)
assert t2 is not t
assert t2.data is t.data
assert t2.creator is None
@given(
t=tensors(dtype=hnp.floating_dtypes(), include_grad=st.booleans()),
in_graph=st.booleans(),
dtype=st.none() | hnp.floating_dtypes(),
constant=st.none() | st.booleans(),
)
def test_astensor_doesnt_mutate_input_tensor(
t: Tensor, in_graph: bool, dtype, constant: bool
):
if in_graph:
t = +t
o_constant = t.constant
o_creator = t.creator
o_data = t.data.copy()
o_grad = None if t.grad is None else t.grad.copy()
astensor(t, dtype=dtype, constant=constant)
assert t.constant is o_constant
assert t.creator is o_creator
assert_array_equal(t, o_data)
if o_grad is not None:
assert_array_equal(t.grad, o_grad)
else:
assert t.grad is None
@given(
a=hnp.arrays(
shape=hnp.array_shapes(min_dims=0, min_side=0),
dtype=hnp.integer_dtypes() | hnp.floating_dtypes(),
)
| tensors(dtype=hnp.floating_dtypes(), include_grad=st.booleans()),
as_list=st.booleans(),
dtype=st.none() | hnp.floating_dtypes(),
constant=st.none() | st.booleans(),
)
def test_as_tensor(a: Union[np.ndarray, Tensor], as_list: bool, dtype, constant: bool):
"""Ensures `astensor` produces a tensor with the expected data, dtype, and constant,
and that it doesn't mutate the input."""
assume(~np.any(
|
np.isnan(a)
|
numpy.isnan
|
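As context for the completion above: numpy.isnan tests elementwise for NaN, and combined with numpy.any it gives the "does this array contain any NaN" check that the hypothesis assume() call is building. A small sketch with illustrative values:
import numpy as np

a = np.array([1.0, np.nan, 3.0])
mask = np.isnan(a)        # elementwise test -> array([False, True, False])
has_nan = np.any(mask)    # True, since one entry is NaN
clean = a[~mask]          # drop NaN entries -> array([1., 3.])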
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from os.path import join, dirname
sys.path.insert(0, join(dirname(__file__), '..'))
import simulator
simulator.load('/home/wang/CARLA_0.9.9.4')
import carla
sys.path.append('/home/wang/CARLA_0.9.9.4/PythonAPI/carla')
from agents.navigation.basic_agent import BasicAgent
from simulator import config, set_weather, add_vehicle
from simulator.sensor_manager import SensorManager
from utils.navigator_sim import get_random_destination, get_map, get_nav, replan, close2dest
#from learning.models import GeneratorUNet
from learning.path_model import Model_COS_Img
from ff_collect_pm_data import sensor_dict
from ff.collect_ipm import InversePerspectiveMapping
from ff.carla_sensor import Sensor, CarlaSensorMaster
import os
import cv2
import time
import copy
import threading
import random
import argparse
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from datetime import datetime
import torch
from torch.autograd import grad
import torchvision.transforms as transforms
global_img = None
global_pcd = None
global_nav = None
global_v0 = 0.
global_vel = 0.
global_plan_time = 0.
global_trajectory = None
start_control = False
global_vehicle = None
global_plan_map = None
global_cost_map = None
global_transform = None
max_steer_angle = 0.
draw_cost_map = None
MAX_SPEED = 30
img_height = 128
img_width = 256
longitudinal_length = 25.0 # [m]
random.seed(datetime.now())
torch.manual_seed(999)
torch.cuda.manual_seed(999)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
#generator = GeneratorUNet()
#generator = generator.to(device)
#generator.load_state_dict(torch.load('../ckpt/sim-obs/g.pth'))
model = Model_COS_Img().to(device)
model.load_state_dict(torch.load('result/saved_models/img-input-01/model_42000.pth'))
model.eval()
parser = argparse.ArgumentParser(description='Params')
parser.add_argument('-d', '--data', type=int, default=1, help='data index')
parser.add_argument('-n', '--num', type=int, default=100000, help='total number')
parser.add_argument('--width', type=int, default=400, help='image width')
parser.add_argument('--height', type=int, default=200, help='image height')
parser.add_argument('--max_dist', type=float, default=20., help='max distance')
parser.add_argument('--max_t', type=float, default=5., help='max time')
parser.add_argument('--scale', type=float, default=25., help='longitudinal length')
parser.add_argument('--dt', type=float, default=0.005, help='discretization minimum time interval')
args = parser.parse_args()
data_index = args.data
save_path = '/media/wang/DATASET/CARLA/town01/'+str(data_index)+'/'
img_transforms = [
transforms.Resize((200, 400)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
img_trans = transforms.Compose(img_transforms)
cost_map_transforms_ = [ transforms.Resize((200, 400), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5))
]
cost_map_trans = transforms.Compose(cost_map_transforms_)
class Param(object):
def __init__(self):
self.longitudinal_length = longitudinal_length
self.ksize = 21
param = Param()
sensor = Sensor(sensor_dict['camera']['transform'], config['camera'])
sensor_master = CarlaSensorMaster(sensor, sensor_dict['camera']['transform'], binded=True)
inverse_perspective_mapping = InversePerspectiveMapping(param, sensor_master)
def get_cost_map(img, point_cloud):
img2 = np.zeros((args.height, args.width), np.uint8)
img2.fill(255)
pixs_per_meter = args.height/longitudinal_length
u = (args.height-point_cloud[0]*pixs_per_meter).astype(int)
v = (-point_cloud[1]*pixs_per_meter+args.width//2).astype(int)
mask = np.where((u >= 0)&(u < args.height))[0]
u = u[mask]
v = v[mask]
mask = np.where((v >= 0)&(v < args.width))[0]
u = u[mask]
v = v[mask]
img2[u,v] = 0
kernel = np.ones((17,17),np.uint8)
img2 = cv2.erode(img2,kernel,iterations = 1)
img = cv2.addWeighted(img,0.7,img2,0.3,0)
kernel_size = (17, 17)
sigma = 21
img = cv2.GaussianBlur(img, kernel_size, sigma);
return img
def xy2uv(x, y):
pixs_per_meter = args.height/args.scale
u = (args.height-x*pixs_per_meter).astype(int)
v = (y*pixs_per_meter+args.width//2).astype(int)
return u, v
def mkdir(path):
os.makedirs(save_path+path, exist_ok=True)
def image_callback(data):
global global_img, global_plan_time, global_vehicle, global_plan_map,global_nav, global_transform, global_v0
global_plan_time = time.time()
array = np.frombuffer(data.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (data.height, data.width, 4)) # RGBA format
global_img = array
global_transform = global_vehicle.get_transform()
try:
global_nav = get_nav(global_vehicle, global_plan_map)
v = global_vehicle.get_velocity()
global_v0 = np.sqrt(v.x**2+v.y**2+v.z**2)
except:
pass
def lidar_callback(data):
global global_pcd
lidar_data = np.frombuffer(data.raw_data, dtype=np.float32).reshape([-1, 3])
point_cloud = np.stack([-lidar_data[:,1], -lidar_data[:,0], -lidar_data[:,2]])
mask = np.where(point_cloud[2] > -2.3)[0]
point_cloud = point_cloud[:, mask]
global_pcd = point_cloud
def get_control(x, y, vx, vy):
global global_vel
control = carla.VehicleControl()
control.manual_gear_shift = True
control.gear = 1
v_target = np.sqrt(vx**2+vy**2)
yaw = np.arctan2(vy, vx)
theta = np.arctan2(y, x)
dist = np.sqrt(x**2+y**2)
e = dist*np.sin(yaw-theta)
K = 0.01
Ks = 10.0
Kv = 2.0
tan_input = K*e/(Ks + global_vel)
yaw_target = yaw + np.tan(tan_input)
control.throttle = np.clip(0.7 + Kv*(v_target-global_vel), 0., 1.)
control.steer = np.clip(0.35*yaw_target, -1., 1.)
return control
def add_alpha_channel(img):
b_channel, g_channel, r_channel = cv2.split(img)
alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
alpha_channel[:, :int(b_channel.shape[0] / 2)] = 100
img_BGRA = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
return img_BGRA
cnt = 0
def visualize(img):
#print(costmap.shape, nav.shape, img.shape)
global global_vel, cnt
#costmap = cv2.cvtColor(costmap,cv2.COLOR_GRAY2RGB)
text = "speed: "+str(round(3.6*global_vel, 1))+' km/h'
cv2.putText(img, text, (20, 30), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 2)
#nav = cv2.resize(nav, (240, 200), interpolation=cv2.INTER_CUBIC)
#down_img = np.hstack([costmap, nav])
#down_img = add_alpha_channel(down_img)
#show_img = np.vstack([img, down_img])
cv2.imshow('Result', img)
#cv2.imwrite('result/images/nt-nv-dw/'+str(cnt)+'.png', show_img)
cv2.waitKey(10)
cnt += 1
def draw_traj(cost_map, output):
cost_map = Image.fromarray(cost_map).convert("RGB")
draw = ImageDraw.Draw(cost_map)
result = output.data.cpu().numpy()
x = args.max_dist*result[:,0]
y = args.max_dist*result[:,1]
u, v = xy2uv(x, y)
for i in range(len(u)-1):
draw.line((v[i], u[i], v[i+1], u[i+1]), 'red')
draw.line((v[i]+1, u[i], v[i+1]+1, u[i+1]), 'red')
draw.line((v[i]-1, u[i], v[i+1]-1, u[i+1]), 'red')
return cost_map
def get_traj(plan_time):
global global_v0, draw_cost_map, global_img, global_nav
#img = Image.fromarray(cv2.cvtColor(cost_map,cv2.COLOR_BGR2RGB)).convert('L')
#trans_img = cost_map_trans(img)
img = Image.fromarray(cv2.cvtColor(global_img,cv2.COLOR_BGR2RGB)).convert('RGB')
nav = Image.fromarray(cv2.cvtColor(global_nav,cv2.COLOR_BGR2RGB)).convert('RGB')
img = img_trans(img)
nav = img_trans(nav)
trans_img = torch.cat((img, nav), 0)
t = torch.arange(0, 0.9, args.dt).unsqueeze(1).to(device)
t.requires_grad = True
img = trans_img.expand(len(t),6,args.height, args.width)
img = img.to(device)
img.requires_grad = True
v_0 = torch.FloatTensor([global_v0]).expand(len(t),1)
v_0 = v_0.to(device)
output = model(img, t, v_0)
vx = grad(output[:,0].sum(), t, create_graph=True)[0][:,0]*(args.max_dist/args.max_t)
vy = grad(output[:,1].sum(), t, create_graph=True)[0][:,0]*(args.max_dist/args.max_t)
ax = grad(vx.sum(), t, create_graph=True)[0][:,0]/args.max_t
ay = grad(vy.sum(), t, create_graph=True)[0][:,0]/args.max_t
x = output[:,0]*args.max_dist
y = output[:,1]*args.max_dist
# draw
#draw_cost_map = draw_traj(cost_map, output)
vx = vx.data.cpu().numpy()
vy = vy.data.cpu().numpy()
x = x.data.cpu().numpy()
y = y.data.cpu().numpy()
ax = ax.data.cpu().numpy()
ay = ay.data.cpu().numpy()
trajectory = {'time':plan_time, 'x':x, 'y':y, 'vx':vx, 'vy':vy, 'ax':ax, 'ay':ay}
return trajectory
def make_plan():
global global_img, global_nav, global_pcd, global_plan_time, global_trajectory,start_control, global_cost_map
while True:
plan_time = global_plan_time
# 1. get cGAN result
#result = get_cGAN_result(global_img, global_nav)
# 2. inverse perspective mapping and get costmap
#img = copy.deepcopy(global_img)
#mask = np.where(result > 200)
#img[mask[0],mask[1]] = (255, 0, 0, 255)
#ipm_image = inverse_perspective_mapping.getIPM(result)
#cost_map = get_cost_map(ipm_image, global_pcd)
# 3. get trajectory
trajectory = get_traj(plan_time)
#time.sleep(0.5)
global_trajectory = trajectory
#global_cost_map = cost_map
if not start_control:
start_control = True
def get_transform(transform, org_transform):
x = transform.location.x
y = transform.location.y
yaw = transform.rotation.yaw
x0 = org_transform.location.x
y0 = org_transform.location.y
yaw0 = org_transform.rotation.yaw
dx = x - x0
dy = y - y0
dyaw = yaw - yaw0
return dx, dy, dyaw
def get_new_control(x, y, vx, vy, ax, ay):
global global_vel, max_steer_angle, global_a
Kx = 0.1
Kv = 3.0
Ky = 1.2e-2
K_theta = 0.005
control = carla.VehicleControl()
control.manual_gear_shift = True
control.gear = 1
v_r = np.sqrt(vx**2+vy**2)
yaw = np.arctan2(vy, vx)
theta_e = yaw
#k = (vx*ay-vy*ax)/(v_r**3)
w_r = (vx*ay-vy*ax)/(v_r**2)
theta = np.arctan2(y, x)
dist = np.sqrt(x**2+y**2)
y_e = dist*np.sin(yaw-theta)
x_e = dist*np.cos(yaw-theta)
v_e = v_r - global_vel
####################################
#v = v_r*np.cos(theta_e) + Kx*x_e
w = w_r + v_r*(Ky*y_e + K_theta*np.sin(theta_e))
steer_angle = np.arctan(w*2.405/global_vel)
steer = steer_angle/max_steer_angle
#####################################
throttle = Kx*x_e + Kv*v_e
#throttle = 0.7 +(Kx*x_e + Kv*v_e)*0.06
#throttle = Kx*x_e + Kv*v_e + global_a
control.brake = 0.0
if throttle > 0:
control.throttle = np.clip(throttle, 0., 1.)
else:
control.brake = np.clip(-0.05*throttle, 0., 1.)
control.steer =
|
np.clip(steer, -1., 1.)
|
numpy.clip
|
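The completion above uses numpy.clip to saturate the steering command to the actuator range. A short sketch (the values are illustrative, not from the controller above):
import numpy as np

steer = 1.7
steer_cmd = np.clip(steer, -1., 1.)                       # scalar input saturates to 1.0
throttle = np.clip(np.array([-0.2, 0.4, 1.3]), 0., 1.)    # also works elementwise -> [0. , 0.4, 1. ]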
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from collections import deque
import itertools
import logging
from multiprocessing import (
Manager,
Process,
)
import os
import random
import numpy as np
import pytoml as toml
import six
from six.moves import input as raw_input
from tqdm import tqdm
from .config import CONFIG
from . import preprocessing
from .training import augment_subvolume_generator
from .util import (
get_color_shader,
Roundrobin,
WrappedViewer,
)
from .volumes import (
HDF5Volume,
partition_volumes,
SubvolumeBounds,
)
from .regions import Region
def generate_subvolume_bounds(filename, volumes, num_bounds, sparse=False, moves=None):
if '{volume}' not in filename:
raise ValueError('CSV filename must contain "{volume}" for volume name replacement.')
if moves is None:
moves = 5
else:
moves = np.asarray(moves)
subv_shape = CONFIG.model.input_fov_shape + CONFIG.model.move_step * 2 * moves
if sparse:
gen_kwargs = {'sparse_margin': subv_shape}
else:
gen_kwargs = {'shape': subv_shape}
for k, v in six.iteritems(volumes):
bounds = v.downsample(CONFIG.volume.resolution)\
.subvolume_bounds_generator(**gen_kwargs)
bounds = itertools.islice(bounds, num_bounds)
SubvolumeBounds.iterable_to_csv(bounds, filename.format(volume=k))
def fill_volume_with_model(
model_file,
volume,
resume_prediction=None,
checkpoint_filename=None,
checkpoint_label_interval=20,
seed_generator='sobel',
background_label_id=0,
bias=True,
move_batch_size=1,
max_moves=None,
max_bodies=None,
num_workers=CONFIG.training.num_gpus,
worker_prequeue=1,
filter_seeds_by_mask=True,
reject_non_seed_components=True,
reject_early_termination=False,
remask_interval=None,
shuffle_seeds=True):
subvolume = volume.get_subvolume(SubvolumeBounds(start=np.zeros(3, dtype=np.int64), stop=volume.shape))
# Create an output label volume.
if resume_prediction is None:
prediction = np.full_like(subvolume.image, background_label_id, dtype=np.uint64)
label_id = 0
else:
if resume_prediction.shape != subvolume.image.shape:
raise ValueError('Resume volume prediction is wrong shape.')
prediction = resume_prediction
prediction.flags.writeable = True
label_id = prediction.max()
# Create a conflict count volume that tracks locations where segmented
# bodies overlap. For now the first body takes precedence in the
# predicted labels.
conflict_count = np.full_like(prediction, 0, dtype=np.uint32)
def worker(worker_id, set_devices, model_file, image, seeds, results, lock, revoked):
lock.acquire()
import tensorflow as tf
if set_devices:
# Only make one GPU visible to Tensorflow so that it does not allocate
# all available memory on all devices.
# See: https://stackoverflow.com/questions/37893755
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(worker_id)
with tf.device('/gpu:0'):
# Late import to avoid Keras import until TF bindings are set.
from .network import load_model
logging.debug('Worker %s: loading model', worker_id)
model = load_model(model_file, CONFIG.network)
lock.release()
def is_revoked(test_seed):
ret = False
lock.acquire()
if tuple(test_seed) in revoked:
ret = True
revoked.remove(tuple(test_seed))
lock.release()
return ret
while True:
seed = seeds.get(True)
if not isinstance(seed, np.ndarray):
logging.debug('Worker %s: got DONE', worker_id)
break
if is_revoked(seed):
results.put((seed, None))
continue
def stopping_callback(region):
stop = is_revoked(seed)
if reject_non_seed_components and \
region.bias_against_merge and \
region.mask[tuple(region.seed_vox)] < 0.5:
stop = True
return stop
logging.debug('Worker %s: got seed %s', worker_id, np.array_str(seed))
# Flood-fill and get resulting mask.
# Allow reading outside the image volume bounds to allow segmentation
# to fill all the way to the boundary.
region = Region(image, seed_vox=seed, sparse_mask=True, block_padding='reflect')
region.bias_against_merge = bias
early_termination = False
try:
six.next(region.fill(
model,
move_batch_size=move_batch_size,
max_moves=max_moves,
progress=2 + worker_id,
stopping_callback=stopping_callback,
remask_interval=remask_interval))
except Region.EarlyFillTermination:
early_termination = True
except StopIteration:
pass
if reject_early_termination and early_termination:
body = None
else:
body = region.to_body()
logging.debug('Worker %s: seed %s filled', worker_id, np.array_str(seed))
results.put((seed, body))
# Generate seeds from volume.
generator = preprocessing.SEED_GENERATORS[seed_generator]
seeds = generator(subvolume.image, CONFIG.volume.resolution)
if filter_seeds_by_mask and volume.mask_data is not None:
seeds = [s for s in seeds if volume.mask_data[tuple(volume.world_coord_to_local(s))]]
pbar = tqdm(desc='Seed queue', total=len(seeds), miniters=1, smoothing=0.0)
label_pbar = tqdm(desc='Labeled vox', total=prediction.size, miniters=1, smoothing=0.0, position=1)
num_seeds = len(seeds)
if shuffle_seeds:
random.shuffle(seeds)
seeds = iter(seeds)
manager = Manager()
# Queue of seeds to be picked up by workers.
seed_queue = manager.Queue()
# Queue of results from workers.
results_queue = manager.Queue()
# Deque of seeds that were put in seed_queue but have not yet been
# combined by the main process.
dispatched_seeds = deque()
# Seeds that were placed in seed_queue but subsequently covered by other
# results before their results have been processed. This allows workers to
# abort working on these seeds by checking this list.
revoked_seeds = manager.list()
# Results that have been received by the main process but have not yet
# been combined because they were not received in the dispatch order.
unordered_results = {}
def queue_next_seed():
total = 0
for seed in seeds:
if prediction[seed[0], seed[1], seed[2]] != background_label_id:
# This seed has already been filled.
total += 1
continue
dispatched_seeds.append(seed)
seed_queue.put(seed)
break
return total
for _ in range(min(num_seeds, num_workers * worker_prequeue)):
processed_seeds = queue_next_seed()
pbar.update(processed_seeds)
if 'CUDA_VISIBLE_DEVICES' in os.environ:
set_devices = False
num_workers = 1
logging.warn('Environment variable CUDA_VISIBLE_DEVICES is set, so only one worker can be used.\n'
'See https://github.com/aschampion/diluvian/issues/11')
else:
set_devices = True
workers = []
loading_lock = manager.Lock()
for worker_id in range(num_workers):
w = Process(target=worker, args=(worker_id, set_devices, model_file, subvolume.image,
seed_queue, results_queue, loading_lock, revoked_seeds))
w.start()
workers.append(w)
last_checkpoint_label = label_id
# For each seed, create region, fill, threshold, and merge to output volume.
while dispatched_seeds:
processed_seeds = 1
expected_seed = dispatched_seeds.popleft()
logging.debug('Expecting seed %s', np.array_str(expected_seed))
if tuple(expected_seed) in unordered_results:
logging.debug('Expected seed %s is in old results', np.array_str(expected_seed))
seed = expected_seed
body = unordered_results[tuple(seed)]
del unordered_results[tuple(seed)]
else:
seed, body = results_queue.get(True)
processed_seeds += queue_next_seed()
while not np.array_equal(seed, expected_seed):
logging.debug('Seed %s is early, stashing',
|
np.array_str(seed)
|
numpy.array_str
|
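numpy.array_str, used in the completion above, formats an array as a compact string for logging. A brief sketch with illustrative values:
import numpy as np

seed = np.array([12, 34, 56])
print('Seed %s is early, stashing' % np.array_str(seed))   # "Seed [12 34 56] is early, stashing"
# precision and suppress_small control float formatting:
print(np.array_str(np.array([1.2e-9, 2.0]), precision=3, suppress_small=True))   # "[0. 2.]"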
import os
import cv2
import skimage
import numpy as np
import torch
import torch.nn as nn
import torchvision as tv
import math
import network
# ----------------------------------------
# Network
# ----------------------------------------
def create_generator(opt):
# Initialize the network
generator = network.KPN(opt.color, opt.burst_length, opt.blind_est, opt.kernel_size, opt.sep_conv, \
opt.channel_att, opt.spatial_att, opt.upMode, opt.core_bias)
if opt.load_name == '':
# Init the network
network.weights_init(generator, init_type = opt.init_type, init_gain = opt.init_gain)
print('Generator is created!')
else:
# Load a pre-trained network
pretrained_net = torch.load(opt.load_name)
load_dict(generator, pretrained_net)
print('Generator is loaded!')
return generator
def load_dict(process_net, pretrained_net):
# Get the dict from pre-trained network
pretrained_dict = pretrained_net
# Get the dict from processing network
process_dict = process_net.state_dict()
# Delete the extra keys of pretrained_dict that do not belong to process_dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in process_dict}
# Update process_dict using pretrained_dict
process_dict.update(pretrained_dict)
# Load the updated dict to processing network
process_net.load_state_dict(process_dict)
return process_net
# ----------------------------------------
# Validation and Sample at training
# ----------------------------------------
def save_sample_png(sample_folder, sample_name, img_list, name_list, pixel_max_cnt = 255, height = -1, width = -1):
# Save image one-by-one
for i in range(len(img_list)):
img = img_list[i]
# Recover normalization
img = img * 255.0
# Process img_copy and do not destroy the data of img
#print(img.size())
img_copy = img.clone().data.permute(0, 2, 3, 1).cpu().numpy()
img_copy = np.clip(img_copy, 0, pixel_max_cnt)
img_copy = img_copy.astype(np.uint8)[0, :, :, :]
img_copy = cv2.cvtColor(img_copy, cv2.COLOR_BGR2RGB)
if (height != -1) and (width != -1):
img_copy = cv2.resize(img_copy, (width, height))
# Save to certain path
save_img_name = sample_name + '_' + name_list[i] + '.png'
save_img_path = os.path.join(sample_folder, save_img_name)
cv2.imwrite(save_img_path, img_copy)
def save_sample_png_test(sample_folder, sample_name, img_list, name_list, pixel_max_cnt = 255):
# Save image one-by-one
for i in range(len(img_list)):
img = img_list[i]
# Recover normalization
img = img * 255.0
# Process img_copy and do not destroy the data of img
img_copy = img.clone().data.permute(0, 2, 3, 1).cpu().numpy()
img_copy = np.clip(img_copy, 0, pixel_max_cnt)
img_copy = img_copy.astype(np.uint8)[0, :, :, :]
img_copy = img_copy.astype(np.float32)
img_copy = cv2.cvtColor(img_copy, cv2.COLOR_BGR2RGB)
# Save to certain path
save_img_name = sample_name + '_' + name_list[i] + '.png'
save_img_path = os.path.join(sample_folder, save_img_name)
cv2.imwrite(save_img_path, img_copy)
def recover_process(img, height = -1, width = -1):
img = img * 255.0
img_copy = img.clone().data.permute(0, 2, 3, 1).cpu().numpy()
img_copy = np.clip(img_copy, 0, 255)
img_copy = img_copy.astype(np.uint8)[0, :, :, :]
img_copy = img_copy.astype(np.float32)
img_copy = cv2.cvtColor(img_copy, cv2.COLOR_BGR2RGB)
if (height != -1) and (width != -1):
img_copy = cv2.resize(img_copy, (width, height))
return img_copy
def psnr(pred, target):
#print(pred.shape)
#print(target.shape)
mse = np.mean( (pred - target) ** 2 )
if mse == 0:
return 100
PIXEL_MAX = 255.0
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
'''
def psnr(pred, target, pixel_max_cnt = 255):
mse = torch.mul(target - pred, target - pred)
rmse_avg = (torch.mean(mse).item()) ** 0.5
p = 20 * np.log10(pixel_max_cnt / rmse_avg)
return p
'''
def grey_psnr(pred, target, pixel_max_cnt = 255):
pred = torch.sum(pred, dim = 0)
target = torch.sum(target, dim = 0)
mse = torch.mul(target - pred, target - pred)
rmse_avg = (torch.mean(mse).item()) ** 0.5
p = 20 * np.log10(pixel_max_cnt * 3 / rmse_avg)
return p
def ssim(pred, target):
pred = pred.clone().data.permute(0, 2, 3, 1).cpu().numpy()
target = target.clone().data.permute(0, 2, 3, 1).cpu().numpy()
target = target[0]
pred = pred[0]
ssim = skimage.measure.compare_ssim(target, pred, multichannel = True)
return ssim
# ----------------------------------------
# PATH processing
# ----------------------------------------
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def savetxt(name, loss_log):
np_loss_log = np.array(loss_log)
|
np.savetxt(name, np_loss_log)
|
numpy.savetxt
|
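numpy.savetxt, the API labeled above, writes an array to a plain-text file, one row per line; numpy.loadtxt reads it back. A minimal sketch (file names and values are illustrative):
import numpy as np

loss_log = [0.91, 0.55, 0.32]
np.savetxt('loss_log.txt', np.array(loss_log))                             # default format is '%.18e'
np.savetxt('loss_log.csv', np.array(loss_log), fmt='%.4f', delimiter=',')  # custom format/delimiter
reloaded = np.loadtxt('loss_log.txt')                                      # round-trips back to an ndarray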
__Author__ = "<NAME>"
__Email__ = "<EMAIL>"
import csv
import itertools
import operator
import numpy as np
import nltk
import sys
from datetime import datetime
import matplotlib.pyplot as plt
import timeit
import time
from pyspark.sql import functions as F
from pyspark.sql import SQLContext, Row, SparkSession
from pyspark.sql.functions import udf,struct
from pyspark import SparkContext, StorageLevel, SparkConf
from pyspark.sql.types import IntegerType, TimestampType, StringType,DoubleType,StructType,StructField,DateType,DataType,BooleanType,LongType
from utils.utils import *
from model.rnn_numpy import RNNNumpy
class CalcEngine(object):
"""
calculation engine to preprocess training data and train models in parallel with PySpark
"""
def __init__(self):
self.vocabulary_size = 8000
self.unknown_token = "UNKNOWN_TOKEN"
self.sentence_start_token = "SENTENCE_START"
self.sentence_end_token = "SENTENCE_END"
def preprocess(self,input_file):
# Read the data by nltk package and append SENTENCE_START and SENTENCE_END tokens
print("Reading CSV file...")
with open(input_file, 'r') as f:
reader = csv.reader(f, skipinitialspace=True)
next(reader)
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].lower()) for x in reader])
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (self.sentence_start_token, x, self.sentence_end_token) for x in sentences]
print("Parsed %d sentences." % (len(sentences)))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = word_freq.most_common(self.vocabulary_size-1)
index_to_word = [x[0] for x in vocab]
index_to_word.append(self.unknown_token)
word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])
print("Using vocabulary size %d." % self.vocabulary_size)
print("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else self.unknown_token for w in sent]
print("\nExample sentence: '%s'" % sentences[0])
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
print("\nExample sentence after Pre-processing: '%s'" % tokenized_sentences[0])
# Print an training data example
# x_example, y_example = X_train[17], y_train[17]
# print("x:\n%s\n%s" % (" ".join([index_to_word[x] for x in x_example]), x_example))
# print("\ny:\n%s\n%s" % (" ".join([index_to_word[x] for x in y_example]), y_example))
return [X_train,y_train]
def distributed_training(self,X_train, y_train,vocabulary_size,num_core=4,rate=0.005):
"""
train the RNN in parallel with Spark 2.2
:param X_train: input training set
:param y_train: targets in the training set
:param vocabulary_size: size of the vocabulary; only the most frequent words are modelled
:param num_core: number of cores to run in parallel; default is 4 for a standalone PC and should be changed for EMR clusters
:param rate: learning rate, default 0.005
:return: model parameters (U, V, W matrices) with minimum loss
"""
global spark_context
learning_rate = [ rate*np.random.random() for i in range(num_core)]
print(learning_rate)
learning_rate_rdd = spark_context.parallelize(learning_rate)
#run map-reduce to find model parameters with minimum loss in training data
result = learning_rate_rdd.map(lambda rate:train_model(rate,vocabulary_size,X_train,y_train)).reduceByKey(min)
return result
def train_model(rate,vocabulary_size,X_train,y_train):
"""
worker function in the Spark map-reduce framework
:param rate: learning rate; each worker starts from a different learning rate, to search for globally optimal parameters by varying the learning rate
:param vocabulary_size: size of the vocabulary
:param X_train: input of training set which is the same across workers
:param y_train: target of training set which is the same across workers
:return: locally optimized parameters in each worker
"""
model = RNNNumpy(vocabulary_size)
loss = model.train_with_sgd(X_train, y_train, rate)
return {loss[-1]:model.get_parameter()}
def initialize_spark():
"""
In Spark 2.2, SparkSession is the recommended entry point (for Spark SQL DataFrame operations).
:return:
"""
global debug
global spark_context
global spark_session
global sql_context
global log4j_logger
global logger
spark_session = SparkSession \
.builder \
.appName("tnn-distributed-training") \
.getOrCreate()
sql_context = SQLContext(spark_session.sparkContext)
spark_context = spark_session.sparkContext
log4j_logger = spark_context._jvm.org.apache.log4j
logger = log4j_logger.LogManager.getLogger(__name__)
log('SPARK INITIALIZED')
def log(s):
global log_enabled
if log_enabled:
logger.warn(s)
# if __name__ == "__main__":
#set global variables
debug = False
spark_context = None
spark_session = None
sql_context = None
log4j_logger = None
logger = None
log_enabled = False
#initialize Spark and calculation engine
initialize_spark()
my_engine = CalcEngine()
X_train,y_train = my_engine.preprocess('data/reddit-comments-2015-08.csv')
vocabulary_size=8000
#randomly set model parameters
np.random.seed(10)
model = RNNNumpy(vocabulary_size)
o, s = model.forward_propagation(X_train[10])
print(o.shape)
print(o)
predictions = model.predict(X_train[10])
print(predictions.shape)
print(predictions)
# Limit to 1000 examples to save time
print("Expected Loss for random predictions: %f" % np.log(vocabulary_size))
print("Actual loss: %f" % model.calculate_loss(X_train[:1000], y_train[:1000]))
# To avoid performing millions of expensive calculations we use a smaller vocabulary size for checking.
grad_check_vocab_size = 100
np.random.seed(10)
model = RNNNumpy(grad_check_vocab_size, 10, bptt_truncate=1000)
model.gradient_check([0,1,2,3], [1,2,3,4])
# how long does it take to run a single step of SGD
np.random.seed(10)
model = RNNNumpy(vocabulary_size)
start_time = time.time()
model.sgd_step(X_train[10], y_train[10], 0.005)
end_time = time.time()
print("it take {0} seconds to do one step sgd".format(end_time - start_time))
# Train on a small subset of the data to see what happens
|
np.random.seed(10)
|
numpy.random.seed
|
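numpy.random.seed, as in the completion above, fixes the legacy global random state so repeated runs draw the same numbers. A quick sketch; note, as an aside, that newer code often prefers the Generator API via numpy.random.default_rng:
import numpy as np

np.random.seed(10)
first = np.random.random(3)
np.random.seed(10)
second = np.random.random(3)
assert np.allclose(first, second)        # same seed, same draws

rng = np.random.default_rng(10)          # Generator-based alternative with its own state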
""" Defines objective-function objects """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import time as _time
import numpy as _np
from .verbosityprinter import VerbosityPrinter as _VerbosityPrinter
from .. import optimize as _opt
from ..tools import listtools as _lt
class ObjectiveFunction(object):
pass
#NOTE on chi^2 expressions:
#in general case: chi^2 = sum (p_i-f_i)^2/p_i (for i summed over outcomes)
#in 2-outcome case: chi^2 = (p+ - f+)^2/p+ + (p- - f-)^2/p-
# = (p - f)^2/p + (1-p - (1-f))^2/(1-p)
# = (p - f)^2 * (1/p + 1/(1-p))
# = (p - f)^2 * ( ((1-p) + p)/(p*(1-p)) )
# = 1/(p*(1-p)) * (p - f)^2
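#Worked check of the 2-outcome identity with illustrative values p=0.3, f=0.2:
# (p-f)^2/p + ((1-p)-(1-f))^2/(1-p) = 0.01/0.3 + 0.01/0.7 ~= 0.0333 + 0.0143 = 0.0476
# 1/(p*(1-p)) * (p-f)^2 = 0.01/0.21 ~= 0.0476
#so the compact 1/(p*(1-p)) form agrees with the outcome-by-outcome sum.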
class Chi2Function(ObjectiveFunction):
def __init__(self, mdl, evTree, lookup, circuitsToUse, opLabelAliases, regularizeFactor, cptp_penalty_factor,
spam_penalty_factor, cntVecMx, N, minProbClipForWeighting, probClipInterval, wrtBlkSize,
gthrMem, check=False, check_jacobian=False, comm=None, profiler=None, verbosity=0):
from ..tools import slicetools as _slct
self.mdl = mdl
self.evTree = evTree
self.lookup = lookup
self.circuitsToUse = circuitsToUse
self.comm = comm
self.profiler = profiler
self.check = check
self.check_jacobian = check_jacobian
KM = evTree.num_final_elements() # shorthand for combined spam+circuit dimension
vec_gs_len = mdl.num_params()
self.printer = _VerbosityPrinter.build_printer(verbosity, comm)
self.opBasis = mdl.basis
#Compute "extra" (i.e. beyond the (circuit,spamlabel)) rows of jacobian
self.ex = 0
if regularizeFactor != 0:
self.ex = vec_gs_len
else:
if cptp_penalty_factor != 0: self.ex += _cptp_penalty_size(mdl)
if spam_penalty_factor != 0: self.ex += _spam_penalty_size(mdl)
self.KM = KM
self.vec_gs_len = vec_gs_len
self.regularizeFactor = regularizeFactor
self.cptp_penalty_factor = cptp_penalty_factor
self.spam_penalty_factor = spam_penalty_factor
self.minProbClipForWeighting = minProbClipForWeighting
self.probClipInterval = probClipInterval
self.wrtBlkSize = wrtBlkSize
self.gthrMem = gthrMem
# Allocate persistent memory
# (must be AFTER possible operation sequence permutation by
# tree and initialization of dsCircuitsToUse)
self.probs = _np.empty(KM, 'd')
self.jac = _np.empty((KM + self.ex, vec_gs_len), 'd')
#Detect omitted frequencies (assumed to be 0) so we can compute chi2 correctly
self.firsts = []; self.indicesOfCircuitsWithOmittedData = []
for i, c in enumerate(circuitsToUse):
lklen = _slct.length(lookup[i])
if 0 < lklen < mdl.get_num_outcomes(c):
self.firsts.append(_slct.as_array(lookup[i])[0])
self.indicesOfCircuitsWithOmittedData.append(i)
if len(self.firsts) > 0:
self.firsts = _np.array(self.firsts, 'i')
self.indicesOfCircuitsWithOmittedData = _np.array(self.indicesOfCircuitsWithOmittedData, 'i')
self.dprobs_omitted_rowsum = _np.empty((len(self.firsts), vec_gs_len), 'd')
self.printer.log("SPARSE DATA: %d of %d rows have sparse data" % (len(self.firsts), len(circuitsToUse)))
else:
self.firsts = None # no omitted probs
self.cntVecMx = cntVecMx
self.N = N
self.f = cntVecMx / N
self.maxCircuitLength = max([len(x) for x in circuitsToUse])
if self.printer.verbosity < 4: # Fast versions of functions
if regularizeFactor == 0 and cptp_penalty_factor == 0 and spam_penalty_factor == 0 \
and mdl.get_simtype() != "termgap":
# Fast un-regularized version
self.fn = self.simple_chi2
self.jfn = self.simple_jac
elif regularizeFactor != 0:
# Fast regularized version
assert(cptp_penalty_factor == 0), "Cannot have regularizeFactor and cptp_penalty_factor != 0"
assert(spam_penalty_factor == 0), "Cannot have regularizeFactor and spam_penalty_factor != 0"
self.fn = self.regularized_chi2
self.jfn = self.regularized_jac
elif mdl.get_simtype() == "termgap":
assert(cptp_penalty_factor == 0), "Cannot have termgap_pentalty_factor and cptp_penalty_factor != 0"
assert(spam_penalty_factor == 0), "Cannot have termgap_pentalty_factor and spam_penalty_factor != 0"
self.fn = self.termgap_chi2
self.jfn = self.simple_jac
else: # cptp_penalty_factor != 0 and/or spam_penalty_factor != 0
assert(regularizeFactor == 0), "Cannot have regularizeFactor and other penalty factors > 0"
self.fn = self.penalized_chi2
self.jfn = self.penalized_jac
else: # Verbose (DEBUG) version of objective_func
if mdl.get_simtype() == "termgap":
raise NotImplementedError("Still need to add termgap support to verbose chi2!")
self.fn = self.verbose_chi2
self.jfn = self.verbose_jac
def get_weights(self, p):
cp = _np.clip(p, self.minProbClipForWeighting, 1 - self.minProbClipForWeighting)
return _np.sqrt(self.N / cp) # nSpamLabels x nCircuits array (K x M)
def get_dweights(self, p, wts): # derivative of weights w.r.t. p
cp = _np.clip(p, self.minProbClipForWeighting, 1 - self.minProbClipForWeighting)
dw = -0.5 * wts / cp # nSpamLabels x nCircuits array (K x M)
dw[_np.logical_or(p < self.minProbClipForWeighting, p > (1 - self.minProbClipForWeighting))] = 0.0
return dw
def update_v_for_omitted_probs(self, v, probs):
# if i-th circuit has omitted probs, have sqrt( N*(p_i-f_i)^2/p_i + sum_k(N*p_k) )
# so we need to take sqrt( v_i^2 + N*sum_k(p_k) )
omitted_probs = 1.0 - _np.array([_np.sum(probs[self.lookup[i]])
for i in self.indicesOfCircuitsWithOmittedData])
clipped_oprobs = _np.clip(omitted_probs, self.minProbClipForWeighting, 1 - self.minProbClipForWeighting)
v[self.firsts] = _np.sqrt(v[self.firsts]**2 + self.N[self.firsts] * omitted_probs**2 / clipped_oprobs)
def update_dprobs_for_omitted_probs(self, dprobs, probs, weights, dprobs_omitted_rowsum):
# with omitted terms, new_obj = sqrt( obj^2 + corr ) where corr = N*omitted_p^2/clipped_omitted_p
# so then d(new_obj) = 1/(2*new_obj) *( 2*obj*dobj + dcorr )*domitted_p where dcorr = N when not clipped
# and 2*N*omitted_p/clip_bound * domitted_p when clipped
v = (probs - self.f) * weights
omitted_probs = 1.0 - _np.array([_np.sum(probs[self.lookup[i]])
for i in self.indicesOfCircuitsWithOmittedData])
clipped_oprobs = _np.clip(omitted_probs, self.minProbClipForWeighting, 1 - self.minProbClipForWeighting)
dprobs_factor_omitted = _np.where(omitted_probs == clipped_oprobs, self.N[self.firsts],
2 * self.N[self.firsts] * omitted_probs / clipped_oprobs)
fullv = _np.sqrt(v[self.firsts]**2 + self.N[self.firsts] * omitted_probs**2 / clipped_oprobs)
# avoid NaNs when both fullv and v[firsts] are zero - result should be *zero* in this case
fullv[v[self.firsts] == 0.0] = 1.0
dprobs[self.firsts, :] = (0.5 / fullv[:, None]) * (
2 * v[self.firsts, None] * dprobs[self.firsts, :]
- dprobs_factor_omitted[:, None] * dprobs_omitted_rowsum)
#Objective Function
def simple_chi2(self, vectorGS):
tm = _time.time()
self.mdl.from_vector(vectorGS)
self.mdl.bulk_fill_probs(self.probs, self.evTree, self.probClipInterval, self.check, self.comm)
v = (self.probs - self.f) * self.get_weights(self.probs) # dims K x M (K = nSpamLabels, M = nCircuits)
if self.firsts is not None:
self.update_v_for_omitted_probs(v, self.probs)
self.profiler.add_time("do_mc2gst: OBJECTIVE", tm)
assert(v.shape == (self.KM,)) # reshape ensuring no copy is needed
return v
def termgap_chi2(self, vectorGS, oob_check=False):
tm = _time.time()
self.mdl.from_vector(vectorGS)
self.mdl.bulk_fill_probs(self.probs, self.evTree, self.probClipInterval, self.check, self.comm)
if oob_check:
if not self.mdl.bulk_probs_paths_are_sufficient(self.evTree,
self.probs,
self.comm,
memLimit=None,
verbosity=1):
raise ValueError("Out of bounds!") # signals LM optimizer
v = (self.probs - self.f) * self.get_weights(self.probs) # dims K x M (K = nSpamLabels, M = nCircuits)
if self.firsts is not None:
self.update_v_for_omitted_probs(v, self.probs)
self.profiler.add_time("do_mc2gst: OBJECTIVE", tm)
assert(v.shape == (self.KM,)) # reshape ensuring no copy is needed
return v
def regularized_chi2(self, vectorGS):
tm = _time.time()
self.mdl.from_vector(vectorGS)
self.mdl.bulk_fill_probs(self.probs, self.evTree, self.probClipInterval, self.check, self.comm)
weights = self.get_weights(self.probs)
v = (self.probs - self.f) * weights # dim KM (K = nSpamLabels, M = nCircuits)
if self.firsts is not None:
self.update_v_for_omitted_probs(v, self.probs)
gsVecNorm = self.regularizeFactor * _np.array([max(0, absx - 1.0) for absx in map(abs, vectorGS)], 'd')
self.profiler.add_time("do_mc2gst: OBJECTIVE", tm)
return _np.concatenate((v.reshape([self.KM]), gsVecNorm))
def penalized_chi2(self, vectorGS):
tm = _time.time()
self.mdl.from_vector(vectorGS)
self.mdl.bulk_fill_probs(self.probs, self.evTree, self.probClipInterval, self.check, self.comm)
weights = self.get_weights(self.probs)
v = (self.probs - self.f) * weights # dims K x M (K = nSpamLabels, M = nCircuits)
if self.firsts is not None:
self.update_v_for_omitted_probs(v, self.probs)
if self.cptp_penalty_factor > 0:
cpPenaltyVec = _cptp_penalty(self.mdl, self.cptp_penalty_factor, self.opBasis)
else: cpPenaltyVec = [] # so concatenate ignores
if self.spam_penalty_factor > 0:
spamPenaltyVec = _spam_penalty(self.mdl, self.spam_penalty_factor, self.opBasis)
else: spamPenaltyVec = [] # so concatenate ignores
self.profiler.add_time("do_mc2gst: OBJECTIVE", tm)
return _np.concatenate((v, cpPenaltyVec, spamPenaltyVec))
def verbose_chi2(self, vectorGS):
tm = _time.time()
self.mdl.from_vector(vectorGS)
self.mdl.bulk_fill_probs(self.probs, self.evTree, self.probClipInterval, self.check, self.comm)
weights = self.get_weights(self.probs)
v = (self.probs - self.f) * weights
if self.firsts is not None:
self.update_v_for_omitted_probs(v, self.probs)
chisq = _np.sum(v * v)
nClipped = len((_np.logical_or(self.probs < self.minProbClipForWeighting,
self.probs > (1 - self.minProbClipForWeighting))).nonzero()[0])
self.printer.log("MC2-OBJ: chi2=%g\n" % chisq
+ " p in (%g,%g)\n" % (_np.min(self.probs), _np.max(self.probs))
+ " weights in (%g,%g)\n" % (_np.min(weights), _np.max(weights))
+ " mdl in (%g,%g)\n" % (_np.min(vectorGS), _np.max(vectorGS))
+ " maxLen = %d, nClipped=%d" % (self.maxCircuitLength, nClipped), 4)
assert((self.cptp_penalty_factor == 0 and self.spam_penalty_factor == 0) or self.regularizeFactor == 0), \
"Cannot have regularizeFactor and other penalty factors != 0"
if self.regularizeFactor != 0:
gsVecNorm = self.regularizeFactor * _np.array([max(0, absx - 1.0) for absx in map(abs, vectorGS)], 'd')
self.profiler.add_time("do_mc2gst: OBJECTIVE", tm)
return _np.concatenate((v, gsVecNorm))
elif self.cptp_penalty_factor != 0 or self.spam_penalty_factor != 0:
if self.cptp_penalty_factor != 0:
cpPenaltyVec = _cptp_penalty(self.mdl, self.cptp_penalty_factor, self.opBasis)
else: cpPenaltyVec = []
if self.spam_penalty_factor != 0:
spamPenaltyVec = _spam_penalty(self.mdl, self.spam_penalty_factor, self.opBasis)
else: spamPenaltyVec = []
self.profiler.add_time("do_mc2gst: OBJECTIVE", tm)
return _np.concatenate((v, cpPenaltyVec, spamPenaltyVec))
else:
self.profiler.add_time("do_mc2gst: OBJECTIVE", tm)
assert(v.shape == (self.KM,))
return v
# Jacobian function
def simple_jac(self, vectorGS):
tm = _time.time()
dprobs = self.jac.view() # avoid mem copying: use jac mem for dprobs
dprobs.shape = (self.KM, self.vec_gs_len)
self.mdl.from_vector(vectorGS)
self.mdl.bulk_fill_dprobs(dprobs, self.evTree,
prMxToFill=self.probs, clipTo=self.probClipInterval,
check=self.check, comm=self.comm, wrtBlockSize=self.wrtBlkSize,
profiler=self.profiler, gatherMemLimit=self.gthrMem)
#DEBUG TODO REMOVE - test dprobs to make sure they look right.
#EPS = 1e-7
#db_probs = _np.empty(self.probs.shape, 'd')
#db_probs2 = _np.empty(self.probs.shape, 'd')
#db_dprobs = _np.empty(dprobs.shape, 'd')
#self.mdl.bulk_fill_probs(db_probs, self.evTree, self.probClipInterval, self.check, self.comm)
#for i in range(self.vec_gs_len):
# vectorGS_eps = vectorGS.copy()
# vectorGS_eps[i] += EPS
# self.mdl.from_vector(vectorGS_eps)
# self.mdl.bulk_fill_probs(db_probs2, self.evTree, self.probClipInterval, self.check, self.comm)
# db_dprobs[:,i] = (db_probs2 - db_probs) / EPS
#if _np.linalg.norm(dprobs - db_dprobs)/dprobs.size > 1e-6:
# #assert(False), "STOP: %g" % (_np.linalg.norm(dprobs - db_dprobs)/db_dprobs.size)
# print("DB: dprobs per el mismatch = ",_np.linalg.norm(dprobs - db_dprobs)/db_dprobs.size)
#self.mdl.from_vector(vectorGS)
#dprobs[:,:] = db_dprobs[:,:]
if self.firsts is not None:
for ii, i in enumerate(self.indicesOfCircuitsWithOmittedData):
self.dprobs_omitted_rowsum[ii, :] = _np.sum(dprobs[self.lookup[i], :], axis=0)
weights = self.get_weights(self.probs)
dprobs *= (weights + (self.probs - self.f) * self.get_dweights(self.probs, weights))[:, None]
# (KM,N) * (KM,1) (N = dim of vectorized model)
# this multiply also computes jac, which is just dprobs
# with a different shape (jac.shape == [KM,vec_gs_len])
if self.firsts is not None:
self.update_dprobs_for_omitted_probs(dprobs, self.probs, weights, self.dprobs_omitted_rowsum)
if self.check_jacobian: _opt.check_jac(lambda v: self.simple_chi2(
v), vectorGS, self.jac, tol=1e-3, eps=1e-6, errType='abs') # TO FIX
# dpr has shape == (nCircuits, nDerivCols), weights has shape == (nCircuits,)
# return shape == (nCircuits, nDerivCols) where ret[i,j] = dP[i,j]*(weights+dweights*(p-f))[i]
self.profiler.add_time("do_mc2gst: JACOBIAN", tm)
return self.jac
def regularized_jac(self, vectorGS):
tm = _time.time()
dprobs = self.jac[0:self.KM, :] # avoid mem copying: use jac mem for dprobs
dprobs.shape = (self.KM, self.vec_gs_len)
self.mdl.from_vector(vectorGS)
self.mdl.bulk_fill_dprobs(dprobs, self.evTree,
prMxToFill=self.probs, clipTo=self.probClipInterval,
check=self.check, comm=self.comm, wrtBlockSize=self.wrtBlkSize,
profiler=self.profiler, gatherMemLimit=self.gthrMem)
if self.firsts is not None:
for ii, i in enumerate(self.indicesOfCircuitsWithOmittedData):
self.dprobs_omitted_rowsum[ii, :] = _np.sum(dprobs[self.lookup[i], :], axis=0)
weights = self.get_weights(self.probs)
dprobs *= (weights + (self.probs - self.f) * self.get_dweights(self.probs, weights))[:, None]
# (KM,N) * (KM,1) (N = dim of vectorized model)
# Note: this also computes jac[0:KM,:]
if self.firsts is not None:
self.update_dprobs_for_omitted_probs(dprobs, self.probs, weights, self.dprobs_omitted_rowsum)
gsVecGrad = _np.diag([(self.regularizeFactor * _np.sign(x) if abs(x) > 1.0 else 0.0)
for x in vectorGS]) # (N,N)
self.jac[self.KM:, :] = gsVecGrad # jac.shape == (KM+N,N)
if self.check_jacobian: _opt.check_jac(lambda v: self.regularized_chi2(
v), vectorGS, self.jac, tol=1e-3, eps=1e-6, errType='abs')
# dpr has shape == (nCircuits, nDerivCols), gsVecGrad has shape == (nDerivCols, nDerivCols)
# return shape == (nCircuits+nDerivCols, nDerivCols)
self.profiler.add_time("do_mc2gst: JACOBIAN", tm)
return self.jac
def penalized_jac(self, vectorGS): # Fast cptp-penalty version
tm = _time.time()
dprobs = self.jac[0:self.KM, :] # avoid mem copying: use jac mem for dprobs
dprobs.shape = (self.KM, self.vec_gs_len)
self.mdl.from_vector(vectorGS)
self.mdl.bulk_fill_dprobs(dprobs, self.evTree,
prMxToFill=self.probs, clipTo=self.probClipInterval,
check=self.check, comm=self.comm, wrtBlockSize=self.wrtBlkSize,
profiler=self.profiler, gatherMemLimit=self.gthrMem)
if self.firsts is not None:
for ii, i in enumerate(self.indicesOfCircuitsWithOmittedData):
self.dprobs_omitted_rowsum[ii, :] = _np.sum(dprobs[self.lookup[i], :], axis=0)
weights = self.get_weights(self.probs)
dprobs *= (weights + (self.probs - self.f) * self.get_dweights(self.probs, weights))[:, None]
# (KM,N) * (KM,1) (N = dim of vectorized model)
# Note: this also computes jac[0:KM,:]
if self.firsts is not None:
self.update_dprobs_for_omitted_probs(dprobs, self.probs, weights, self.dprobs_omitted_rowsum)
off = 0
if self.cptp_penalty_factor > 0:
off += _cptp_penalty_jac_fill(
self.jac[self.KM + off:, :], self.mdl, self.cptp_penalty_factor, self.opBasis)
if self.spam_penalty_factor > 0:
off += _spam_penalty_jac_fill(
self.jac[self.KM + off:, :], self.mdl, self.spam_penalty_factor, self.opBasis)
if self.check_jacobian: _opt.check_jac(lambda v: self.penalized_chi2(
v), vectorGS, self.jac, tol=1e-3, eps=1e-6, errType='abs')
self.profiler.add_time("do_mc2gst: JACOBIAN", tm)
return self.jac
def verbose_jac(self, vectorGS):
tm = _time.time()
dprobs = self.jac[0:self.KM, :] # avoid mem copying: use jac mem for dprobs
dprobs.shape = (self.KM, self.vec_gs_len)
self.mdl.from_vector(vectorGS)
self.mdl.bulk_fill_dprobs(dprobs, self.evTree,
prMxToFill=self.probs, clipTo=self.probClipInterval,
check=self.check, comm=self.comm, wrtBlockSize=self.wrtBlkSize,
profiler=self.profiler, gatherMemLimit=self.gthrMem)
if self.firsts is not None:
for ii, i in enumerate(self.indicesOfCircuitsWithOmittedData):
self.dprobs_omitted_rowsum[ii, :] = _np.sum(dprobs[self.lookup[i], :], axis=0)
weights = self.get_weights(self.probs)
#Attempt to control leastsq by zeroing clipped weights -- this doesn't seem to help (nor should it)
#weights[ _np.logical_or(pr < minProbClipForWeighting, pr > (1-minProbClipForWeighting)) ] = 0.0
dPr_prefactor = (weights + (self.probs - self.f) * self.get_dweights(self.probs, weights)) # (KM)
dprobs *= dPr_prefactor[:, None] # (KM,N) * (KM,1) = (KM,N) (N = dim of vectorized model)
if self.firsts is not None:
self.update_dprobs_for_omitted_probs(dprobs, self.probs, weights, self.dprobs_omitted_rowsum)
if self.regularizeFactor != 0:
gsVecGrad = _np.diag([(self.regularizeFactor * _np.sign(x) if abs(x) > 1.0 else 0.0) for x in vectorGS])
self.jac[self.KM:, :] = gsVecGrad # jac.shape == (KM+N,N)
else:
off = 0
if self.cptp_penalty_factor != 0:
off += _cptp_penalty_jac_fill(self.jac[self.KM + off:, :], self.mdl, self.cptp_penalty_factor,
self.opBasis)
if self.spam_penalty_factor != 0:
off += _spam_penalty_jac_fill(self.jac[self.KM + off:, :], self.mdl, self.spam_penalty_factor,
self.opBasis)
# Zero-out insignificant entries in jacobian -- seemed to help some, but leaving this out,
# thinking less complicated == better
#absJac = _np.abs(jac); maxabs = _np.max(absJac)
#jac[ absJac/maxabs < 5e-8 ] = 0.0
#Rescale jacobian so it's not too large -- an attempt to fix wild leastsq behavior but didn't help
#if maxabs > 1e7:
# print "Rescaling jacobian to 1e7 maxabs"
# jac = (jac / maxabs) * 1e7
#U,s,V = _np.linalg.svd(jac)
#print "DEBUG: s-vals of jac %s = " % (str(jac.shape)), s
nClipped = len((_np.logical_or(self.probs < self.minProbClipForWeighting,
self.probs > (1 - self.minProbClipForWeighting))).nonzero()[0])
self.printer.log("MC2-JAC: jac in (%g,%g)\n" % (_np.min(self.jac), _np.max(self.jac))
+ " pr in (%g,%g)\n" % (
completion: _np.min(self.probs)
api: numpy.min
import json
import os
import random
from typing import Any, Union
import numpy as np
import tensorflow as tf
from Configs.FasterRCNN_config import Param
from Debugger import debug_print
from NN_Components import Backbone, RPN, RoI
from NN_Helper import NnDataGenerator, BboxTools
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' # for mac os tensorflow setting
class FasterRCNN():
def __init__(self):
self.Backbone = Backbone(img_shape=Param.IMG_RESIZED_SHAPE, n_stage=Param.N_STAGE)
self.IMG_SHAPE = Param.IMG_RESIZED_SHAPE
# self.backbone_model.trainable= False
# === RPN part ===
self.RPN = RPN(backbone_model=self.Backbone.backbone_model,
lambda_factor=Param.LAMBDA_FACTOR,
batch=Param.BATCH_RPN,
lr=Param.LR)
# === RoI part ===
self.RoI = RoI(backbone_model=self.Backbone.backbone_model,
img_shape=self.IMG_SHAPE,
n_output_classes=Param.N_OUT_CLASS,
lr=Param.LR)
self.RoI_header = self.RoI.RoI_header_model
# === Data part ===
self.train_data_generator = NnDataGenerator(
file=Param.DATA_JSON_FILE,
imagefolder_path=Param.PATH_IMAGES,
anchor_base_size=Param.BASE_SIZE,
ratios=Param.RATIOS,
scales=Param.SCALES,
n_anchors=Param.N_ANCHORS,
img_shape_resize=Param.IMG_RESIZED_SHAPE,
n_stage=Param.N_STAGE,
threshold_iou_rpn=Param.THRESHOLD_IOU_RPN,
threshold_iou_roi=Param.THRESHOLD_IOU_RoI)
self.cocotool = self.train_data_generator.dataset_coco
self.anchor_candidate_generator = self.train_data_generator.gen_candidate_anchors
self.anchor_candidates = self.anchor_candidate_generator.anchor_candidates
def test_loss_function(self):
inputs, anchor_targets, bbox_targets = self.train_data_generator.gen_train_data_rpn_all()
print(inputs.shape, anchor_targets.shape, bbox_targets.shape)
input1 = np.reshape(inputs[0, :, :, :], (1, 720, 1280, 3))
anchor1 = np.reshape(anchor_targets[0, :, :, :], (1, 23, 40, 9))
anchor2 = tf.convert_to_tensor(anchor1)
anchor2 = tf.dtypes.cast(anchor2, tf.int32)
anchor2 = tf.one_hot(anchor2, 2, axis=-1)
print(anchor1)
bbox1 = np.reshape(bbox_targets[0, :, :, :, :], (1, 23, 40, 9, 4))
loss = self.RPN._rpn_loss(anchor1, bbox1, anchor2, bbox1)
print(loss)
def faster_rcnn_output(self):
# === prepare input images ===
image_ids = self.train_data_generator.dataset_coco.image_ids
inputs, anchor_targets, bbox_reg_targets = self.train_data_generator.gen_train_data_rpn_one(image_ids[0])
print(inputs.shape, anchor_targets.shape, bbox_reg_targets.shape)
image = np.reshape(inputs[0, :, :, :], (1, self.IMG_SHAPE[0], self.IMG_SHAPE[1], 3))
# === get proposed region boxes ===
rpn_anchor_pred, rpn_bbox_regression_pred = self.RPN.process_image(image)
proposed_boxes = self.RPN._proposal_boxes(rpn_anchor_pred, rpn_bbox_regression_pred,
self.anchor_candidates,
self.anchor_candidate_generator.h,
self.anchor_candidate_generator.w,
self.anchor_candidate_generator.n_anchors,
Param.ANCHOR_PROPOSAL_N,
Param.ANCHOR_THRESHOLD
)
# === processing boxes with RoI header ===
pred_class, pred_box_reg = self.RoI.process_image([image, proposed_boxes])
# === processing the results ===
pred_class_sparse = np.argmax(a=pred_class[:, :], axis=1)
pred_class_sparse_value = np.max(a=pred_class[:, :], axis=1)
print(pred_class, pred_box_reg)
print(pred_class_sparse, pred_class_sparse_value)
print(np.max(proposed_boxes), np.max(pred_box_reg))
final_box = BboxTools.bbox_reg2truebox(base_boxes=proposed_boxes, regs=pred_box_reg)
final_box = BboxTools.clip_boxes(final_box, self.IMG_SHAPE)
# === output to official coco bbox result json file ===
temp_output_to_file = []
for i in range(pred_class_sparse.shape[0]):
temp_category = self.train_data_generator.dataset_coco.get_category_from_sparse(pred_class_sparse[i])
temp_output_to_file.append({
"image_id": f"{image_ids[0]}",
"bbox": [final_box[i][0].item(), final_box[i][1].item(), final_box[i][2].item(),
final_box[i][3].item()],
"score": pred_class_sparse_value[i].item(),
"category": f"{temp_category}"
})
with open("results.pkl.bbox.json", "w") as f:
json.dump(temp_output_to_file, f, indent=4)
# print(final_box)
print(final_box[pred_class_sparse_value > 0.9])
final_box = final_box[pred_class_sparse_value > 0.9]
self.cocotool.draw_bboxes(original_image=image[0], bboxes=final_box.tolist(), show=True, save_file=True,
path=Param.PATH_DEBUG_IMG, save_name='6PredRoISBoxes')
# === Non maximum suppression ===
        final_box_temp = np.array(final_box).astype(int)  # np.int is deprecated in newer numpy
nms_boxes_list = []
while final_box_temp.shape[0] > 0:
ious = self.nms_loop_np(final_box_temp)
nms_boxes_list.append(
final_box_temp[0, :]) # since it's sorted by the value, here we can pick the first one each time.
final_box_temp = final_box_temp[ious < Param.RPN_NMS_THRESHOLD]
debug_print('number of box after nms', len(nms_boxes_list))
self.cocotool.draw_bboxes(original_image=image[0], bboxes=nms_boxes_list, show=True, save_file=True,
path=Param.PATH_DEBUG_IMG, save_name='7PredRoINMSBoxes')
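    # Note: the loop above is greedy NMS -- boxes are assumed sorted by score (per
    # the comment above), the first box is kept, and every remaining box whose IoU
    # with it reaches Param.RPN_NMS_THRESHOLD is discarded before repeating.
    # Minimal IoU sketch with hypothetical corner-format boxes (x1, y1, x2, y2):
    #
    #   def iou(a, b):
    #       xa, ya = max(a[0], b[0]), max(a[1], b[1])
    #       xb, yb = min(a[2], b[2]), min(a[3], b[3])
    #       inter = max(0, xb - xa + 1) * max(0, yb - ya + 1)
    #       area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1)
    #       area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1)
    #       return inter / float(area_a + area_b - inter)
    #
    #   iou([0, 0, 10, 10], [5, 5, 15, 15])   # 36 / 206, roughly 0.175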
def test_proposal_visualization(self):
# === Prediction part ===
image_ids = self.train_data_generator.dataset_coco.image_ids
inputs, anchor_targets, bbox_reg_targets = self.train_data_generator.gen_train_data_rpn_one(image_ids[0])
print(inputs.shape, anchor_targets.shape, bbox_reg_targets.shape)
input1 = np.reshape(inputs[0, :, :, :], (1, self.IMG_SHAPE[0], self.IMG_SHAPE[1], 3))
rpn_anchor_pred, rpn_bbox_regression_pred = self.RPN.process_image(input1)
print(rpn_anchor_pred.shape, rpn_bbox_regression_pred.shape)
# === Selection part ===
# top_values, top_indices = tf.math.top_k()
rpn_anchor_pred = tf.slice(rpn_anchor_pred, [0, 0, 0, 0, 1],
[1, self.anchor_candidate_generator.h, self.anchor_candidate_generator.w,
self.anchor_candidate_generator.n_anchors, 1]) # second channel is foreground
print(rpn_anchor_pred.shape, rpn_bbox_regression_pred.shape)
# squeeze the pred of anchor and bbox_reg
rpn_anchor_pred = tf.squeeze(rpn_anchor_pred)
rpn_bbox_regression_pred = tf.squeeze(rpn_bbox_regression_pred)
shape1 = tf.shape(rpn_anchor_pred)
print(rpn_anchor_pred.shape, rpn_bbox_regression_pred.shape)
# flatten the pred of anchor to get top N values and indices
rpn_anchor_pred = tf.reshape(rpn_anchor_pred, (-1,))
n_anchor_proposal = Param.ANCHOR_PROPOSAL_N
top_values, top_indices = tf.math.top_k(rpn_anchor_pred,
n_anchor_proposal) # top_k has sort function. it's important here
top_indices = tf.gather_nd(top_indices, tf.where(tf.greater(top_values, Param.ANCHOR_THRESHOLD)))
top_values = tf.gather_nd(top_values, tf.where(tf.greater(top_values, Param.ANCHOR_THRESHOLD)))
debug_print('top values', top_values)
# test_indices = tf.where(tf.greater(tf.reshape(RPN_Anchor_Pred, (-1,)), 0.9))
# print(test_indices)
top_indices = tf.reshape(top_indices, (-1, 1))
debug_print('top indices', top_indices)
update_value = tf.math.add(top_values, 1)
rpn_anchor_pred = tf.tensor_scatter_nd_update(rpn_anchor_pred, top_indices, update_value)
rpn_anchor_pred = tf.reshape(rpn_anchor_pred, shape1)
# --- find the base boxes ---
anchor_pred_top_indices = tf.where(tf.greater(rpn_anchor_pred, 1))
debug_print('original_indices shape', anchor_pred_top_indices.shape)
debug_print('original_indices', anchor_pred_top_indices)
base_boxes = tf.gather_nd(self.anchor_candidates, anchor_pred_top_indices)
debug_print('base_boxes shape', base_boxes.shape)
debug_print('base_boxes', base_boxes)
base_boxes = np.array(base_boxes)
# --- find the bbox_regs ---
# flatten the bbox_reg by last dim to use top_indices to get final_box_reg
rpn_bbox_regression_pred_shape = tf.shape(rpn_bbox_regression_pred)
rpn_bbox_regression_pred = tf.reshape(rpn_bbox_regression_pred, (-1, rpn_bbox_regression_pred_shape[-1]))
debug_print('RPN_BBOX_Regression_Pred shape', rpn_bbox_regression_pred.shape)
final_box_reg = tf.gather_nd(rpn_bbox_regression_pred, top_indices)
debug_print('final box reg values', final_box_reg)
# Convert to numpy to plot
final_box_reg = np.array(final_box_reg)
debug_print('final box reg shape', final_box_reg.shape)
debug_print('max value of final box reg', np.max(final_box_reg))
final_box = BboxTools.bbox_reg2truebox(base_boxes=base_boxes, regs=final_box_reg)
# === Non maximum suppression ===
        final_box_temp = np.array(final_box).astype(int)  # np.int is deprecated in newer numpy
nms_boxes_list = []
while final_box_temp.shape[0] > 0:
ious = self.nms_loop_np(final_box_temp)
nms_boxes_list.append(
final_box_temp[0, :]) # since it's sorted by the value, here we can pick the first one each time.
final_box_temp = final_box_temp[ious < Param.RPN_NMS_THRESHOLD]
debug_print('number of box after nms', len(nms_boxes_list))
# Need to convert above instructions to tf operations
# === visualization part ===
# clip the boxes to make sure they are legal boxes
debug_print('max value of final box', np.max(final_box))
final_box = BboxTools.clip_boxes(final_box, self.IMG_SHAPE)
original_boxes = self.cocotool.get_original_bboxes_list(image_id=self.cocotool.image_ids[0])
self.cocotool.draw_bboxes(original_image=input1[0], bboxes=original_boxes, show=True, save_file=True,
path=Param.PATH_DEBUG_IMG, save_name='1GroundTruthBoxes')
target_anchor_boxes, target_classes = self.train_data_generator.gen_target_anchor_bboxes_classes_for_debug(
image_id=self.cocotool.image_ids[0])
self.cocotool.draw_bboxes(original_image=input1[0], bboxes=target_anchor_boxes, show=True, save_file=True,
path=Param.PATH_DEBUG_IMG, save_name='2TrueAnchorBoxes')
self.cocotool.draw_bboxes(original_image=input1[0], bboxes=base_boxes.tolist(), show=True, save_file=True,
path=Param.PATH_DEBUG_IMG, save_name='3PredAnchorBoxes')
self.cocotool.draw_bboxes(original_image=input1[0], bboxes=final_box.tolist(), show=True, save_file=True,
path=Param.PATH_DEBUG_IMG, save_name='4PredRegBoxes')
self.cocotool.draw_bboxes(original_image=input1[0], bboxes=nms_boxes_list, show=True, save_file=True,
path=Param.PATH_DEBUG_IMG, save_name='5PredNMSBoxes')
def test_total_visualization(self):
# === prediction part ===
input_images, target_anchor_bboxes, target_classes = self.train_data_generator.gen_train_data_roi_one(
self.train_data_generator.dataset_coco.image_ids[0],
self.train_data_generator.gen_candidate_anchors.anchor_candidates_list)
        input_images, target_anchor_bboxes, target_classes = np.asarray(input_images).astype(float), np.asarray(
            target_anchor_bboxes), np.asarray(target_classes)
# TODO:check tf.image.crop_and_resize
        input_images2 = input_images[:1].astype(float)
print(input_images2.shape)
        target_anchor_bboxes2 = target_anchor_bboxes[:1].astype(float)
print(target_anchor_bboxes2.shape)
class_header, box_reg_header = self.RoI.process_image([input_images2, target_anchor_bboxes2])
print(class_header.shape, box_reg_header.shape)
print(class_header)
def nms_loop_np(self, boxes):
# boxes : (N, 4), box_1target : (4,)
# box axis format: (x1,y1,x2,y2)
box_1target = np.ones(shape=boxes.shape)
zeros = np.zeros(shape=boxes.shape)
box_1target = box_1target * boxes[0, :]
box_b_area = (box_1target[:, 2] - box_1target[:, 0] + 1) * (box_1target[:, 3] - box_1target[:, 1] + 1)
# --- determine the (x, y)-coordinates of the intersection rectangle ---
x_a = np.max(np.array([boxes[:, 0], box_1target[:, 0]]), axis=0)
y_a = np.max(np.array([boxes[:, 1], box_1target[:, 1]]), axis=0)
x_b = np.min(np.array([boxes[:, 2], box_1target[:, 2]]), axis=0)
y_b = np.min(np.array([boxes[:, 3], box_1target[:, 3]]), axis=0)
# --- compute the area of intersection rectangle ---
inter_area = np.max(
completion: np.array([zeros[:, 0], x_b - x_a + 1])
api: numpy.array
'''
############################ SAPHIRES utils ###########################
Written by <NAME>, 2019
#######################################################################
This file is part of the SAPHIRES python package.
SAPHIRES is free software: you can redistribute it and/or modify it
under the terms of the MIT license.
SAPHIRES is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
You should have received a copy of the MIT license with SAPHIRES.
If not, see <http://opensource.org/licenses/MIT>.
Module Description:
A collection of utility functions used in the SAPHIRES package. The
only function in here you are likely to use is prepare. The rest are
called by other functions in the bf, xc, or io modules that are
tailored for typical users.
Functions are listed in alphabetical order.
'''
# ---- Standard Library
import sys
import copy as copy
import os
from datetime import datetime
# ----
# ---- Third Party
import numpy as np
from scipy import interpolate
from scipy.ndimage.filters import gaussian_filter
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pickle as pkl
import astropy.io.fits as pyfits
from astropy.coordinates import SkyCoord, EarthLocation
import astropy.units as u
from astropy.time import Time
from barycorrpy import utc_tdb
from barycorrpy import get_BC_vel
from astropy import constants as const
# ----
# ---- Project
from saphires.extras import bspline_acr as bspl
# ----
py_version = sys.version_info.major
if py_version == 3:
nplts = 'U' #the numpy letter for a string
p_input = input
if py_version == 2:
nplts = 'S' #the numpy letter for a string
p_input = raw_input
def air2vac(w_air):
'''
Air to vacuum conversion formula derived by N. Piskunov
IAU standard:
http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
Parameters
----------
w_air : array-like
Array of air wavelengths assumed to be in Angstroms
Returns
-------
w_vac : array-like
Array of vacuum wavelengths converted from w_air
'''
s = 10**4/w_air
n = (1 + 0.00008336624212083 + 0.02408926869968 / (130.1065924522 - s**2) +
0.0001599740894897 / (38.92568793293 - s**2))
w_vac = w_air*n
return w_vac
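# Example (illustrative values): H-alpha at 6562.80 A in air maps to roughly
# 6564.6 A in vacuum, a shift of about +1.8 A at this wavelength.
#
#   w_vac = air2vac(np.array([6562.80]))   # ~array([6564.6])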
def apply_shift(t_f_names,t_spectra,rv_shift,shift_style='basic'):
'''
A function to apply a velocity shift to an input spectrum.
The shift is made to the 'nflux' and 'nwave' arrays.
	The convention may be a bit weird; think of it like this:
If there is a feature at +40 km/s and you want that feature
to be at zero velocity, put in 40.
Whatever velocity you put in will be put to zero.
The shifted velocity is stored in the 'rv_shift' header
for each dictionary spectrum. Multiple shifts are stored
i.e. shifting by 40, and then 40 again will result in a
'rv_shift' value of 80.
Parameters
----------
t_f_names: array-like
Array of keywords for a science spectrum SAPHIRES dictionary. Output of
one of the saphires.io read-in functions.
t_spectra : python dictionary
SAPHIRES dictionary for the science spectrum.
rv_shift : float
The velocity (in km/s) you want centered at zero.
shift_style : str, optional
		Parameter that defines how the shift is applied. Options are 'basic' and
		'inter'. The default is 'basic'.
		- The 'basic' option adjusts the wavelength assignments with the
		standard RV shift. Pros, you don't interpolate the flux values;
		Cons, you change the wavelength spacing from being strictly linear.
		The pros outweigh the cons in most scenarios.
- The 'inter' option leaves the wavelength gridpoints the same, but
shifts the flux with an interpolation. Pros, you don't change the
wavelength spacing; Cons, you interpolate the flux, before you
interpolate it again in the prepare step. Interpolating flux is not
the best thing to do, so the less times you do it the better.
The only case I can think of where this method would be better is
if your "order" spanned a huge wavelength range.
Returns
-------
spectra_out : python dictionary
A python dictionary with the SAPHIRES architecture. The output dictionary
	will be a copy of t_spectra, but with updates to the following keywords.
['nwave'] - The shifted wavelength array
['nflux'] - The shifted flux array
['rv_shift'] - The value the spectrum was shifted in km/s
'''
c = const.c.to('km/s').value
spectra_out = copy.deepcopy(t_spectra)
for i in range(t_f_names.size):
if shift_style == 'inter':
w_unshifted = spectra_out[t_f_names[i]]['nwave']
w_shifted = w_unshifted/(1-(-rv_shift/(c)))
f_shifted_f = interpolate.interp1d(w_shifted,spectra_out[t_f_names[i]]['nflux'])
shift_trim = ((w_unshifted>=np.min(w_shifted))&(w_unshifted<=np.max(w_shifted)))
w_unshifted = w_unshifted[shift_trim]
spectra_out[t_f_names[i]]['nwave'] = w_unshifted
f_out=f_shifted_f(w_unshifted)
spectra_out[t_f_names[i]]['nflux'] = f_out
if shift_style == 'basic':
w_unshifted = spectra_out[t_f_names[i]]['nwave']
w_shifted = w_unshifted/(1-(-rv_shift/(c)))
spectra_out[t_f_names[i]]['nwave'] = w_shifted
w_range = spectra_out[t_f_names[i]]['w_region']
if w_range != '*':
w_split = np.empty(0)
w_rc1 = w_range.split('-')
for j in range(len(w_rc1)):
for k in range(len(w_rc1[j].split(','))):
w_split = np.append(w_split,np.float(w_rc1[j].split(',')[k]))
w_split_shift = w_split/(1-(-rv_shift/(c)))
w_range_shift = ''
for j in range(w_split_shift.size):
if (j/2.0 % 1) == 0: #even
w_range_shift = w_range_shift + np.str(np.round(w_split_shift[j],2))+'-'
if (j/2.0 % 1) != 0: #odd
w_range_shift = w_range_shift + np.str(np.round(w_split_shift[j],2))+','
if w_range_shift[-1] == ',':
w_range_shift = w_range_shift[:-1]
spectra_out[t_f_names[i]]['w_region'] = w_range_shift
spectra_out[t_f_names[i]]['rv_shift'] = spectra_out[t_f_names[i]]['rv_shift'] + rv_shift
return spectra_out
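# Example usage (hedged sketch; `tar_names` and `tar_spec` stand in for the
# outputs of a saphires.io read-in function): a feature sits at +40 km/s and we
# want it centered at zero velocity.
#
#   tar_spec_shifted = apply_shift(tar_names, tar_spec, rv_shift=40.0,
#                                  shift_style='basic')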
def bf_map(template,m):
'''
Creates a two dimensional array from a template by shifting one unit
of the wavelength (velocity) array m/2 times to the right and m/2 to
the left. This array, also known as the design matrix (des), is then
input to bf_solve.
Template is the resampled flux array in log(lambda) space.
Parameters
----------
template : array-like
		The spectral template resampled onto a logarithmic wavelength grid.
		Must have an even-numbered length.
m : int
The number of steps to shift the template. Must be odd
Returns
-------
t : array-like
The design matrix
'''
t=0
n=template.size
if (n % 2) != 0:
print('Input array must be even')
return
if (m % 2) != 1:
print('Input m must be odd')
return
t=np.zeros([n-m+1,m])
for j in range(m):
for i in range(m//2,n-m//2):
t[i-m//2,j]=template[i-j+m//2]
return t
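# Example usage (hedged sketch): build a design matrix from a prepared,
# log-lambda resampled template flux array of even length (here the hypothetical
# 'vflux_temp' entry of one order), with an odd number of velocity shifts m.
#
#   m = 301                                      # must be odd
#   des = bf_map(spec[order]['vflux_temp'], m)   # shape (n - m + 1, m)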
def bf_singleplot(t_f_names,t_spectra,for_plotting,f_trim=20):
'''
A function to make a mega plot of all the spectra in a target
dictionary.
Parameters
----------
t_f_names : array-like
Array of keywords for a science spectrum SAPHIRES dictionary. Output of
one of the saphires.io read-in functions.
t_spectra : python dictionary
SAPHIRES dictionary for the science spectrum. Output of one of the
saphires.io read-in functions.
for_plotting : python dictionary
A dictionary with all of the things you need to plot the fit profiles
from saphires.bf.analysis.
f_trim : int
The amount of points to trim from the edges of the BF before the fit is
		made. The edges are usually noisy. Units are in wavelength spacings.
The default is 20.
Returns
-------
None
'''
pp=PdfPages(t_f_names[0].split('[')[0].split('.')[0]+'_allplots.pdf')
for i in range(t_f_names.size):
w1=t_spectra[t_f_names[i]]['vwave']
target=t_spectra[t_f_names[i]]['vflux']
template=t_spectra[t_f_names[i]]['vflux_temp']
vel=t_spectra[t_f_names[i]]['vel']
temp_name=t_spectra[t_f_names[i]]['temp_name']
bf_sols=t_spectra[t_f_names[i]]['bf']
bf_smooth = for_plotting[t_f_names[i]][0]
func = for_plotting[t_f_names[i]][1]
gs_fit = for_plotting[t_f_names[i]][2]
m=vel.size
fig,ax=plt.subplots(2,sharex=True)
ax[0].set_title('Template',fontsize=8)
ax[0].set_ylabel('Normalized Flux')
ax[0].plot(w1,template)
ax[1].set_title('Target',fontsize=8)
ax[1].set_ylabel('Normalized Flux')
ax[1].plot(w1,target)
ax[1].set_xlabel(r'$\rm{\lambda}$ ($\rm{\AA}$)')
plt.tight_layout(pad=0.4)
pp.savefig()
plt.close()
fig,ax=plt.subplots(1)
ax.plot(vel,bf_smooth,color='lightgrey',lw=4,ls='-')
ax.set_ylabel('Broadening Function')
ax.set_xlabel('Radial Velocity (km/s)')
if gs_fit.size == 10:
ax.plot(vel[f_trim:-f_trim],gaussian_off(vel[f_trim:-f_trim],
gs_fit[0],gs_fit[1],
gs_fit[2],gs_fit[9]),
lw=2,ls='--',color='b',
label='Amp1: '+np.str(np.round(gs_fit[0]*gs_fit[2]*np.sqrt(2.0*np.pi),3)))
ax.plot(vel[f_trim:-f_trim],gaussian_off(vel[f_trim:-f_trim],
gs_fit[3],gs_fit[4],
gs_fit[5],gs_fit[9]),
lw=2,ls='--',color='r',
label='Amp2: '+np.str(np.round(gs_fit[3]*gs_fit[5]*np.sqrt(2.0*np.pi),3)))
ax.plot(vel[f_trim:-f_trim],gaussian_off(vel[f_trim:-f_trim],
gs_fit[6],gs_fit[7],
gs_fit[8],gs_fit[9]),
lw=2,ls='--',color='g',
label='Amp3: '+np.str(np.round(gs_fit[6]*gs_fit[8]*np.sqrt(2.0*np.pi),3)))
ax.legend()
if gs_fit.size == 7:
#if func == gauss_rot_off:
# ax.plot(vel[f_trim:-f_trim],gaussian_off(vel[f_trim:-f_trim],
# gs_fit[0],gs_fit[1],
# gs_fit[2],gs_fit[6]),
# lw=2,ls='--',color='b',
# label='Amp1: '+np.str(np.round(gs_fit[0]*gs_fit[2]*np.sqrt(2.0*np.pi),3)))
#
# ax.plot(vel[f_trim:-f_trim],rot_pro(vel[f_trim:-f_trim],
# gs_fit[3],gs_fit[4],
# gs_fit[5],gs_fit[6]),
# lw=2,ls='--',color='r',
# label='Amp2: '+np.str(np.round(gs_fit[3],3)))
if func == d_gaussian_off:
ax.plot(vel[f_trim:-f_trim],gaussian_off(vel[f_trim:-f_trim],
gs_fit[0],gs_fit[1],
gs_fit[2],gs_fit[6]),
lw=2,ls='--',color='b',
label='Amp1: '+np.str(np.round(gs_fit[0]*gs_fit[2]*np.sqrt(2.0*np.pi),3)))
ax.plot(vel[f_trim:-f_trim],gaussian_off(vel[f_trim:-f_trim],
gs_fit[3],gs_fit[4],
gs_fit[5],gs_fit[6]),
lw=2,ls='--',color='r',
label='Amp2: '+np.str(np.round(gs_fit[3]*gs_fit[5]*np.sqrt(2.0*np.pi),3)))
ax.legend()
ax.plot(vel[f_trim:-f_trim],func(vel[f_trim:-f_trim],*gs_fit),
lw=1,ls='-',color='k')
plt.tight_layout(pad=0.4)
pp.savefig()
plt.close()
pp.close()
return
def bf_solve(des,u,ww,vt,target,m):
'''
Takes in the design matrix, the output of saphires.utils.bf_map,
and creates an array of broadening functions where each row is
for a different order of the solution.
	The last index of the output (m-1) is the full solution.
	All of them are here for completeness, but in practice, the full
solution with gaussian smoothing is used to derive RVs and flux ratios.
Parameters
----------
	des : array-like
Design Matrix computed by saphires.utils.bf_map
u : array-like
One of the outputs of the design matrix's singular value
decomposition
ww : array-like
One of the outputs of the design matrix's singular value
decomposition
vt : array-like
One of the outputs of the design matrix's singular value
decomposition
target : array-like
		The target spectrum that corresponds to the template spectrum
that was used to make the design matrix.
m : int
Number of pixel shifts to compute
Returns
-------
b_sols : array-like
		Matrix of BF solutions for different orders. The last order is the
		one to use.
sig : array-like
Uncertainty array for each order.
'''
#turning ww into a matrix
ww_mat=np.zeros([m,m])
for i in range(m):
ww_mat[i,i]=ww[i]
#turning ww into its inverse (same as the transpose in this case) matrix.
ww1=np.zeros([m,m])
for i in range(m):
ww1[i,i]=1.0/ww[i]
#just making sure all the math went okay.
if np.allclose(des, np.dot(np.dot(u,ww_mat),vt)) == False:
print('Something went wrong with the matrix math in bf_sols')
return
#trimming target spectrum to have the right length
target_trim=target[m//2:target.size-m//2]
#computing the broadening function
b_sols=np.zeros([m,m])
for i in range(m):
wk=np.zeros([m,m])
wb=ww[0:i]
for j in range(wb.size):
wk[j,j]=1.0/wb[j]
b_sols[i,:] = np.dot(np.dot(np.transpose(vt), wk),
np.dot(np.transpose(u),target_trim))
#computing the error of the fit between the two.
sig=np.zeros(m)
pred=np.dot(des,np.transpose(b_sols))
for i in range(m):
sig[i]=np.sqrt(np.sum((pred[:,i]-target_trim)**2)/m)
return b_sols,sig
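# Example usage (hedged sketch continuing the bf_map example above; variable
# names are illustrative): decompose the design matrix with an economy SVD and
# solve for the broadening function against the resampled target flux.
#
#   u, ww, vt = np.linalg.svd(des, full_matrices=False)
#   b_sols, sig = bf_solve(des, u, ww, vt, spec[order]['vflux'], m)
#   bf_full = b_sols[-1]                      # the full (last-order) solution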
def bf_text_output(ofname,target,template,gs_fit,rchis,rv_weight,fit_int):
'''
A function to output the results of saphires.bf.analysis to a text file.
Parameters
----------
ofname : str
The name of the output file.
target : str
The name of the target spectrum.
template : str
The name of the template spectrum.
gs_fit : array-like
An array of the profile fit parameters from saphire.bf.analysis.
rchis : float
The reduced chi square of the saphires.bf.analysis fit with the data.
fit_int : array-like
Array of profile integrals from the saphires.bf.analysis fit.
Returns
-------
None
'''
if os.path.exists('./'+ofname) == False:
f=open(ofname,'w')
f.write('#Column Details\n')
f.write('#System Time\n')
		f.write('#Fit Parameters - For each profile fit, the following:\n')
f.write('# - Amp, RV (km/s), Sigma, Integral\n')
f.write('#Reduced Chi Squared of Fit\n')
f.write('#Target File Name\n')
f.write('#Template File Name\n')
else:
f=open(ofname,'a')
f.write(str(datetime.now())[0:-5]+'\t')
if gs_fit.size==10:
peak_ind=np.argsort([gs_fit[0],gs_fit[3],gs_fit[6]])[::-1]*3+1
f.write(np.str(np.round(gs_fit[peak_ind[0]-1],4))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[0]],4))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[0]+1],4))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[0]-1]*
gs_fit[peak_ind[0]+1]*np.sqrt(2*np.pi),2))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[1]-1],4))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[1]],4))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[1]+1],4))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[1]-1]*
gs_fit[peak_ind[1]+1]*np.sqrt(2*np.pi),2))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[2]-1],4))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[2]],4))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[2]+1],4))+'\t')
f.write(np.str(np.round(gs_fit[peak_ind[2]-1]*
gs_fit[peak_ind[2]+1]*np.sqrt(2*np.pi),2))+'\t')
if gs_fit.size==7:
if fit_int[0]>fit_int[1]:
f.write(np.str(np.round(gs_fit[0],4))+'\t')
f.write(np.str(np.round(gs_fit[1],4))+'\t')
f.write(np.str(np.round(gs_fit[2],4))+'\t')
f.write(np.str(np.round(fit_int[0],2))+'\t')
f.write(np.str(np.round(gs_fit[3],4))+'\t')
f.write(np.str(np.round(gs_fit[4],4))+'\t')
f.write(np.str(np.round(gs_fit[5],4))+'\t')
f.write(np.str(np.round(fit_int[1],2))+'\t')
else:
f.write(np.str(np.round(gs_fit[3],4))+'\t')
f.write(np.str(np.round(gs_fit[4],4))+'\t')
f.write(np.str(np.round(gs_fit[5],4))+'\t')
f.write(np.str(np.round(fit_int[1],2))+'\t')
f.write(np.str(np.round(gs_fit[0],4))+'\t')
f.write(np.str(np.round(gs_fit[1],4))+'\t')
f.write(np.str(np.round(gs_fit[2],4))+'\t')
f.write(np.str(np.round(fit_int[0],2))+'\t')
if gs_fit.size==4:
f.write(np.str(np.round(gs_fit[0],4))+'\t')
f.write(np.str(np.round(gs_fit[1],4))+'\t')
f.write(np.str(np.round(gs_fit[2],4))+'\t')
f.write(np.str(np.round(gs_fit[0]*gs_fit[2]*np.sqrt(2*np.pi),2))+'\t')
f.write(np.str(np.round(rchis,3))+'\t')
f.write(np.str(np.round(rv_weight,3))+'\t')
f.write(target+'\t')
f.write(template+'\n')
f.close()
return
def brvc(dateobs,exptime,observat,ra,dec,rv=0.0,print_out=False,epoch=2000,
pmra=0,pmdec=0,px=0,query=False):
'''
observat options:
- salt - (e.g. HRS)
- eso22 - (e.g. FEROS)
- vlt82 - (e.g. UVES)
- mcd27 - (e.g. IGRINS)
- lco_nres_lsc1 - (e.g. NRES at La Silla)
- lco_nres_cpt1 - (e.g. NRES at SAAO)
- tlv - (e.g. LCO NRES at Wise Observatory in Tel Aviv)
- eso36 - (e.g. HARPS)
- geminiS - (e.g. IGRINS South)
- wiyn - (e.g. HYDRA)
- dct - (e.g. IGRINS at DCT)
- hires - (Keck Hi-Res)
- smarts15 - (e.g. CHIRON)
returns
brv,bjd,bvcorr
'''
c = const.c.to('km/s').value
from astroquery.vizier import Vizier
edr3 = Vizier(columns=["*","+_r","_RAJ2000","_DEJ2000","Epoch","Plx"],catalog=['I/350/gaiaedr3'])
#from astroquery.gaia import Gaia
if isinstance(observat,str):
if isinstance(dateobs,str):
n_sites = 1
observat = [observat]
dateobs = [dateobs]
exptime = [exptime]
rv = [rv]
else:
n_sites = dateobs.size
observat = [observat]*dateobs.size
else:
n_sites = len(observat)
brv = np.zeros(n_sites)
bvcorr = np.zeros(n_sites)
bjd = np.zeros(n_sites)
#longitudes should be in degrees EAST!
#Most are not unless they have a comment below them.
for i in range(n_sites):
if observat[i] == 'keck':
alt = 4145
lat = 19.82636
lon = -155.47501
#https://latitude.to/articles-by-country/us/united-states/7854/w-m-keck-observatory#:~:text=GPS%20coordinates%20of%20W.%20M.%20Keck,Latitude%3A%2019.8264%20Longitude%3A%20%2D155.4750
if observat[i] == 'gemini_south':
alt = 2750
lat = -30.24074167
lon = -70.736683
#http://www.ctio.noao.edu/noao/content/coordinates-observatories-cerro-tololo-and-cerro-pachon
if observat[i] == 'salt':
alt = 1798
lat = -32.3794
lon = 20.810694
#http://www.sal.wisc.edu/~ebb/pfis/observer/intro.html
if observat[i] == 'eso22':
alt = 2335
lat = -29.25428972
lon = 289.26540472
if observat[i] == 'eso36':
alt = 2400
lat = -29.2584
lon = 289.2655
if observat[i] == 'vlt82':
alt = 2635
lat = -24.622997508
lon = 289.59750161
if observat[i] == 'mcdonald':
alt = 2075
lat = 30.6716667
lon = -104.0216667
#https://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro
if observat[i] == 'wiyn':
alt = 2120
lat = 31.95222
lon = -111.60000
if observat[i] == 'dct':
alt = 2360
lat = 34.744444
lon = -111.42194
if observat[i] == 'smarts15':
alt = 2252.2
lat = -30.169661
lon = -70.806789
#http://www.ctio.noao.edu/noao/content/coordinates-observatories-cerro-tololo-and-cerro-pachon
if observat[i] == 'lsc':
alt = 2201
lat = -30.1673305556
lon = -70.8046611111
#From a header (CTIO - LCO)
if observat[i] == 'cpt':
alt = 1760.0
lat = -32.34734167
lon = 20.81003889
#From a header (SAAO - LCO)
if observat[i] == 'tlv':
alt = 861.4
lat = 30.595833
lon = 34.763333
			#From a header (WISE, Israel - LCO)
if observat[i] == 'elp':
alt = 2030.000
lat = 30.6798330
lon = -104.0151730
#From a header (McDonald - LCO)
if observat[i] == 'coj':
alt = 1168.000
lat = -31.2729330
lon = 149.0706470
#From a header (Siding Springs Observatory - LCO)
if isinstance(ra,str):
		ra,dec = sex2dd(ra,dec)  # sex2dd is in this module; no 'saph' import exists here
if query == True:
coord = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
result = edr3.query_region(coord,radius='0d0m3s')
#Pgaia = Gaia.cone_search_async(coord, 3.0*u.arcsec)
if len(result) == 0:
print('No match found, using provided/default values.')
else:
dec = result['I/350/gaiaedr3']['DE_ICRS'][0]
ra = result['I/350/gaiaedr3']['RA_ICRS'][0]
epoch = result['I/350/gaiaedr3']['Epoch'][0]
pmra = result['I/350/gaiaedr3']["pmRA"][0]
pmdec = result['I/350/gaiaedr3']["pmDE"][0]
px = result['I/350/gaiaedr3']["Plx"][0]
if isinstance(dateobs[i],str):
utc = Time(dateobs[i],format='isot',scale='utc')
if isinstance(dateobs[i],float):
utc = Time(dateobs[i],format='jd',scale='utc')
utc_middle = utc + (exptime[i]/2.0)*u.second
		if observat[i] in EarthLocation.get_site_names():
			#use the per-site observatory name (observat is a list at this point)
			bjd_info = utc_tdb.JDUTC_to_BJDTDB(JDUTC = utc_middle, obsname=observat[i],
				dec=dec, ra=ra, epoch=epoch, pmra=pmra,
				pmdec=pmdec, px=px)
			bvcorr_info = get_BC_vel(JDUTC = utc_middle, ra=ra, dec=dec, epoch=epoch,
				pmra=pmra, pmdec=pmdec, px=px, obsname=observat[i])
else:
bjd_info = utc_tdb.JDUTC_to_BJDTDB(JDUTC = utc_middle, alt = alt,
lat=lat, longi=lon, dec=dec,
ra=ra, epoch=epoch, pmra=pmra,
pmdec=pmdec, px=px)
bvcorr_info = get_BC_vel(JDUTC = utc_middle, ra=ra, dec=dec, epoch=epoch,
pmra=pmra, pmdec=pmdec, px=px, lat=lat, longi=lon,
alt=alt)
bjd[i] = bjd_info[0][0]
bvcorr[i] = bvcorr_info[0][0]/1000.0
if type(rv) == float:
brv[i] = rv + bvcorr[i] + (rv * bvcorr[i] / (c))
else:
brv[i] = rv[i] + bvcorr[i] + (rv[i] * bvcorr[i] / (c))
if print_out == True:
print('BJD: ',bjd)
print('BRVC: ',bvcorr)
print('BRV: ',brv)
return brv,bjd,bvcorr
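# Example usage (hedged sketch with made-up coordinates and timing):
#
#   brv, bjd, bvcorr = brvc(dateobs='2019-09-15T04:32:10.0', exptime=1200.0,
#                           observat='salt', ra=215.10, dec=-33.51,
#                           rv=12.3, print_out=True)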
def cont_norm(w,f,w_width=200.0,maxiter=15,lower=0.3,upper=2.0,nord=3):
'''
Continuum normalizes a spectrum
Uses the bspline_acr package which was adapted from IDL by
<NAME>.
Note: In the SAPHIRES architecture, this has to be done before
the spectrum is inverted, which happens automatically in the
saphires.io read in functions. That is why the option to
continuum normalize is available in those functions, and
should be done there.
Parameters
----------
w : array-like
Wavelength array of the spectrum to be normalized.
Assumed to be angstroms, but doesn't really matter.
f : array-like
Flux array of the spectrum to be normalized.
Assumes linear flux units.
w_width : number
		Width of the spline fitting window. This is useful for
		long, stitched spectra where it doesn't make sense to
		try and normalize the entire thing in one shot.
		The default is 200 A, which seems to work reasonably well.
Assumes angstroms but will naturally be on the same scale
as the wavelength array, w.
maxiter : int
		Number of iterations. The default is 15.
lower : float
Lower limit in units of sigmas for including data in the
spline interpolation. The default is 0.3.
upper : float
Upper limit in units of sigma for including data in the
spline interpolation. The default is 2.0.
nord : int
		Order of the spline. The default is 3.
Returns
-------
f_norm : array-like
Continuum normalized flux array
'''
norm_space = w_width #/(w[1]-w[0])
#x = np.arange(f.size,dtype=float)
spl = bspl.iterfit(w, f, maxiter = maxiter, lower = lower,
upper = upper, bkspace = norm_space,
nord = nord )[0]
cont = spl.value(w)[0]
f_norm = f/cont
return f_norm
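# Example usage (hedged sketch; `w` and `f` are a hypothetical wavelength array
# in angstroms and the matching linear flux array for one order):
#
#   f_norm = cont_norm(w, f, w_width=200.0)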
def dd2sex(ra,dec,results=False):
'''
	Convert ra and dec in decimal degrees format to sexagesimal format.
Parameters:
-----------
ra : ndarray
Array or single value of RA in decimal degrees.
dec : ndarray
Array or single value of Dec in decimal degrees.
	results : bool
		If True, the converted sexagesimal RA and Dec are printed.
Returns:
--------
raho : ndarray
Array of RA hours.
ramo : ndarray
Array of RA minutes.
raso : ndarray
Array of RA seconds.
decdo : ndarray
		Array of Dec degree placeholders in sexagesimal format.
decmo : ndarray
Array of Dec minutes.
decso : ndarray
Array of Dec seconds.
Output:
-------
Prints results to terminal if results == True.
Version History:
----------------
2015-05-15 - Start
'''
rah=(np.array([ra])/360.0*24.0)
raho=np.array(rah,dtype=np.int)
ramo=np.array(((rah-raho)*60.0),dtype=np.int)
raso=((rah-raho)*60.0-ramo)*60.0
dec=np.array([dec])
dec_sign = np.sign(dec)
decdo=np.array(dec,dtype=np.int)
decmo=np.array(np.abs(dec-decdo)*60,dtype=np.int)
decso=(np.abs(dec-decdo)*60-decmo)*60.0
if results == True:
for i in range(rah.size):
print(np.str(raho[i])+':'+np.str(ramo[i])+':'+ \
(str.format('{0:2.6f}',raso[i])).zfill(7)+', '+ \
np.str(decdo[i])+':'+np.str(decmo[i])+':'+ \
(str.format('{0:2.6f}',decso[i])).zfill(7))
return raho,ramo,raso,dec_sign,decdo,decmo,decso
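# Example (illustrative): 157.5 deg, -30.5 deg corresponds to 10h30m00s,
# -30d30m00s; each piece comes back as a length-1 array.
#
#   raho, ramo, raso, dec_sign, decdo, decmo, decso = dd2sex(157.5, -30.5)
#   # raho -> [10], ramo -> [30], raso -> [0.0],
#   # dec_sign -> [-1.0], decdo -> [-30], decmo -> [30], decso -> [0.0]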
def d_gaussian_off(x,A1,x01,sig1,A2,x02,sig2,o):
'''
	A double gaussian function with a constant vertical offset.
Parameters
----------
x : array-like
Array of x values over which the Gaussian profile will
be computed.
A1 : float
Amplitude of the first Gaussian profile.
x01 : float
Center of the first Gaussian profile.
sig1 : float
Standard deviation (sigma) of the first Gaussian profile.
A2 : float
Amplitude of the second Gaussian profile.
x02 : float
Center of the second Gaussian profile.
sig2 : float
Standard deviation (sigma) of the second Gaussian profile.
o : float
Vertical offset of the Gaussian mixture.
Returns
-------
profile : array-like
The Gaussian mixture specified over the input x array.
Array has the same length as x.
'''
return (A1*np.e**(-(x-x01)**2/(2.0*sig1**2))+
A2*np.e**(-(x-x02)**2/(2.0*sig2**2))+o)
def EWM(x,xerr,wstd=False):
'''
A function to return the error weighted mean of an array and the error on
the error weighted mean.
Parameters
----------
x : array like
An array of values you want the error weighted mean of.
xerr : array like
Array of associated one-sigma errors.
wstd : bool
Option to return the error weighted standard deviation. If True,
three values are returned. The default is False.
Returns
-------
xmean : float
The error weighted mean
xmeanerr : float
The error on the error weighted mean. This number will only make
sense if the input xerr is a 1-sigma uncertainty.
xmeanstd : conditional output, float
Error weighted standard deviation, only output if wstd=True
Outputs
-------
None
Version History
---------------
2016-12-06 - Start
'''
weight = 1.0 / xerr**2
xmean=np.sum(x/xerr**2)/np.sum(weight)
xmeanerr=1.0/np.sqrt(np.sum(weight))
xwstd = np.sqrt(np.sum(weight*(x - xmean)**2) / ((np.sum(weight)*(x.size-1)) / x.size))
if wstd == True:
return xmean,xmeanerr,xwstd
else:
return xmean,xmeanerr
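# Worked example (illustrative numbers): values 10 and 20 with 1-sigma errors 1
# and 2 give weights 1 and 0.25, so the error weighted mean is
# (10*1 + 20*0.25)/1.25 = 12.0 with error 1/sqrt(1.25), roughly 0.894.
#
#   xmean, xmeanerr = EWM(np.array([10.0, 20.0]), np.array([1.0, 2.0]))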
def gaussian_off(x,A,x0,sig,o):
'''
	A simple gaussian function with a constant vertical offset.
This Gaussian is not normalized in the typical sense.
Parameters
----------
x : array-like
Array of x values over which the Gaussian profile will
be computed.
A : float
Amplitude of the Gaussian profile.
x0 : float
Center of the Gaussian profile.
sig : float
Standard deviation (sigma) of the Gaussian profile.
o : float
Vertical offset of the Gaussian profile.
Returns
-------
profile : array-like
The Gaussian profile specified over the input x array.
Array has the same length as x.
'''
return A*np.e**(-(x-x0)**2/(2.0*sig**2))+o
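# Example (illustrative): the profile peaks at A + o when x = x0.
#
#   gaussian_off(np.array([0.0]), A=1.0, x0=0.0, sig=5.0, o=0.1)   # -> array([1.1])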
def lco2u(lco_fits,pkl_out,v2a=False):
'''
A function to read in the fits output from the standard
LCO/NRES pipeline and turn it into a pkl file that
matching the SAPHIRES architecture.
Parameters
----------
lco_fits : str
The name of a single LCO fits file
pkl_out : str
The name of the output pickle file
v2a:
Option to convert the wavelength convention to air
from the provided vacuum values.
Returns
-------
None
'''
hdu = pyfits.open(lco_fits)
flux = hdu[3].data
for i in range(flux.shape[0]):
flux[i] = flux[i]+np.abs(np.min(flux[i]))
w_vac = hdu[6].data*10
if v2a == True:
w_out = vac2air(w_vac)
else:
w_out = w_vac
dict = {'wav':w_out, 'flux':flux}
pkl.dump(dict,open(pkl_out,'wb'))
print("LCO pickle written out to "+pkl_out)
return
def make_rot_pro_ip(R,e=0.75):
'''
A function to make a specific rotationally broadened fitting
function with a specific limb-darkening parameter that is
convolved with the instrumental profile that corresponds to
a given spectral resolution.
	The output profile uses the linear limb darkening law from
	Gray 2005.
Parameters
----------
R : float
The resolution that corresponds to the spectrograph's
instrumental profile.
e : float
Linear limb darkening parameter. Default is 0.75,
appropriate for a low-mass star.
Returns
-------
rot_pro_ip : function
A function that returns the line profile for a rotationally
broadened star with the limb darkening parameter given by the make
		function and that has been convolved with the instrumental
		profile specified by the spectral resolution given to the make
		function above.
Parameters
----------
x : array-like
X array values, should be provided in velocity in km/s, over
which the smooth rotationally broadened profile will be
computed.
A : float
Amplitude of the smoothed rotationally broadened profile.
Equal to the profile's integral.
rv : float
RV center of the profile.
rvw : float
The vsini of the profile.
o : float
The vertical offset of the profile.
Returns
-------
prof_conv : array-like
The smoothed rotationally broadened profile specified by the
paramteres above, over the input x array. Array has the same
length as x.
'''
c = const.c.to('km/s').value
FWHM = (c)/R
sig = FWHM/(2.0*np.sqrt(2.0*np.log(2.0)))
def rot_pro_ip(x,A,rv,rvw,o):
'''
		A thorough description of this function is provided in the main
		function.
		Rotational line broadening function.
		To produce an actual line profile, you have to convolve this function
		with an actual spectrum.
		In this form it can be fit directly to the Broadening Function.
		This is in velocity space, so if you're going to convolve this with a
		spectrum, make sure to take the appropriate precautions.
'''
c1 = (2*(1-e))/(np.pi*rvw*(1-e/3.0))
c2 = e/(2*rvw*(1-e/3.0))
prof=A*(c1*np.sqrt(1-((x-rv)/rvw)**2)+c2*(1-((x-rv)/rvw)**2))+o
prof[np.isnan(prof)] = o
v_spacing = x[1]-x[0]
smooth_sigma = sig/v_spacing
prof_conv=gaussian_filter(prof,sigma=smooth_sigma)
return prof_conv
return rot_pro_ip
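# Example usage (hedged sketch; `vel` and `bf_smooth` are hypothetical arrays
# from a broadening-function analysis): build a profile for a spectrograph with
# R ~ 40000 and fit it with scipy.
#
#   from scipy.optimize import curve_fit
#   rot_ip = make_rot_pro_ip(R=40000.0, e=0.75)
#   p0 = [1.0, 0.0, 20.0, 0.0]                # A, rv, vsini (rvw), offset
#   gs_fit, gs_cov = curve_fit(rot_ip, vel, bf_smooth, p0=p0)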
def make_rot_pro_qip(R,a=0.3,b=0.4):
'''
A function to make a specific rotationally broadened fitting
function with a specific limb-darkening parameter that is
convolved with the instrumental profile that corresponds to
a given spectral resolution.
	The output profile uses the quadratic limb darkening law from
	Gray 2005.
Parameters
----------
R : float
The resolution that corresponds to the spectrograph's
instrumental profile.
	a : float
		Linear coefficient of the quadratic limb-darkening law. The
		default is 0.3.
	b : float
		Quadratic coefficient of the quadratic limb-darkening law. The
		default is 0.4.
Returns
-------
	rot_pro_qip : function
		A function that returns the line profile for a rotationally
		broadened star with the limb-darkening parameters given to the make
		function and that has been convolved with the instrumental
		profile specified by the spectral resolution given to the make
		function above.
Parameters
----------
x : array-like
X array values, should be provided in velocity in km/s, over
which the smooth rotationally broadened profile will be
computed.
A : float
Amplitude of the smoothed rotationally broadened profile.
Equal to the profile's integral.
rv : float
RV center of the profile.
rvw : float
The vsini of the profile.
o : float
The vertical offset of the profile.
Returns
-------
prof_conv : array-like
The smoothed rotationally broadened profile specified by the
paramteres above, over the input x array. Array has the same
length as x.
'''
c = const.c.to('km/s').value
FWHM = (c)/R
sig = FWHM/(2.0*np.sqrt(2.0*np.log(2.0)))
def rot_pro_qip(x,A,rv,rvw,o):
'''
		A thorough description of this function is provided in the main
		function.
		Rotational line broadening function.
		To produce an actual line profile, you have to convolve this function
		with an actual spectrum.
		In this form it can be fit directly to the Broadening Function.
		This is in velocity space, so if you're going to convolve this with a
		spectrum, make sure to take the appropriate precautions.
'''
prof = A*((2.0*(1-a-b)*np.sqrt(1-((x-rv)/rvw)**2) +
np.pi*((a/2.0) + 2.0*b)*(1-((x-rv)/rvw)**2) -
(4.0/3.0)*b*(1-((x-rv)/rvw)**2)**(3.0/2.0)) /
(np.pi*(1-(a/3.0)-(b/6.0)))) + o
prof[np.isnan(prof)] = o
v_spacing = x[1]-x[0]
smooth_sigma = sig/v_spacing
prof_conv=gaussian_filter(prof,sigma=smooth_sigma)
return prof_conv
return rot_pro_qip
def order_stitch(t_f_names,spectra,n_comb,print_orders=True):
'''
	A function to stitch together a specified number of orders
	throughout a dictionary.
	e.g. an 8-order spectrum, with the number of orders to combine
	set to 2, will stitch together 1-2, 3-4, 5-6, and 7-8,
and result in a dictionary with 4 stitched orders.
If the number of combined orders does not divide evenly, the
remaining orders will be appended to the last stitched order.
'''
n_orders = t_f_names[t_f_names!='Combined'].size
n_orders_out = np.int(n_orders/np.float(n_comb))
spectra_out = {}
t_f_names_out = np.zeros(n_orders_out,dtype=nplts+'1000')
for i in range(n_orders_out):
w_all = np.empty(0)
flux_all = np.empty(0)
for j in range(n_comb):
w_all = np.append(w_all,spectra[t_f_names[i*n_comb+j]]['nwave'])
flux_all = np.append(flux_all,spectra[t_f_names[i*n_comb+j]]['nflux'])
if j == 0:
w_range_all = spectra[t_f_names[i*n_comb+j]]['w_region']+','
if ((j > 0) & (j<n_comb-1)):
w_range_all = w_range_all+spectra[t_f_names[i*n_comb+j]]['w_region']+','
if j == n_comb-1:
w_range_all = w_range_all+spectra[t_f_names[i*n_comb+j]]['w_region']
if i == n_orders_out-1:
leftover = n_orders - (i*n_comb+n_comb)
for j in range(leftover):
w_all = np.append(w_all,spectra[t_f_names[i*n_comb+n_comb+j]]['nwave'])
flux_all = np.append(flux_all,spectra[t_f_names[i*n_comb+n_comb+j]]['nflux'])
if j == 0:
w_range_all = w_range_all+','+spectra[t_f_names[i*n_comb+n_comb+j]]['w_region']+','
if ((j > 0) & (j < leftover-1)):
w_range_all = w_range_all+spectra[t_f_names[i*n_comb+n_comb+j]]['w_region']+','
if j == leftover-1:
w_range_all = w_range_all+spectra[t_f_names[i*n_comb+n_comb+j]]['w_region']
if w_range_all[-1] == ',':
w_range_all = w_range_all[:-1]
flux_all = flux_all[np.argsort(w_all)]
w_all = w_all[np.argsort(w_all)]
w_min=np.int(np.min(w_all))
w_max=np.int(np.max(w_all))
t_dw = np.median(w_all - np.roll(w_all,1))
t_f_names_out[i] = ('R'+np.str(i)+'['+np.str(i)+']['+np.str(w_min)+'-'+np.str(w_max)+']')
if print_orders == True:
print(t_f_names_out[i],w_range_all)
spectra_out[t_f_names_out[i]] = {'nflux': flux_all,
'nwave': w_all,
'ndw': np.median(np.abs(w_all - np.roll(w_all,1))),
'wav_cent': np.mean(w_all),
'w_region': w_range_all,
'rv_shift': spectra[t_f_names[0]]['rv_shift'],
'order_flag': 1}
return t_f_names_out,spectra_out
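# Example usage (hedged sketch; `tar_names` and `tar_spec` stand in for the
# outputs of a saphires.io read-in function): stitch every two adjacent orders.
#
#   tar_names2, tar_spec2 = order_stitch(tar_names, tar_spec, n_comb=2)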
def prepare(t_f_names,t_spectra,temp_spec,oversample=1,
quiet=False,trap_apod=0,cr_trim=-0.1,trim_style='clip',
vel_spacing='uniform'):
'''
A function to prepare a target spectral dictionary with a template
spectral dictionary for use with SAPHIRES analysis tools. The preparation
	amounts to resampling the wavelength array to logarithmic spacing, which
	corresponds to linear velocity spacing. Linear velocity spacing is required
	to use TODCOR or to compute a broadening function.
oversample -
A key parameter of this function is "oversample". It sets the logarithmic
spacing of the wavelength resampling (and the corresponding velocity
'resolution' of the broadening function). Generally, a higher oversampling
will produce better results, but it very quickly becomes expensive for two
reasons. One, your arrays become longer, and two, you have to increase the m
value (a proxy for the velocity regime probed) to cover the same velocity range.
	A low oversample value can be problematic if the intrinsic widths of your
	template and target lines are the same. In this case, the broadening function
	should be a delta function. The height/width of this function will depend on
the location of velocity grid points in the BF in an undersampled case. This
makes measurements of the RV and especially flux ratios problematic.
If you're unsure what value to use, look at the BF with low smoothing
(i.e. high R). If the curves are jagged and look undersampled, increase the
oversample parameter.
Parameters
----------
t_f_names: array-like
Array of keywords for a science spectrum SAPHIRES dictionary. Output of
one of the saphires.io read-in functions.
t_spectra : python dictionary
SAPHIRES dictionary for the science spectrum. Output of one of the
saphires.io read-in functions.
temp_spec : python dictionary
SAPHIRES dictionary for the template spectrum. Output of one of the
saphires.io read-in functions.
oversample : float
Factor by which the velocity resolution is oversampled. This parameter
has an extended discussion above. The default value is 1.
quiet : bool
		Specifies whether messages are printed to the terminal. Specifically, if
		the science and template spectrum do not overlap, this function will
		print an error. The default value is False.
trap_apod : float
Option to apodize (i.e. taper) the resampled flux array to zero near
the edges. A value of 0.1 will taper 10% of the array length on each
		end of the array. Some previous studies that use broadening functions in
		the literature use this, claiming it reduces noise in the sidebands. I
		haven't found this to be the case, but the functionality exists
		nonetheless. The default value is 0, i.e. no apodization.
cr_trim : float
This parameter sets the value below which emission features are removed.
		Emission in this case is negative because the spectra are inverted. The
		value must be negative. Points below this value are linearly interpolated
		over. The default value is -0.1. If you don't want to clip anything, set
		this parameter to -np.inf.
trim_style : str, options: 'clip', 'lin', 'spl'
If a wavelength region file is input in the 'spectra_list' parameter,
this parameter describes how gaps are dealt with.
- If 'clip', unused regions will be left as gaps.
- If 'lin', unused regions will be linearly interpolated over.
- If 'spl', unused regions will be interpolated over with a cubic
spline. You probably don't want to use this one.
vel_spacing : str; 'orders' or 'uniform', or float
Parameter that determines how the velocity width of the resampled array
is set.
If 'orders', the velocity width will be set by the smallest velocity
separation between the native input science and template wavelength arrays on
an order by order basis.
If 'uniform', every order will have the same velocity spacing. This is useful
if you want to combine BFs for instance. The end result will generally be
		slightly oversampled, but other than taking a bit longer to process, it should
not have any adverse effects.
If this parameter is a float, the velocity spacing will be set to that value,
assuming it is in km/s.
		You can get weird results if you put in a value that doesn't make sense, so
		I recommend the orders or uniform setting. This option is available for more
		advanced use cases that may only be relevant if you are using TODCOR. See
documentation there for a relevant example.
The oversample parameter is ignored when this parameter is set to a float.
Returns
-------
spectra : dictionary
A python dictionary with the SAPHIRES architecture. The output dictionary
will have 5 new keywords as a result of this function.
['vflux'] - resampled flux array (inverted)
['vwave'] - resampled wavelength array
['vflux_temp'] - resampled template flux array (inverted)
['vel'] - velocity array to be used with the BF or CCF
['temp_name'] - template name
['vel_spacing'] - the velocity spacing that corresponds to the
resampled wavelength array
It also updates the values for the following keyword under the right
conditions:
['order_flag'] - order flag will be updated to 0 if the order has no
overlap with the template. This tells other functions
to ignore this order.
'''
#########################################
#This part "prepares" the spectra
c = const.c.to('km/s').value
spectra = copy.deepcopy(t_spectra)
max_w_orders = np.zeros(t_f_names.size)
min_w_orders = np.zeros(t_f_names.size)
min_dw_orders = np.zeros(t_f_names.size)
for i in range(t_f_names.size):
w_tar = spectra[t_f_names[i]]['nwave']
flux_tar = spectra[t_f_names[i]]['nflux']
w_range = spectra[t_f_names[i]]['w_region']
w_temp = temp_spec['nwave']
flux_temp = temp_spec['nflux']
temp_trim = temp_spec['w_region']
w_tar,flux_tar = spec_trim(w_tar,flux_tar,w_range,temp_trim,trim_style=trim_style)
w_temp,flux_temp = spec_trim(w_temp,flux_temp,w_range,temp_trim,trim_style=trim_style)
if w_tar.size == 0:
min_w_orders[i] = np.nan
max_w_orders[i] = np.nan
min_dw_orders[i] = np.nan
else:
min_w_orders[i] = np.max([np.min(w_tar),np.min(w_temp)])
max_w_orders[i] = np.min([np.max(w_tar),np.max(w_temp)])
min_dw_orders[i]=np.min([temp_spec['ndw'],spectra[t_f_names[i]]['ndw']])
min_dw = np.nanmin(min_dw_orders)
min_w = np.nanmin(min_w_orders)
max_w = np.nanmax(max_w_orders)
if vel_spacing == 'uniform':
r = np.min(min_dw/max_w/oversample)
#velocity spacing in km/s
stepV=r*c
	if isinstance(vel_spacing, (float, np.floating)):
stepV = vel_spacing
r = stepV / (c)
min_dw = r * max_w
for i in range(t_f_names.size):
spectra[t_f_names[i]]['temp_name'] = temp_spec['temp_name']
w_range = spectra[t_f_names[i]]['w_region']
w_tar = spectra[t_f_names[i]]['nwave']
flux_tar = spectra[t_f_names[i]]['nflux']
temp_trim = temp_spec['w_region']
w_temp = temp_spec['nwave']
flux_temp = temp_spec['nflux']
#This gets rid of large emission lines and CRs by interpolating over them.
if np.min(flux_tar) < cr_trim:
f_tar = interpolate.interp1d(w_tar[flux_tar > cr_trim],flux_tar[flux_tar > cr_trim])
w_tar = w_tar[(w_tar >= np.min(w_tar[flux_tar > cr_trim]))&
(w_tar <= np.max(w_tar[flux_tar > cr_trim]))]
flux_tar = f_tar(w_tar)
if np.min(flux_temp) < cr_trim:
f_temp = interpolate.interp1d(w_temp[flux_temp > cr_trim],flux_temp[flux_temp > cr_trim])
w_temp = w_temp[(w_temp >= np.min(w_temp[flux_temp > cr_trim]))&
(w_temp <= np.max(w_temp[flux_temp > cr_trim]))]
flux_temp = f_temp(w_temp)
w_tar,flux_tar = spec_trim(w_tar,flux_tar,w_range,temp_trim,trim_style=trim_style)
if w_tar.size == 0:
if quiet==False:
print(t_f_names[i],w_range)
print("No overlap between target and template.")
print(' ')
spectra[t_f_names[i]]['vwave'] = 0.0
spectra[t_f_names[i]]['order_flag'] = 0
continue
f_tar = interpolate.interp1d(w_tar,flux_tar)
f_temp = interpolate.interp1d(w_temp,flux_temp)
min_w = np.max([np.min(w_tar),np.min(w_temp)])
max_w = np.min([np.max(w_tar),np.max(w_temp)])
if vel_spacing == 'orders':
#Using the wavelength spacing of the most densely sampled spectrum
min_dw=np.min([temp_spec['ndw'],spectra[t_f_names[i]]['ndw']])
#inverse of the spectral resolution
r = min_dw/max_w/oversample
#velocity spacing in km/s
stepV = r * c
#the largest array length between target and spectrum
#conditional below makes sure it is even
max_size = np.int(np.log(max_w/(min_w+1))/np.log(1+r))
if (max_size/2.0 % 1) != 0:
max_size=max_size-1
#log wavelength spacing, linear velocity spacing
w1t=(min_w+1)*(1+r)**np.arange(max_size)
w1t_temp = copy.deepcopy(w1t)
t_rflux = f_tar(w1t)
temp_rflux = f_temp(w1t)
w1t,t_rflux = spec_trim(w1t,t_rflux,w_range,temp_trim,trim_style=trim_style)
w1t_temp,temp_rflux = spec_trim(w1t_temp,temp_rflux,w_range,temp_trim,trim_style=trim_style)
if (w1t.size/2.0 % 1) != 0:
w1t=w1t[0:-1]
t_rflux = t_rflux[0:-1]
temp_rflux = temp_rflux[0:-1]
if trap_apod > 0:
trap_apod_fun = np.ones(w1t.size)
slope = 1.0/int(w1t.size*trap_apod)
y_int = slope*w1t.size
trap_apod_fun[:int(w1t.size*trap_apod)] = slope*np.arange(int(w1t.size*trap_apod),dtype=float)
trap_apod_fun[-int(w1t.size*trap_apod)-1:] = -slope*(np.arange(int(w1t.size*trap_apod+1),dtype=float)+(w1t.size*(1-trap_apod))) + y_int
temp_rflux = temp_rflux * trap_apod_fun
t_rflux = t_rflux * trap_apod_fun
spectra[t_f_names[i]]['vflux'] = t_rflux
spectra[t_f_names[i]]['vwave'] = w1t
spectra[t_f_names[i]]['vflux_temp'] = temp_rflux
spectra[t_f_names[i]]['vel_spacing'] = stepV
spectra[t_f_names[i]]['w_region_temp'] = temp_spec['w_region']
return spectra
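# --- Illustrative sketch (not part of saphires): the core of the resampling
# --- done above is to place a spectrum on a log-wavelength grid so that every
# --- pixel corresponds to the same velocity step, stepV = r*c with
# --- r = min(dlambda)/max(lambda)/oversample. The function below is a toy,
# --- standalone version; the argument names and defaults are assumptions.
import numpy as np
from scipy import interpolate

def resample_log_wave(w, flux, oversample=1.0, c_kms=2.99792458e5):
    """Return (w_log, flux_log, stepV): the spectrum on a log-lambda grid."""
    min_w, max_w = np.min(w), np.max(w)
    min_dw = np.min(np.diff(w))                 # finest native pixel spacing
    r = min_dw / max_w / oversample             # fractional step, dlambda/lambda
    stepV = r * c_kms                           # uniform velocity step in km/s
    n = int(np.log(max_w / (min_w + 1)) / np.log(1 + r))
    n -= n % 2                                  # keep the array length even
    w_log = (min_w + 1) * (1 + r) ** np.arange(n)
    flux_log = interpolate.interp1d(w, flux)(w_log)
    return w_log, flux_log, stepV

# Example with a synthetic absorption line (values are made up):
# w = np.linspace(5000.0, 5100.0, 2000)
# f = 1.0 - 0.5 * np.exp(-0.5 * ((w - 5050.0) / 0.3) ** 2)
# w_log, f_log, stepV = resample_log_wave(w, f)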
def RChiS(x,y,yerr,func,params):
'''
A function to compute the Reduced Chi Square between some data and
a model.
Parameters
----------
x : array-like
Array of x values for data.
y : array-like
Array of y values for data.
yerr : array-like
Array of 1-sigma uncertainties for y values.
func : function
Function being compared to the data.
params : array-like
List of parameter values for the function above.
Returns
-------
rchis : float
The reduced chi square between the data and model.
'''
rchis=np.sum((y-func(x,*params))**2 / yerr**2 )/(x.size-params.size)
return rchis
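# --- Illustrative usage sketch (not part of saphires): fit a straight line to
# --- synthetic data with scipy.optimize.curve_fit and score the fit with RChiS.
# --- All data values below are made up.
def _rchis_demo():
    import numpy as np
    from scipy.optimize import curve_fit

    def line(x, m, b):
        return m * x + b

    rng = np.random.default_rng(0)
    x = np.linspace(0.0, 10.0, 50)
    yerr = np.full(x.size, 0.2)
    y = line(x, 1.5, 0.3) + rng.normal(0.0, 0.2, x.size)
    params, _cov = curve_fit(line, x, y, sigma=yerr, absolute_sigma=True)
    # RChiS uses params.size for the degrees of freedom, so pass an ndarray.
    return RChiS(x, y, yerr, line, np.asarray(params))   # ~1 for a good fit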
def region_select_pkl(target,template=None,tar_stretch=True,
temp_stretch=True,reverse=False,dk_wav='wav',dk_flux='flux',
tell_file=None,jump_to=0,reg_file=None):
'''
An interactive function to plot target and template spectra
that allows you to select useful regions with which to
compute the broadening functions, CCFs, etc.
This function is meant for spectra in a pickled dictionary.
For this function to work properly the template and target
spectrum have to have the same format, i.e. the same number
of orders and roughly the same wavelength coverage.
If a template spectrum is not specified, it will plot the
target twice, where it can be nice to have one stretched
and one not.
Functionality:
The function brings up an interactive figure with the target
on top and the template on bottom. Hitting the 'm' key will
mark wavelengths with dotted red lines. The 'b' key will mark the
start of a region with a solid black line and then the end of
the region with a dashed black line. Regions should always go
from small wavelengths to larger wavelengths, and regions
should always close (i.e., end with a dashed line). Hitting
the return key over the terminal will advance to the next order
and it will print the region(s) you've created to the terminal
screen in the format that the saphires.io.read
functions use. The regions from the previous order will show
up as dotted black lines allowing you to create regions that
do not overlap.
Parameters
----------
target : str
File name for a pickled dictionary that has wavelength and
flux arrays for the target spectrum with the header keywords
defined in the dk_wav and dk_flux arguments.
template : str, None
File name for a pickled dictionary that has wavelength and
flux arrays for the target spectrum with the header keywords
defined in the dk_wav and dk_flux arguments. If None, the
target spectrum will be plotted in both panels.
tar_stretch : bool
Option to window y-axis of the target spectrum plot on the
median with 50% above and below. This is useful for echelle
data with noisy edges. The default is True.
temp_stretch : bool
Option to window y-axis of the template spectrum plot on the
median with 50% above and below. This is useful for echelle
data with noisy edges. The default is True.
reverse : bool
This function works best when the orders are ordered with
ascending wavelength coverage. If this is not the case,
this option will flip them. The default is False, i.e., no
flip in the order.
dk_wav : str
Dictionary keyword for the wavelength array. Default is 'wav'
dk_flux : str
Dictionary keyword for the flux array. Default is 'flux'
tell_file : optional keyword, None or str
Name of file containing the location of telluric lines to be
plotted as vertical lines. This is useful when selecting
regions free of telluric contamination.
File must be a tab/space separated ascii text file with the
following format:
w_low w_high depth (compared to the continuum) w_central
This is modeled after the MAKEE telluric template here:
https://www2.keck.hawaii.edu/inst/common/makeewww/Atmosphere/atmabs.txt
but just a heads up, these are in vacuum.
If None, this option is ignored.
The default is None.
jump_to : int
Starting order. Useful when you want to pick up somewhere.
Default is 0.
reg_file : optional keyword, None or str
The name of a region file you want to overplot on the target and
template spectra. The start of a region will be a solid vertical
grey line. The end will be a dashed vertical grey line.
The region file has the same formatting requirements as the io.read
functions. The default is None.
Returns
-------
None
'''
l_range = []
def press_key(event):
if event.key == 'b':
l_range.append(np.round(event.xdata,2))
if (len(l_range)/2.0 % 1) != 0:
ax[0].axvline(event.xdata,ls='-',color='k')
ax[1].axvline(event.xdata,ls='-',color='k')
else:
ax[0].axvline(event.xdata,ls='--',color='k')
ax[1].axvline(event.xdata,ls='--',color='k')
plt.draw()
return l_range
if event.key == 'm':
ax[0].axvline(event.xdata,ls=':',color='r')
ax[1].axvline(event.xdata,ls=':',color='r')
plt.draw()
return
#------ Reading in telluric file --------------
if tell_file != None:
wl,wh,r,w_tell = np.loadtxt(tell_file,unpack=True)
#------ Reading in region file --------------
if reg_file != None:
name,reg_order,w_string = np.loadtxt(reg_file,unpack=True,dtype=nplts+'100,i,'+nplts+'1000')
#----- Reading in and Formatting ---------------
if template == None:
template = copy.deepcopy(target)
if py_version == 2:
tar = pkl.load(open(target,'rb'))
temp = pkl.load(open(template,'rb'))
if py_version == 3:
tar = pkl.load(open(target,'rb'),encoding='latin')
temp = pkl.load(open(template,'rb'),encoding='latin')
keys = list(tar.keys())
if dk_wav not in keys:
print("The wavelength array dictionary keyword specified, '"+dk_wav+"'")
print("was not found.")
return 0,0
if dk_flux not in keys:
print("The flux array dictionary keyword specified, '"+dk_flux+"'")
print("was not found.")
return 0,0
if (tar[dk_wav].ndim == 1):
order = 1
if (tar[dk_wav].ndim > 1):
order=tar[dk_wav].shape[0]
if reverse == False:
i = 0 + jump_to
if reverse == True:
i = order - jump_to - 1
#-------------------------------------------------
plt.ion()
i = 0 + jump_to
while i < order:
if order > 1:
if reverse == True:
i_ind = order-1-i
if reverse == False:
i_ind = i
flux = tar[dk_flux][i_ind]
w = tar[dk_wav][i_ind]
t_flux = temp[dk_flux][i_ind]
t_w = temp[dk_wav][i_ind]
else:
i_ind = i
flux = tar[dk_flux]
w = tar[dk_wav]
t_flux = temp[dk_flux]
t_w = temp[dk_wav]
#target
w = w[~np.isnan(flux)]
flux = flux[~np.isnan(flux)]
w = w[np.isfinite(flux)]
flux = flux[np.isfinite(flux)]
#template
t_w = t_w[~np.isnan(t_flux)]
t_flux = t_flux[~np.isnan(t_flux)]
t_w = t_w[np.isfinite(t_flux)]
t_flux = t_flux[np.isfinite(t_flux)]
#if ((w.size > 0) & (t_w.size > 0)):
fig,ax=plt.subplots(2,sharex=True,figsize=(14.25,7.5))
ax[0].set_title('Target - '+str(i_ind))
ax[0].plot(w,flux)
if len(l_range) > 0:
for j in range(len(l_range)):
ax[0].axvline(l_range[j],ls=':',color='red')
ax[0].set_ylabel('Flux')
ax[0].set_xlim(np.min(w),np.max(w))
if tell_file != None:
for j in range(w_tell.size):
if ((w_tell[j] > np.min(w)) & (w_tell[j] < np.max(w))) == True:
r_alpha = 1.0-r[j]
ax[0].axvline(w_tell[j],ls='--',color='blue',alpha=r_alpha)
ax[0].axvline(wl[j],ls=':',color='blue',alpha=r_alpha)
ax[0].axvline(wh[j],ls=':',color='blue',alpha=r_alpha)
ax[1].axvline(w_tell[j],ls='--',color='blue',alpha=r_alpha)
ax[1].axvline(wl[j],ls=':',color='blue',alpha=r_alpha)
ax[1].axvline(wh[j],ls=':',color='blue',alpha=r_alpha)
if reg_file != None:
if i_ind in reg_order:
reg_ind = np.where(reg_order == i_ind)[0][0]
n_regions=len(str(w_string[reg_ind]).split('-'))-1
for j in range(n_regions):
w_reg_start = float(w_string[reg_ind].split(',')[j].split('-')[0])
w_reg_end = float(w_string[reg_ind].split(',')[j].split('-')[1])
ax[0].axvline(w_reg_start,ls='-',color='grey')
ax[0].axvline(w_reg_end,ls='--',color='grey')
ax[1].axvline(w_reg_start,ls='-',color='grey')
ax[1].axvline(w_reg_end,ls='--',color='grey')
if tar_stretch == True:
ax[0].axis([np.min(w),np.max(w),
np.median(flux)-np.median(flux)*0.5,
np.median(flux)+np.median(flux)*0.5])
ax[0].grid(True,which='both',axis='both')
ax[1].set_title('Template - '+str(i_ind))
ax[1].plot(t_w,t_flux)
if len(l_range) > 0:
for j in range(len(l_range)):
ax[1].axvline(l_range[j],ls=':',color='red')
ax[1].set_ylabel('Flux')
ax[1].set_xlabel('Wavelength')
if ((t_flux.size > 0)&(temp_stretch==True)):
ax[1].axis([np.min(t_w),np.max(t_w),
np.median(t_flux)-np.median(t_flux)*0.5,
np.median(t_flux)+np.median(t_flux)*0.5])
ax[1].grid(True,which='both',axis='both')
plt.tight_layout()
l_range = []
cid = fig.canvas.mpl_connect('key_press_event',press_key)
wait = p_input('')
if wait != 'r':
i = i+1
if len(l_range) > 0:
out_range=''
for j in range(len(l_range)):
if j < len(l_range)-1:
if (j/2.0 % 1) != 0:
out_range=out_range+str(l_range[j])+','
if (j/2.0 % 1) == 0:
out_range=out_range+str(l_range[j])+'-'
if j == len(l_range)-1:
out_range=out_range+str(l_range[j])
print(target,i_ind,out_range)
if (len(l_range) == 0) & (reg_file != None):
if i_ind in reg_order:
i_reg = np.where(reg_order == i_ind)[0][0]
print(target,i_ind,w_string[i_reg])
fig.canvas.mpl_disconnect(cid)
plt.cla()
plt.close()
return
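# --- Illustrative sketch (not part of saphires): parsing the region strings
# --- printed above (e.g. '5380.0-5420.5,5500.0-5560.0', values made up) into
# --- (start, end) wavelength pairs, mirroring how n_regions is counted in
# --- this module with str(w_string).split('-').
def _parse_region_string(w_string):
    regions = []
    for chunk in str(w_string).split(','):
        if '-' not in chunk:
            continue
        w_lo, w_hi = chunk.split('-')
        regions.append((float(w_lo), float(w_hi)))
    return regions

# _parse_region_string('5380.0-5420.5,5500.0-5560.0')
# -> [(5380.0, 5420.5), (5500.0, 5560.0)]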
def region_select_vars(w,f,tar_stretch=True,reverse=False,tell_file=None,
jump_to=0):
'''
An interactive function to plot spectra that allows you
to select useful regions with which to compute the
broadening functions, CCFs, etc.
Functionality:
The function brings up an interactive figure with spectra.
Hitting the 'm' key will mark wavelengths with dotted red
lines. The 'b' key will mark the start of a region with a
solid black line and then the end of the region with a
dashed black line. Regions should always go from small
wavelengths to larger wavelengths, and regions should always
close (i.e., end with a dashed line). Hitting the return
key over the terminal will advance to the next order and it
will print the region(s) you've created to the terminal
screen in the format that the saphires.io.read_vars
function can use. The regions from the previous order will
show up as dotted black lines allowing you to create regions
that do not overlap.
Parameters
----------
w : array-like
Wavelength array assumed to be in Angstroms.
f : array-like
Flux array corresponding to w.
tar_stretch : bool
Option to window y-axis of the spectrum plot on the
median with 50% above and below. This is useful for echelle
data with noisy edges. The default is True.
reverse : bool
This function works best when the orders are ordered with
ascending wavelength coverage. If this is not the case,
this option will flip them. The default is False, i.e., no
flip in the order.
tell_file : optional keyword, None or str
Name of file containing the location of telluric lines to be
plotted as vertical lines. This is useful when selecting
regions free of telluric contamination.
File must be a tab/space separated ascii text file with the
following format:
w_low w_high depth (compared to the continuum) w_central
This is modeled after the MAKEE telluric template here:
https://www2.keck.hawaii.edu/inst/common/makeewww/Atmosphere/atmabs.txt
but just a heads up, these are in vacuum.
If None, this option is ignored.
The default is None.
jump_to : int
Starting order. Useful when you want to pick up somewhere.
Default is 0.
Returns
-------
None
'''
l_range = []
def press_key(event):
if event.key == 'b':
l_range.append(np.round(event.xdata,2))
if (len(l_range)/2.0 % 1) != 0:
ax[0].axvline(event.xdata,ls='-',color='k')
ax[1].axvline(event.xdata,ls='-',color='k')
else:
ax[0].axvline(event.xdata,ls='--',color='k')
ax[1].axvline(event.xdata,ls='--',color='k')
plt.draw()
return l_range
if event.key == 'm':
ax[0].axvline(event.xdata,ls=':',color='r')
ax[1].axvline(event.xdata,ls=':',color='r')
plt.draw()
return
#------ Reading in telluric file --------------
if tell_file != None:
wl,wh,r,w_tell = np.loadtxt(tell_file,unpack=True)
#----- Reading in and Formatting ---------------
if (w.ndim == 1):
order = 1
if (w.ndim > 1):
order=w.shape[0]
#-------------------------------------------------
plt.ion()
i = 0 + jump_to
while i < order:
if order > 1:
if reverse == True:
i_ind = order-1-i
if reverse == False:
i_ind = i
flux_plot = f[i_ind]
w_plot = w[i_ind]
else:
i_ind = i
flux_plot = f
w_plot = w
#target
w_plot = w_plot[~np.isnan(flux_plot)]
flux_plot = flux_plot[~np.isnan(flux_plot)]
w_plot = w_plot[np.isfinite(flux_plot)]
flux_plot = flux_plot[np.isfinite(flux_plot)]
fig,ax=plt.subplots(2,sharex=True,figsize=(14.25,7.5))
ax[0].set_title('Target - '+str(i_ind))
ax[0].plot(w_plot,flux_plot)
if len(l_range) > 0:
for j in range(len(l_range)):
ax[0].axvline(l_range[j],ls=':',color='red')
ax[0].set_ylabel('Flux')
ax[0].set_xlim(np.min(w_plot),np.max(w_plot))
if tell_file != None:
for j in range(w_tell.size):
if ((w_tell[j] > np.min(w_plot)) & (w_tell[j] < np.max(w_plot))) == True:
r_alpha = 1.0-r[j]
ax[0].axvline(w_tell[j],ls='--',color='blue',alpha=r_alpha)
ax[0].axvline(wl[j],ls=':',color='blue',alpha=r_alpha)
ax[0].axvline(wh[j],ls=':',color='blue',alpha=r_alpha)
ax[1].axvline(w_tell[j],ls='--',color='blue',alpha=r_alpha)
ax[1].axvline(wl[j],ls=':',color='blue',alpha=r_alpha)
ax[1].axvline(wh[j],ls=':',color='blue',alpha=r_alpha)
if tar_stretch == True:
ax[0].axis([np.min(w_plot),np.max(w_plot),
np.median(flux_plot)-np.median(flux_plot)*0.5,
np.median(flux_plot)+np.median(flux_plot)*0.5])
ax[0].grid(True,which='both',axis='both')
ax[1].plot(w_plot,flux_plot)
if len(l_range) > 0:
for j in range(len(l_range)):
ax[1].axvline(l_range[j],ls=':',color='red')
ax[1].set_ylabel('Flux')
ax[1].set_xlabel('Wavelength')
ax[1].grid(True,which='both',axis='both')
plt.tight_layout()
l_range = []
cid = fig.canvas.mpl_connect('key_press_event',press_key)
wait = p_input('')
if wait != 'r':
i = i+1
if len(l_range) > 0:
out_range=''
for j in range(len(l_range)):
if j < len(l_range)-1:
if (j/2.0 % 1) != 0:
out_range=out_range+str(l_range[j])+','
if (j/2.0 % 1) == 0:
out_range=out_range+str(l_range[j])+'-'
if j == len(l_range)-1:
out_range=out_range+str(l_range[j])
print(i_ind,out_range)
fig.canvas.mpl_disconnect(cid)
plt.cla()
plt.close()
plt.ioff()
return
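# --- Illustrative sketch (not part of saphires): the telluric line list read
# --- by the region-selection functions is a plain whitespace-separated file
# --- with four columns, w_low  w_high  depth  w_central. The file name and
# --- values below are made up for demonstration.
def _demo_tell_file(path='tell_demo.dat'):
    import numpy as np
    demo = np.array([[6865.0, 6870.0, 0.8, 6867.5],
                     [7590.0, 7600.0, 0.9, 7595.0]])
    np.savetxt(path, demo, fmt='%10.2f')
    # Same unpacking pattern as used above:
    wl, wh, depth, w_tell = np.loadtxt(path, unpack=True)
    return wl, wh, depth, w_tell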
def region_select_ms(target,template=None,tar_stretch=True,
temp_stretch=True,reverse=False,t_order=0,temp_order=0,
header_wave=False,w_mult=1,igrins_default=False,
tell_file=None,jump_to=0,reg_file=None):
'''
An interactive function to plot target and template spectra
that allows you to select useful regions with which to
compute the broadening functions, CCFs, etc.
This function is meant for multi-order spectra in a fits file.
For this function to work properly the template and target
spectrum have to have the same format, i.e. the same number
of orders and roughly the same wavelength coverage.
If a template spectrum is not specified, it will plot the
target twice, where it can be nice to have one stretched
and one not.
Functionality:
The function brings up an interactive figure with the target
on top and the template on bottom. Hitting the 'm' key will
mark wavelengths with dotted red lines. The 'b' key will mark the
start of a region with a solid black line and then the end of
the region with a dashed black line. Regions should always go
from small wavelengths to larger wavelengths, and regions
should always close (i.e., end with a dashed line). Hitting
the return key over the terminal will advance to the next order
and it will print the region(s) you've created to the terminal
screen in the format that the saphires.io.read
functions use (you may have to delete commas and parentheses
depending on whether you are running this command in python 2
or 3). The regions from the previous order will show
up as dotted black lines allowing you to create regions that
do not overlap.
Parameters
----------
target : str
File name for a pickled dictionary that has wavelength and
flux arrays for the target spectrum with the header keywords
defined in the dk_wav and dk_flux arguments.
template : str, None
File name for a pickled dictionary that has wavelength and
flux arrays for the target spectrum with the header keywords
defined in the dk_wav and dk_flux arguments. If None, the
target spectrum will be plotted in both panels.
tar_stretch : bool
Option to window y-axis of the target spectrum plot on the
median with 50% above and below. This is useful for echelle
data with noisy edges. The default is True.
temp_stretch : bool
Option to window y-axis of the template spectrum plot on the
median with 50% above and below. This is useful for echelle
data with noisy edges. The default is True.
reverse : bool
This function works best when the orders are ordered with
ascending wavelength coverage. If this is not the case,
this option will flip them. The default is False, i.e., no
flip in the order.
t_order : int
The order of the target spectrum. Some multi-order spectra
come in multi-extension fits files (e.g. IGRINS). This
parameter defines that extension. The default is 0.
temp_order : int
The order of the template spectrum. Some multi-order spectra
come in multi-extension fits files (e.g. IGRINS). This
parameter defines that extension. The default is 0.
header_wave : bool or 'Single'
Whether to assign the wavelength array from the header keywords or
from a separate fits extension. If True, it uses the header keywords,
assuming they are linearly spaced. If False, it looks in the second
fits extension, i.e. hdu[1].data.
If header_wave is set to 'Single', it treats each fits extension like
a single-order fits file that could be read in with saph.io.read_fits.
This feature is useful for SALT/HRS spectra reduced with the MIDAS
pipeline.
w_mult : float
Value to multiply the wavelength array. This is used to convert the
input wavelength array to Angstroms if it is not already. The default
is 1, assuming the wavelength array is already in Angstroms.
igrins_default : bool
The option to override all of the input arguments to parameters
that are tailored to IGRINS data. Keyword arguments will be set
to:
t_order = 0
temp_order = 3
temp_stretch = False
header_wave = False
w_mult = 10**4
reverse = True
tell_file : optional keyword, None or str
Name of file containing the location of telluric lines to be
plotted as vertical lines. This is useful when selecting
regions free of telluric contamination.
File must be a tab/space separated ascii text file with the
following format:
w_low w_high depth (compared to the continuum) w_central
This is modeled after the MAKEE telluric template here:
https://www2.keck.hawaii.edu/inst/common/makeewww/Atmosphere/atmabs.txt
but just a heads up, these are in vacuum.
If None, this option is ignored.
The default is None.
jump_to : int
Starting order. Useful when you want to pick up somewhere.
Default is 0.
reg_file : optional keyword, None or str
The name of a region file you want to overplot on the target and
template spectra. The start of a region will be a solid vertical
grey line. The end will be a dashed vertical grey line.
The region file has the same formatting requirements as the io.read
functions. The default is None.
Returns
-------
None
'''
l_range = []
def press_key(event):
if event.key == 'b':
l_range.append(np.round(event.xdata,2))
if (len(l_range)/2.0 % 1) != 0:
ax[0].axvline(event.xdata,ls='-',color='k')
ax[1].axvline(event.xdata,ls='-',color='k')
else:
ax[0].axvline(event.xdata,ls='--',color='k')
ax[1].axvline(event.xdata,ls='--',color='k')
plt.draw()
return l_range
if event.key == 'm':
ax[0].axvline(event.xdata,ls=':',color='r')
ax[1].axvline(event.xdata,ls=':',color='r')
plt.draw()
return
if igrins_default == True:
t_order = 0
temp_order = 3
temp_stretch = False
header_wave = False
w_mult = 10**4
reverse = True
#------ Reading in telluric file --------------
if tell_file != None:
wl,wh,r,w_tell = np.loadtxt(tell_file,unpack=True)
#------ Reading in region file --------------
if reg_file != None:
name,reg_order,w_string = np.loadtxt(reg_file,unpack=True,dtype=nplts+'100,i,'+nplts+'1000')
if (name.size == 1):
name=np.array([name])
reg_order=np.array([reg_order])
w_string=np.array([w_string])
#----- Reading in and Formatting ---------------
if template == None:
template = copy.deepcopy(target)
hdulist = pyfits.open(target)
t_hdulist = pyfits.open(template)
if header_wave != 'Single':
order = hdulist[t_order].data.shape[0]
else:
order = len(hdulist)
plt.ion()
if reverse == False:
i = 0 + jump_to
if reverse == True:
i = order - jump_to - 1
while i < order:
if reverse == True:
i_ind = order-1-i
if reverse == False:
i_ind = i
#---- Read in the Target -----
if header_wave == 'Single':
flux=hdulist[i_ind].data
w0=float(hdulist[i_ind].header['CRVAL1'])
dw=float(hdulist[i_ind].header['CDELT1'])
if 'LTV1' in hdulist[i_ind].header:
shift=float(hdulist[i_ind].header['LTV1'])
w0=w0-shift*dw
w0=w0 * w_mult
dw=dw * w_mult
w=np.arange(flux.size)*dw+w0
if header_wave == False:
flux = hdulist[t_order].data[i_ind]
w = hdulist[1].data[i_ind]*w_mult
dw=(np.max(w) - np.min(w))/float(w.size)
if header_wave == True:
flux = hdulist[t_order].data[i_ind]
#Pulls out all headers that have the WAT2 keywords
header_keys=np.array(list(hdulist[t_order].header.keys()),dtype=str)
header_test=np.array([header_keys[d][0:4]=='WAT2' \
for d in range(header_keys.size)])
w_sol_inds=np.where(header_test==True)[0]
#The loop below puts all the header extensions into one string
w_sol_str=''
for j in range(w_sol_inds.size):
if len(hdulist[t_order].header[w_sol_inds[j]]) == 68:
w_sol_str=w_sol_str+hdulist[t_order].header[w_sol_inds[j]]
if len(hdulist[t_order].header[w_sol_inds[j]]) == 67:
w_sol_str=w_sol_str+hdulist[t_order].header[w_sol_inds[j]]+' '
if len(hdulist[t_order].header[w_sol_inds[j]]) == 66:
w_sol_str=w_sol_str+hdulist[t_order].header[w_sol_inds[j]]+' '
if len(hdulist[t_order].header[w_sol_inds[j]]) < 66:
w_sol_str=w_sol_str+hdulist[t_order].header[w_sol_inds[j]]
# normalized the formatting
w_sol_str=w_sol_str.replace(' ',' ').replace(' ',' ').replace(' ',' ')
# removed wavelength solution preamble
w_sol_str=w_sol_str[16:]
#Check that the wavelength solution is linear.
w_parameters = len(w_sol_str.split(' = ')[1].split(' '))
if w_parameters > 11:
print('Your header wavelength solution is not linear')
print('Non-linear wavelength solutions are not currently supported')
print('Aborting...')
return
w_type = float(w_sol_str.split('spec')[1:][i_ind].split(' ')[3])
if w_type != 0:
print('Your header wavelength solution is not linear')
print('Non-linear wavelength solutions are not currently supported')
print('Aborting...')
return
w0 = float(w_sol_str.split('spec')[1:][i_ind].split(' ')[5])
dw = float(w_sol_str.split('spec')[1:][i_ind].split(' ')[6])
z = float(w_sol_str.split('spec')[1:][i_ind].split(' ')[7])
w = ((np.arange(flux.size)*dw+w0)/(1+z))*w_mult
#---- Read in the Template -----
if header_wave == 'Single':
t_flux=t_hdulist[i_ind].data
t_w0=float(t_hdulist[i_ind].header['CRVAL1'])
t_dw=float(t_hdulist[i_ind].header['CDELT1'])
if 'LTV1' in t_hdulist[i_ind].header:
t_shift=float(t_hdulist[i_ind].header['LTV1'])
t_w0=t_w0-t_shift*t_dw
t_w0=t_w0 * w_mult
t_dw=t_dw * w_mult
t_w=np.arange(t_flux.size)*t_dw+t_w0
if header_wave == False:
t_flux = t_hdulist[temp_order].data[i_ind]
t_w = t_hdulist[1].data[i_ind]*w_mult
t_dw=(np.max(t_w) - np.min(t_w))/float(t_w.size)
if header_wave == True:
t_flux = t_hdulist[temp_order].data[i_ind]
#Pulls out all headers that have the WAT2 keywords
header_keys=np.array(list(t_hdulist[temp_order].header.keys()),dtype=str)
header_test=np.array([header_keys[d][0:4]=='WAT2' \
for d in range(header_keys.size)])
w_sol_inds=np.where(header_test==True)[0]
#The loop below puts all the header extensions into one string
w_sol_str=''
for j in range(w_sol_inds.size):
if len(t_hdulist[temp_order].header[w_sol_inds[j]]) == 68:
w_sol_str=w_sol_str+t_hdulist[temp_order].header[w_sol_inds[j]]
if len(t_hdulist[temp_order].header[w_sol_inds[j]]) == 67:
w_sol_str=w_sol_str+t_hdulist[temp_order].header[w_sol_inds[j]]+' '
if len(t_hdulist[temp_order].header[w_sol_inds[j]]) == 66:
w_sol_str=w_sol_str+t_hdulist[temp_order].header[w_sol_inds[j]]+' '
if len(t_hdulist[temp_order].header[w_sol_inds[j]]) < 66:
w_sol_str=w_sol_str+t_hdulist[temp_order].header[w_sol_inds[j]]
# normalized the formatting
w_sol_str=w_sol_str.replace(' ',' ').replace(' ',' ').replace(' ',' ')
# removed wavelength solution preamble
w_sol_str=w_sol_str[16:]
#Check that the wavelength solution is linear.
w_parameters = len(w_sol_str.split(' = ')[1].split(' '))
if w_parameters > 11:
print('Your header wavelength solution is not linear')
print('Non-linear wavelength solutions are not currently supported')
print('Aborting...')
return
w_type = float(w_sol_str.split('spec')[1:][i_ind].split(' ')[3])
if w_type != 0:
print('Your header wavelength solution is not linear')
print('Non-linear wavelength solutions are not currently supported')
print('Aborting...')
return
t_w0 = float(w_sol_str.split('spec')[1:][i_ind].split(' ')[5])
t_dw = float(w_sol_str.split('spec')[1:][i_ind].split(' ')[6])
z = float(w_sol_str.split('spec')[1:][i_ind].split(' ')[7])
t_w = ((np.arange(t_flux.size)*t_dw+t_w0)/(1+z))*w_mult
#target
w = w[~np.isnan(flux)]
flux = flux[~np.isnan(flux)]
w = w[np.isfinite(flux)]
flux = flux[np.isfinite(flux)]
#template
t_w = t_w[~np.isnan(t_flux)]
t_flux = t_flux[~np.isnan(t_flux)]
t_w = t_w[np.isfinite(t_flux)]
t_flux = t_flux[np.isfinite(t_flux)]
#--------Interactive Plotting --------
fig,ax=plt.subplots(2,sharex=True,figsize=(14.25,7.5))
if ((w.size > 0) &(t_w.size > 0)):
ax[0].set_title('Target - '+str(i_ind))
ax[0].plot(w,flux)
if len(l_range) > 0:
for j in range(len(l_range)):
ax[0].axvline(l_range[j],ls=':',color='red')
ax[0].set_ylabel('Flux')
ax[0].set_xlim(np.min(w),np.max(w))
if tell_file != None:
for j in range(w_tell.size):
if ((w_tell[j] > np.min(w)) & (w_tell[j] < np.max(w))) == True:
import argparse
from collections import Counter
import numpy as np
import os
from tqdm import tqdm, trange
from collections import defaultdict
import pickle
import torch
import uuid
from prob_cbr.data.data_utils import load_data_from_triples, get_unique_entities_from_triples, \
read_graph_from_triples, get_entities_group_by_relation_from_triples, get_inv_relation, create_adj_list_from_triples
from prob_cbr.data.stream_utils import KBStream
from prob_cbr.data.get_paths import get_paths
from prob_cbr.clustering.grinch_with_deletes import GrinchWithDeletes
from typing import *
import logging
import json
import sys
import wandb
import copy
logger = logging.getLogger('main')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("[%(asctime)s \t %(message)s]",
"%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter)
logger.addHandler(ch)
class ProbCBR(object):
def __init__(self, args, train_map, eval_map, entity_vocab, rev_entity_vocab, rel_vocab, rev_rel_vocab, eval_vocab,
eval_rev_vocab, all_paths, rel_ent_map):
self.args = args
self.eval_map = eval_map
self.train_map = train_map
self.all_zero_ctr = []
self.all_num_ret_nn = []
self.entity_vocab, self.rev_entity_vocab, self.rel_vocab, self.rev_rel_vocab = entity_vocab, rev_entity_vocab, rel_vocab, rev_rel_vocab
self.eval_vocab, self.eval_rev_vocab = eval_vocab, eval_rev_vocab
self.all_paths = all_paths
self.rel_ent_map = rel_ent_map
self.num_non_executable_programs = []
self.query_c = None
self.nearest_neighbor_1_hop = None
def set_nearest_neighbor_1_hop(self, nearest_neighbor_1_hop):
self.nearest_neighbor_1_hop = nearest_neighbor_1_hop
@staticmethod
def calc_sim(adj_mat: Type[torch.Tensor], query_entities: Type[torch.LongTensor]) -> Type[torch.LongTensor]:
"""
:param adj_mat: N x R adjacency matrix (entities by relations)
:param query_entities: a batch of indices of query entities
:return: inner-product similarity between each query entity and all N entities
"""
query_entities_vec = torch.index_select(adj_mat, dim=0, index=query_entities)
sim = torch.matmul(query_entities_vec, torch.t(adj_mat))
return sim
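# --- Illustrative sketch (not part of prob_cbr): what calc_sim computes on a
# --- toy entity-by-relation adjacency matrix. Shapes and values are made up.
def _calc_sim_demo():
    import torch
    adj_mat = torch.tensor([[1., 0., 1.],    # entity 0 participates in r0, r2
                            [1., 1., 0.],    # entity 1 participates in r0, r1
                            [0., 0., 1.]])   # entity 2 participates in r2
    query_entities = torch.LongTensor([0])
    sim = ProbCBR.calc_sim(adj_mat, query_entities)   # 1 x 3 inner products
    return sim                                        # tensor([[2., 1., 1.]])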
def get_nearest_neighbor_inner_product(self, e1: str, r: str, k: Optional[int] = 5) -> Union[List[str], None]:
try:
nearest_entities = [self.rev_entity_vocab[e] for e in
self.nearest_neighbor_1_hop[self.eval_vocab[e1]].tolist()]
# remove e1 from the set of k-nearest neighbors if it is there.
nearest_entities = [nn for nn in nearest_entities if nn != e1]
# making sure, that the similar entities also have the query relation
ctr = 0
temp = []
for nn in nearest_entities:
if ctr == k:
break
if len(self.train_map[nn, r]) > 0:
temp.append(nn)
ctr += 1
nearest_entities = temp
except KeyError:
return None
return nearest_entities
def get_nearest_neighbor_naive(self, e1: str, r: str, k: Optional[int] = 5) -> List[str]:
"""
Return entities which have the query relation
:param e1:
:param r:
:param k:
:return:
"""
entities_with_r = self.rel_ent_map[r]
# choose randomly from this set
# pick (k+1), if e1 is there then remove it otherwise return first k
nearest_entities = np.random.choice(entities_with_r, k + 1)
if e1 in nearest_entities:
nearest_entities = [i for i in nearest_entities if i != e1]
else:
nearest_entities = nearest_entities[:k]
return nearest_entities
def get_programs(self, e: str, ans: str, all_paths_around_e: List[List[str]]):
"""
Given an entity and answer, get all paths which end at that ans node in the subgraph surrounding e
"""
all_programs = []
for path in all_paths_around_e:
for l, (r, e_dash) in enumerate(path):
if e_dash == ans:
# get the path till this point
all_programs.append([x for (x, _) in path[:l + 1]]) # we only need to keep the relations
return all_programs
def get_programs_from_nearest_neighbors(self, e1: str, r: str, nn_func: Callable, num_nn: Optional[int] = 5):
all_programs = []
nearest_entities = nn_func(e1, r, k=num_nn)
if nearest_entities is None:
self.all_num_ret_nn.append(0)
return None
self.all_num_ret_nn.append(len(nearest_entities))
zero_ctr = 0
for e in nearest_entities:
if len(self.train_map[(e, r)]) > 0:
paths_e = self.all_paths[e] # get the collected 3 hop paths around e
nn_answers = self.train_map[(e, r)]
for nn_ans in nn_answers:
all_programs += self.get_programs(e, nn_ans, paths_e)
elif len(self.train_map[(e, r)]) == 0:
zero_ctr += 1
self.all_zero_ctr.append(zero_ctr)
return all_programs
def rank_programs(self, list_programs: List[str], r: str) -> List[str]:
"""
Rank programs.
"""
# sort it by the path score
unique_programs = set()
for p in list_programs:
unique_programs.add(tuple(p))
# now get the score of each path
path_and_scores = []
for p in unique_programs:
try:
path_and_scores.append((p, self.args.path_prior_map_per_relation[self.query_c][r][p] *
self.args.precision_map[self.query_c][r][p]))
except KeyError:
# TODO: Fix key error
if len(p) == 1 and p[0] == r:
continue # ignore query relation
else:
# use the fall back score
try:
c = 0
score = self.args.path_prior_map_per_relation_fallback[c][r][p] * \
self.args.precision_map_fallback[c][r][p]
path_and_scores.append((p, score))
except KeyError:
# still a path or rel is missing.
path_and_scores.append((p, 0))
# sort wrt counts
sorted_programs = [k for k, v in sorted(path_and_scores, key=lambda item: -item[1])]
return sorted_programs
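# --- Illustrative sketch (not part of prob_cbr): the program score used above
# --- is prior(path | r, cluster) * precision(path | r, cluster), with a
# --- fallback to the single "cluster 0" statistics. The maps below are toy data.
def _score_program_demo():
    prior = {('works_in', 'located_in'): 0.6, ('born_in',): 0.4}
    precision = {('works_in', 'located_in'): 0.5, ('born_in',): 0.9}
    scores = {p: prior[p] * precision[p] for p in prior}
    # sort paths by descending score, as rank_programs does
    return sorted(scores, key=lambda p: -scores[p])
    # -> [('born_in',), ('works_in', 'located_in')]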
def execute_one_program(self, e: str, path: List[str], depth: int, max_branch: int):
"""
starts from an entity and executes the path by doing depth first search. If there are multiple edges with the same label, we consider
max_branch number.
"""
if depth == len(path):
# reached end, return node
return [e]
next_rel = path[depth]
next_entities = self.train_map[(e, path[depth])]
if len(next_entities) == 0:
# edge not present
return []
if len(next_entities) > max_branch:
# select max_branch random entities
next_entities = np.random.choice(next_entities, max_branch, replace=False).tolist()
answers = []
for e_next in next_entities:
answers += self.execute_one_program(e_next, path, depth + 1, max_branch)
return answers
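# --- Illustrative sketch (not part of prob_cbr): depth-first execution of a
# --- relation path against a toy train_map of (entity, relation) -> entities,
# --- mirroring execute_one_program without the max_branch sampling.
def _execute_path_demo():
    train_map = {('alice', 'works_in'): ['acme'],
                 ('acme', 'located_in'): ['paris', 'london']}
    def walk(e, path, depth=0):
        if depth == len(path):
            return [e]
        out = []
        for e_next in train_map.get((e, path[depth]), []):
            out += walk(e_next, path, depth + 1)
        return out
    return walk('alice', ['works_in', 'located_in'])   # ['paris', 'london']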
def execute_programs(self, e: str, r: str, path_list: List[List[str]], max_branch: Optional[int] = 1000) -> Tuple[List, List]:
def _fall_back(r, p):
"""
When a cluster does not have a query relation (because it was not seen during counting)
or if a path is not found, then fall back to no cluster statistics
:param r:
:param p:
:return:
"""
score = 0
c = 0 # one cluster for all entity
try:
score = self.args.path_prior_map_per_relation_fallback[c][r][p] * \
self.args.precision_map_fallback[c][r][p]
except KeyError:
# either the path or relation is missing from the fall back map as well
score = 0
return score
all_answers = []
not_executed_paths = []
execution_fail_counter = 0
executed_path_counter = 0
for path in path_list:
if executed_path_counter == self.args.max_num_programs:
break
ans = self.execute_one_program(e, path, depth=0, max_branch=max_branch)
temp = []
for a in ans:
path = tuple(path)
if self.args.use_path_counts:
try:
if path in self.args.path_prior_map_per_relation[self.query_c][r] and path in \
self.args.precision_map[self.query_c][r]:
temp.append((a,
self.args.path_prior_map_per_relation[self.query_c][r][path] *
self.args.precision_map[self.query_c][r][path],
path))
else:
# logger.info("This path was not there in the cluster for the relation.")
score = _fall_back(r, path)
temp.append((a, score, path))
except KeyError:
# logger.info("Looks like the relation was not found in the cluster, have to fall back")
# fallback to the global scores
score = _fall_back(r, path)
temp.append((a, score, path))
else:
temp.append((a, 1, path))
ans = temp
if ans == []:
not_executed_paths.append(path)
execution_fail_counter += 1
else:
executed_path_counter += 1
all_answers += ans
# if len(all_answers) == 0:
# all_answers = set(ans)
# else:
# all_answers = all_answers.intersection(set(ans))
self.num_non_executable_programs.append(execution_fail_counter)
return all_answers, not_executed_paths
def rank_answers(self, list_answers: List[str]) -> List[str]:
"""
Different ways to re-rank answers
"""
count_map = {}
uniq_entities = set()
for e, e_score, path in list_answers:
if e not in count_map:
count_map[e] = {}
if path not in count_map[e]:
count_map[e][path] = e_score # just count once for a path type.
uniq_entities.add(e)
score_map = defaultdict(int)
for e, path_scores_map in count_map.items():
sum_path_score = 0
for p, p_score in path_scores_map.items():
sum_path_score += p_score
score_map[e] = sum_path_score
sorted_entities_by_val = sorted(score_map.items(), key=lambda kv: -kv[1])
return sorted_entities_by_val
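# --- Illustrative sketch (not part of prob_cbr): rank_answers sums, per
# --- candidate entity, the score of each distinct path type that reached it.
# --- The triples below are toy (answer, path_score, path) tuples.
def _rank_answers_demo():
    from collections import defaultdict
    answers = [('paris', 0.30, ('works_in', 'located_in')),
               ('paris', 0.36, ('born_in',)),
               ('london', 0.30, ('works_in', 'located_in'))]
    score = defaultdict(float)
    seen = set()
    for e, s, p in answers:
        if (e, p) not in seen:          # count each path type once per entity
            score[e] += s
            seen.add((e, p))
    return sorted(score.items(), key=lambda kv: -kv[1])
    # -> [('paris', ~0.66), ('london', 0.30)]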
@staticmethod
def get_rank_in_list(e, predicted_answers):
rank = 0
for i, e_to_check in enumerate(predicted_answers):
if e == e_to_check:
return i + 1
return -1
def get_hits(self, list_answers: List[str], gold_answers: List[str], query: Tuple[str, str]) -> Tuple[
float, float, float, float, float]:
hits_1 = 0.0
hits_3 = 0.0
hits_5 = 0.0
hits_10 = 0.0
rr = 0.0
(e1, r) = query
all_gold_answers = self.args.all_kg_map[(e1, r)]
for gold_answer in gold_answers:
# remove all other gold answers from prediction
filtered_answers = []
for pred in list_answers:
if pred in all_gold_answers and pred != gold_answer:
continue
else:
filtered_answers.append(pred)
rank = ProbCBR.get_rank_in_list(gold_answer, filtered_answers)
if rank > 0:
if rank <= 10:
hits_10 += 1
if rank <= 5:
hits_5 += 1
if rank <= 3:
hits_3 += 1
if rank <= 1:
hits_1 += 1
rr += 1.0 / rank
return hits_10, hits_5, hits_3, hits_1, rr
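# --- Illustrative sketch (not part of prob_cbr): the "filtered" rank used in
# --- get_hits removes all *other* gold answers from the prediction list before
# --- locating the gold answer being scored. Entities below are made up.
def _filtered_rank_demo():
    predictions = ['berlin', 'paris', 'london', 'rome']
    all_gold = {'paris', 'london'}
    gold = 'london'
    filtered = [p for p in predictions if p == gold or p not in all_gold]
    rank = filtered.index(gold) + 1      # 'paris' is filtered out, so rank is 2
    return rank, 1.0 / rank              # contributes to hits@3/5/10 and MRR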
@staticmethod
def get_accuracy(gold_answers: List[str], list_answers: List[str]) -> List[float]:
all_acc = []
for gold_ans in gold_answers:
if gold_ans in list_answers:
all_acc.append(1.0)
else:
all_acc.append(0.0)
return all_acc
def do_symbolic_case_based_reasoning(self):
num_programs = []
num_answers = []
all_acc = []
non_zero_ctr = 0
hits_10, hits_5, hits_3, hits_1, mrr = 0.0, 0.0, 0.0, 0.0, 0.0
per_relation_scores = {} # map of performance per relation
per_relation_query_count = {}
total_examples = 0
learnt_programs = defaultdict(lambda: defaultdict(int)) # for each query relation, a map of programs to count
for _, ((e1, r), e2_list) in enumerate(tqdm((self.eval_map.items()))):
# if e2_list is in train list then remove them
# Normally, this shouldn't happen at all, but this happens for Nell-995.
orig_train_e2_list = self.train_map[(e1, r)]
temp_train_e2_list = []
for e2 in orig_train_e2_list:
if e2 in e2_list:
continue
temp_train_e2_list.append(e2)
self.train_map[(e1, r)] = temp_train_e2_list
# also remove (e2, r^-1, e1)
r_inv = get_inv_relation(r, self.args.dataset_name)
temp_map = {} # map from (e2, r_inv) -> outgoing nodes
for e2 in e2_list:
temp_map[(e2, r_inv)] = self.train_map[e2, r_inv]
temp_list = []
for e1_dash in self.train_map[e2, r_inv]:
if e1_dash == e1:
continue
else:
temp_list.append(e1_dash)
self.train_map[e2, r_inv] = temp_list
total_examples += len(e2_list)
if e1 not in self.entity_vocab:
all_acc += [0.0] * len(e2_list)
# put it back
self.train_map[(e1, r)] = orig_train_e2_list
for e2 in e2_list:
self.train_map[(e2, r_inv)] = temp_map[(e2, r_inv)]
continue
self.query_c = self.args.cluster_assignments[self.entity_vocab[e1]]
all_programs = self.get_programs_from_nearest_neighbors(e1, r, self.get_nearest_neighbor_inner_product,
num_nn=self.args.k_adj)
if all_programs is None or len(all_programs) == 0:
all_acc += [0.0] * len(e2_list)
# put it back
self.train_map[(e1, r)] = orig_train_e2_list
for e2 in e2_list:
self.train_map[(e2, r_inv)] = temp_map[(e2, r_inv)]
continue
for p in all_programs:
if p[0] == r:
continue
if r not in learnt_programs:
learnt_programs[r] = {}
p = tuple(p)
if p not in learnt_programs[r]:
learnt_programs[r][p] = 0
learnt_programs[r][p] += 1
# filter the program if it is equal to the query relation
temp = []
for p in all_programs:
if len(p) == 1 and p[0] == r:
continue
temp.append(p)
all_programs = temp
if len(all_programs) > 0:
non_zero_ctr += len(e2_list)
all_uniq_programs = self.rank_programs(all_programs, r)
num_programs.append(len(all_uniq_programs))
# Now execute the program
answers, not_executed_programs = self.execute_programs(e1, r, all_uniq_programs)
# if len(not_executed_programs) > 0:
# import pdb
# pdb.set_trace()
answers = self.rank_answers(answers)
if len(answers) > 0:
acc = self.get_accuracy(e2_list, [k[0] for k in answers])
_10, _5, _3, _1, rr = self.get_hits([k[0] for k in answers], e2_list, query=(e1, r))
hits_10 += _10
hits_5 += _5
hits_3 += _3
hits_1 += _1
mrr += rr
if self.args.output_per_relation_scores:
if r not in per_relation_scores:
per_relation_scores[r] = {"hits_1": 0, "hits_3": 0, "hits_5": 0, "hits_10": 0, "mrr": 0}
per_relation_query_count[r] = 0
per_relation_scores[r]["hits_1"] += _1
per_relation_scores[r]["hits_3"] += _3
per_relation_scores[r]["hits_5"] += _5
per_relation_scores[r]["hits_10"] += _10
per_relation_scores[r]["mrr"] += rr
per_relation_query_count[r] += len(e2_list)
else:
acc = [0.0] * len(e2_list)
all_acc += acc
num_answers.append(len(answers))
# put it back
self.train_map[(e1, r)] = orig_train_e2_list
for e2 in e2_list:
self.train_map[(e2, r_inv)] = temp_map[(e2, r_inv)]
if self.args.output_per_relation_scores:
for r, r_scores in per_relation_scores.items():
r_scores["hits_1"] /= per_relation_query_count[r]
r_scores["hits_3"] /= per_relation_query_count[r]
r_scores["hits_5"] /= per_relation_query_count[r]
r_scores["hits_10"] /= per_relation_query_count[r]
r_scores["mrr"] /= per_relation_query_count[r]
out_file_name = os.path.join(self.args.output_dir, "per_relation_scores.json")
fout = open(out_file_name, "w")
logger.info("Writing per-relation scores to {}".format(out_file_name))
fout.write(json.dumps(per_relation_scores, sort_keys=True, indent=4))
fout.close()
logger.info(
"Out of {} queries, atleast one program was returned for {} queries".format(total_examples, non_zero_ctr))
logger.info("Avg number of programs {:3.2f}".format(np.mean(num_programs)))
logger.info("Avg number of answers after executing the programs: {}".format(np.mean(num_answers)))
logger.info("Accuracy (Loose): {}".format(np.mean(all_acc)))
logger.info("Hits@1 {}".format(hits_1 / total_examples))
logger.info("Hits@3 {}".format(hits_3 / total_examples))
logger.info("Hits@5 {}".format(hits_5 / total_examples))
logger.info("Hits@10 {}".format(hits_10 / total_examples))
logger.info("MRR {}".format(mrr / total_examples))
logger.info("Avg number of nn, that do not have the query relation: {}".format(
np.mean(self.all_zero_ctr)))
logger.info("Avg num of returned nearest neighbors: {:2.4f}".format(np.mean(self.all_num_ret_nn)))
logger.info("Avg number of programs that do not execute per query: {:2.4f}".format(
np.mean(self.num_non_executable_programs)))
if self.args.print_paths:
for k, v in learnt_programs.items():
logger.info("query: {}".format(k))
logger.info("=====" * 2)
for rel, _ in learnt_programs[k].items():
logger.info((rel, learnt_programs[k][rel]))
logger.info("=====" * 2)
if self.args.use_wandb:
# Log all metrics
wandb.log({'hits_1': hits_1 / total_examples, 'hits_3': hits_3 / total_examples,
'hits_5': hits_5 / total_examples, 'hits_10': hits_10 / total_examples,
'mrr': mrr / total_examples, 'total_examples': total_examples, 'non_zero_ctr': non_zero_ctr,
'all_zero_ctr': self.all_zero_ctr, 'avg_num_nn': np.mean(self.all_num_ret_nn),
'avg_num_prog': np.mean(num_programs), 'avg_num_ans': np.mean(num_answers),
'avg_num_failed_prog': np.mean(self.num_non_executable_programs), 'acc_loose': np.mean(all_acc)})
def calc_precision_map(self):
"""
Calculates the precision of each path wrt a query relation, i.e. the ratio of how many times a path was successful when executed
to how many times the path was executed.
Note: In the current implementation, we compute precisions for the paths stored in the path_prior_map
:return:
"""
logger.info("Calculating precision map")
success_map, total_map = {}, {} # map from query r to a dict of path and ratio of success
success_map_fallback, total_map_fallback = {0: {}}, {0: {}}
# not sure why I am getting RuntimeError: dictionary changed size during iteration.
train_map_list = [((e1, r), e2_list) for ((e1, r), e2_list) in self.train_map.items()]
for ((e1, r), e2_list) in tqdm(train_map_list):
c = self.args.cluster_assignments[self.entity_vocab[e1]]
if c not in success_map:
success_map[c] = {}
if c not in total_map:
total_map[c] = {}
if r not in success_map[c]:
success_map[c][r] = {}
if r not in total_map[c]:
total_map[c][r] = {}
if r not in success_map_fallback[0]:
success_map_fallback[0][r] = {}
if r not in total_map_fallback[0]:
total_map_fallback[0][r] = {}
paths_for_this_relation = self.args.path_prior_map_per_relation[c][r]
for p_ctr, (path, _) in enumerate(paths_for_this_relation.items()):
ans = self.execute_one_program(e1, path, depth=0, max_branch=100)
if len(ans) == 0:
continue
# execute the path get answer
if path not in success_map[c][r]:
success_map[c][r][path] = 0
if path not in total_map[c][r]:
total_map[c][r][path] = 0
if path not in success_map_fallback[0][r]:
success_map_fallback[0][r][path] = 0
if path not in total_map_fallback[0][r]:
total_map_fallback[0][r][path] = 0
for a in ans:
if a in e2_list:
success_map[c][r][path] += 1
success_map_fallback[0][r][path] += 1
total_map[c][r][path] += 1
total_map_fallback[0][r][path] += 1
precision_map = {}
for c, _ in success_map.items():
for r, _ in success_map[c].items():
if c not in precision_map:
precision_map[c] = {}
if r not in precision_map[c]:
precision_map[c][r] = {}
for path, s_c in success_map[c][r].items():
precision_map[c][r][path] = s_c / total_map[c][r][path]
precision_map_fallback = {0: {}}
for r, _ in success_map_fallback[0].items():
if r not in precision_map_fallback[0]:
precision_map_fallback[0][r] = {}
for path, s_c in success_map_fallback[0][r].items():
precision_map_fallback[0][r][path] = s_c / total_map_fallback[0][r][path]
return precision_map, precision_map_fallback
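# --- Illustrative sketch (not part of prob_cbr): path precision is simply
# --- (#times an executed path hit a gold answer) / (#answers it produced),
# --- accumulated per (cluster, relation, path). The counts below are toy values.
def _precision_demo():
    success = {0: {'located_in': {('works_in', 'located_in'): 3}}}
    total = {0: {'located_in': {('works_in', 'located_in'): 5}}}
    precision = {c: {r: {p: success[c][r][p] / total[c][r][p]
                         for p in success[c][r]}
                     for r in success[c]}
                 for c in success}
    return precision   # {0: {'located_in': {('works_in', 'located_in'): 0.6}}}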
def calc_per_entity_precision_components(self, per_entity_path_prior_count, entity_set=None):
"""
Calculates the components of path precision (success and total execution counts) for each path
wrt a query relation, at the entity level.
Note: In the current implementation, we compute these counts for the paths stored in per_entity_path_prior_count
:return:
"""
logger.info("Calculating precision map at entity level")
success_map, total_map = {}, {} # map from query r to a dict of path and ratio of success
if entity_set is None:
entity_set = set(self.entity_vocab.keys())
# # not sure why I am getting RuntimeError: dictionary changed size during iteration.
train_map_list = [((e1, r), e2_list) for ((e1, r), e2_list) in self.train_map.items()]
for ((e1, r), e2_list) in tqdm(train_map_list):
if len(e2_list) == 0:
del self.train_map[(e1, r)]
continue
if e1 not in entity_set:
continue
if e1 not in success_map:
success_map[e1] = {}
if e1 not in total_map:
total_map[e1] = {}
if r not in success_map[e1]:
success_map[e1][r] = {}
if r not in total_map[e1]:
total_map[e1][r] = {}
paths_for_this_relation = per_entity_path_prior_count[e1][r]
for p_ctr, (path, _) in enumerate(paths_for_this_relation.items()):
ans = self.execute_one_program(e1, path, depth=0, max_branch=100)
if len(ans) == 0:
continue
# execute the path get answer
if path not in success_map[e1][r]:
success_map[e1][r][path] = 0
if path not in total_map[e1][r]:
total_map[e1][r][path] = 0
for a in ans:
if a in e2_list:
success_map[e1][r][path] += 1
total_map[e1][r][path] += 1
return success_map, total_map
def get_precision_map_entity2cluster(self, per_entity_success_map, per_entity_total_map, cluster_assignments,
path_prior_map_per_relation):
"""
Aggregates the per-entity success/total counts into per-cluster precision maps, executing a path
only when the per-entity maps do not already cover it for that entity and relation.
Note: In the current implementation, we compute precisions for the paths stored in path_prior_map_per_relation
:return:
"""
logger.info("Calculating precision map for cluster from entity level map")
success_map, total_map = {}, {} # map from query r to a dict of path and ratio of success
train_map_list = [((e1, r), e2_list) for ((e1, r), e2_list) in self.train_map.items()]
_skip_ctr = 0
for ((e1, r), e2_list) in tqdm(train_map_list):
if len(e2_list) == 0:
del self.train_map[(e1, r)]
continue
c = cluster_assignments[self.entity_vocab[e1]]
if c not in path_prior_map_per_relation or r not in path_prior_map_per_relation[c]:
_skip_ctr += 1
continue
if c not in success_map:
success_map[c] = {}
if c not in total_map:
total_map[c] = {}
if r not in success_map[c]:
success_map[c][r] = {}
if r not in total_map[c]:
total_map[c][r] = {}
paths_for_this_relation = path_prior_map_per_relation[c][r]
for p_ctr, (path, _) in enumerate(paths_for_this_relation.items()):
if e1 in per_entity_success_map and r in per_entity_success_map[e1] and \
path in per_entity_success_map[e1][r]:
if path not in success_map[c][r]:
success_map[c][r][path] = 0
if path not in total_map[c][r]:
total_map[c][r][path] = 0
success_map[c][r][path] += per_entity_success_map[e1][r][path]
total_map[c][r][path] += per_entity_total_map[e1][r][path]
else:
ans = self.execute_one_program(e1, path, depth=0, max_branch=100)
if len(ans) == 0:
continue
# execute the path get answer
if path not in success_map[c][r]:
success_map[c][r][path] = 0
if path not in total_map[c][r]:
total_map[c][r][path] = 0
if e1 not in per_entity_success_map:
per_entity_success_map[e1] = {}
if r not in per_entity_success_map[e1]:
per_entity_success_map[e1][r] = {}
if path not in per_entity_success_map[e1][r]:
per_entity_success_map[e1][r][path] = 0
if e1 not in per_entity_total_map:
per_entity_total_map[e1] = {}
if r not in per_entity_total_map[e1]:
per_entity_total_map[e1][r] = {}
if path not in per_entity_total_map[e1][r]:
per_entity_total_map[e1][r][path] = 0
for a in ans:
if a in e2_list:
per_entity_success_map[e1][r][path] += 1
success_map[c][r][path] += 1
per_entity_total_map[e1][r][path] += 1
total_map[c][r][path] += 1
logger.info(f'[get_precision_map_entity2cluster] {_skip_ctr} skips')
precision_map = {}
for c, _ in success_map.items():
for r, _ in success_map[c].items():
if c not in precision_map:
precision_map[c] = {}
if r not in precision_map[c]:
precision_map[c][r] = {}
for path, s_c in success_map[c][r].items():
precision_map[c][r][path] = s_c / total_map[c][r][path]
return precision_map, success_map, total_map
def update_precision_map_entity2cluster(self, per_entity_success_map, per_entity_total_map,
per_entity_success_map_updates, per_entity_total_map_updates,
per_cluster_success_map, per_cluster_total_map,
per_cluster_success_map_fallback, per_cluster_total_map_fallback,
cluster_adds, cluster_dels,
path_prior_map_per_relation, path_prior_map_per_relation_fallback):
logger.info("Updating prior map for cluster from entity level map")
# e2old_cluster = dict([(e, c) for c, elist in cluster_dels.items() for e in elist])
e2new_cluster = dict([(e, c) for c, elist in cluster_adds.items() for e in elist])
# First delete from old cluster
for c_old, elist in cluster_dels.items():
for e1 in elist:
c_new = e2new_cluster.get(e1, -1)
assert c_new != -1
if e1 not in per_entity_success_map:
assert e1 not in per_entity_total_map
else:
for r, path_counts in per_entity_success_map[e1].items():
for path, p_c in path_counts.items():
if r in per_cluster_success_map[c_old] and path in per_cluster_success_map[c_old][r]:
per_cluster_success_map[c_old][r][path] -= p_c
per_cluster_total_map[c_old][r][path] -= per_entity_total_map[e1][r][path]
assert per_cluster_success_map[c_old][r][path] >= 0
assert per_cluster_total_map[c_old][r][path] >= 0
per_cluster_success_map_fallback[0][r][path] -= p_c
per_cluster_total_map_fallback[0][r][path] -= per_entity_total_map[e1][r][path]
assert per_cluster_success_map_fallback[0][r][path] >= 0
assert per_cluster_total_map_fallback[0][r][path] >= 0
train_map_list = [((e1, r), e2_list) for ((e1, r), e2_list) in self.train_map.items()]
per_entity_success_map.update(per_entity_success_map_updates)
per_entity_total_map.update(per_entity_total_map_updates)
exec_ctr_k, reuse_ctr_k = 0, 0
exec_ctr_f, reuse_ctr_f = 0, 0
for ((e1, r), e2_list) in tqdm(train_map_list):
if len(e2_list) == 0:
del self.train_map[(e1, r)]
continue
c_new = e2new_cluster.get(e1, -1)
if c_new == -1:
continue
# Add to new cluster
if c_new not in per_cluster_success_map:
per_cluster_success_map[c_new] = {}
if c_new not in per_cluster_total_map:
per_cluster_total_map[c_new] = {}
if r not in per_cluster_success_map[c_new]:
per_cluster_success_map[c_new][r] = {}
if r not in per_cluster_total_map[c_new]:
per_cluster_total_map[c_new][r] = {}
if r not in per_cluster_success_map_fallback[0]:
per_cluster_success_map_fallback[0][r] = {}
if r not in per_cluster_total_map_fallback[0]:
per_cluster_total_map_fallback[0][r] = {}
paths_for_this_relation = path_prior_map_per_relation[c_new][r]
for p_ctr, (path, _) in enumerate(paths_for_this_relation.items()):
if e1 in per_entity_success_map and r in per_entity_success_map[e1] and \
path in per_entity_success_map[e1][r]:
if path not in per_cluster_success_map[c_new][r]:
per_cluster_success_map[c_new][r][path] = 0
if path not in per_cluster_total_map[c_new][r]:
per_cluster_total_map[c_new][r][path] = 0
per_cluster_success_map[c_new][r][path] += per_entity_success_map[e1][r][path]
per_cluster_total_map[c_new][r][path] += per_entity_total_map[e1][r][path]
reuse_ctr_k += 1
else:
ans = self.execute_one_program(e1, path, depth=0, max_branch=100)
exec_ctr_k += 1
if len(ans) == 0:
continue
# execute the path get answer
if path not in per_cluster_success_map[c_new][r]:
per_cluster_success_map[c_new][r][path] = 0
if path not in per_cluster_total_map[c_new][r]:
per_cluster_total_map[c_new][r][path] = 0
if e1 not in per_entity_success_map:
per_entity_success_map[e1] = {}
if r not in per_entity_success_map[e1]:
per_entity_success_map[e1][r] = {}
if path not in per_entity_success_map[e1][r]:
per_entity_success_map[e1][r][path] = 0
if e1 not in per_entity_total_map:
per_entity_total_map[e1] = {}
if r not in per_entity_total_map[e1]:
per_entity_total_map[e1][r] = {}
if path not in per_entity_total_map[e1][r]:
per_entity_total_map[e1][r][path] = 0
for a in ans:
if a in e2_list:
per_entity_success_map[e1][r][path] += 1
per_cluster_success_map[c_new][r][path] += 1
per_entity_total_map[e1][r][path] += 1
per_cluster_total_map[c_new][r][path] += 1
paths_for_this_relation = path_prior_map_per_relation_fallback[0][r]
for p_ctr, (path, _) in enumerate(paths_for_this_relation.items()):
if e1 in per_entity_success_map and r in per_entity_success_map[e1] and \
path in per_entity_success_map[e1][r]:
if path not in per_cluster_success_map_fallback[0][r]:
per_cluster_success_map_fallback[0][r][path] = 0
if path not in per_cluster_total_map_fallback[0][r]:
per_cluster_total_map_fallback[0][r][path] = 0
per_cluster_success_map_fallback[0][r][path] += per_entity_success_map[e1][r][path]
per_cluster_total_map_fallback[0][r][path] += per_entity_total_map[e1][r][path]
reuse_ctr_f += 1
else:
ans = self.execute_one_program(e1, path, depth=0, max_branch=100)
exec_ctr_f += 1
if len(ans) == 0:
continue
# execute the path get answer
if path not in per_cluster_success_map_fallback[0][r]:
per_cluster_success_map_fallback[0][r][path] = 0
if path not in per_cluster_total_map_fallback[0][r]:
per_cluster_total_map_fallback[0][r][path] = 0
if e1 not in per_entity_success_map:
per_entity_success_map[e1] = {}
if r not in per_entity_success_map[e1]:
per_entity_success_map[e1][r] = {}
if path not in per_entity_success_map[e1][r]:
per_entity_success_map[e1][r][path] = 0
if e1 not in per_entity_total_map:
per_entity_total_map[e1] = {}
if r not in per_entity_total_map[e1]:
per_entity_total_map[e1][r] = {}
if path not in per_entity_total_map[e1][r]:
per_entity_total_map[e1][r][path] = 0
for a in ans:
if a in e2_list:
per_entity_success_map[e1][r][path] += 1
per_cluster_success_map_fallback[0][r][path] += 1
per_entity_total_map[e1][r][path] += 1
per_cluster_total_map_fallback[0][r][path] += 1
logger.info(
f"Update for cluster map required {exec_ctr_k} executions and {reuse_ctr_k} reuses of per_entity maps")
logger.info(
f"Update for fallback map required {exec_ctr_f} executions and {reuse_ctr_f} reuses of per_entity maps")
precision_map = {}
for c, _ in per_cluster_success_map.items():
for r, _ in per_cluster_success_map[c].items():
if c not in precision_map:
precision_map[c] = {}
if r not in precision_map[c]:
precision_map[c][r] = {}
for path, s_c in per_cluster_success_map[c][r].items():
if per_cluster_total_map[c][r][path] == 0:
precision_map[c][r][path] = 0
else:
precision_map[c][r][path] = s_c / per_cluster_total_map[c][r][path]
precision_map_fallback = {0: {}}
for r, _ in per_cluster_success_map_fallback[0].items():
if r not in precision_map_fallback[0]:
precision_map_fallback[0][r] = {}
for path, s_c in per_cluster_success_map_fallback[0][r].items():
if per_cluster_total_map_fallback[0][r][path] == 0:
precision_map_fallback[0][r][path] = 0
else:
precision_map_fallback[0][r][path] = s_c / per_cluster_total_map_fallback[0][r][path]
return precision_map, precision_map_fallback
def calc_prior_path_prob(self):
"""
Calculate how probable a path is given a query relation, i.e. P(path | query rel).
For each entity in the graph, count the paths that exist for each relation in the
random subgraph, then normalize the counts per (cluster, relation).
:return:
"""
logger.info("Calculating prior map")
programs_map = {}
programs_map_fallback = {0: {}}
unique_cluster_ids = set()  # have to do this since the assigned cluster ids don't seem to be contiguous, start from 0, or end at K-1
for c in self.args.cluster_assignments:
unique_cluster_ids.add(c)
for c in unique_cluster_ids:
for _, ((e1, r), e2_list) in enumerate(tqdm((self.train_map.items()))):
if self.args.cluster_assignments[self.entity_vocab[e1]] != c:
# if this entity does not belong to this cluster, don't consider.
continue
if c not in programs_map:
programs_map[c] = {}
if r not in programs_map[c]:
programs_map[c][r] = {}
if r not in programs_map_fallback[0]:
programs_map_fallback[0][r] = {}
all_paths_around_e1 = self.all_paths[e1]
nn_answers = e2_list
for nn_ans in nn_answers:
programs = self.get_programs(e1, nn_ans, all_paths_around_e1)
for p in programs:
p = tuple(p)
if len(p) == 1:
if p[0] == r: # don't store query relation
continue
if p not in programs_map[c][r]:
programs_map[c][r][p] = 0
if p not in programs_map_fallback[0][r]:
programs_map_fallback[0][r][p] = 0
programs_map[c][r][p] += 1
programs_map_fallback[0][r][p] += 1
for c, _ in programs_map.items():
for r, path_counts in programs_map[c].items():
sum_path_counts = 0
for p, p_c in path_counts.items():
sum_path_counts += p_c
for p, p_c in path_counts.items():
programs_map[c][r][p] = p_c / sum_path_counts
for r, path_counts in programs_map_fallback[0].items():
sum_path_counts = 0
for p, p_c in path_counts.items():
sum_path_counts += p_c
for p, p_c in path_counts.items():
programs_map_fallback[0][r][p] = p_c / sum_path_counts
return programs_map, programs_map_fallback
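# --- Illustrative sketch (not part of prob_cbr): the path prior is the count
# --- of each path observed for a (cluster, relation), normalized to sum to one.
# --- The counts below are toy values.
def _prior_demo():
    counts = {('works_in', 'located_in'): 6, ('born_in',): 4}
    total = sum(counts.values())
    return {p: c / total for p, c in counts.items()}
    # -> {('works_in', 'located_in'): 0.6, ('born_in',): 0.4}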
def calc_per_entity_prior_path_count(self, entity_set=None):
"""
        Count, for each entity in the graph, how often each path occurs for each relation in the
        random subgraph. This is the un-normalized, entity-level version of P(path|query rel).
:return:
"""
logger.info("Calculating prior map at entity level")
per_entity_prior_map = {}
if entity_set is None:
entity_set = set(self.entity_vocab.keys())
train_map_list = [((e1, r), e2_list) for ((e1, r), e2_list) in self.train_map.items()]
for _, ((e1, r), e2_list) in enumerate(tqdm(train_map_list)):
if e1 not in entity_set:
continue
if e1 not in per_entity_prior_map:
per_entity_prior_map[e1] = {}
if r not in per_entity_prior_map[e1]:
per_entity_prior_map[e1][r] = {}
all_paths_around_e1 = self.all_paths[e1]
nn_answers = e2_list
for nn_ans in nn_answers:
programs = self.get_programs(e1, nn_ans, all_paths_around_e1)
for p in programs:
p = tuple(p)
if len(p) == 1:
if p[0] == r: # don't store query relation
continue
if p not in per_entity_prior_map[e1][r]:
per_entity_prior_map[e1][r][p] = 0
per_entity_prior_map[e1][r][p] += 1
# Note the prior is un-normalized
return per_entity_prior_map
def get_prior_path_count_entity2cluster(self, per_entity_prior_map, cluster_assignments):
"""
        Aggregate the per-entity prior path counts into cluster-level maps of P(path|query rel).
        Each entity's counts are added to its assigned cluster and to a single fallback cluster
        (id 0) covering all entities; both the normalized and the raw count maps are returned.
:return:
"""
logger.info("Calculating prior map for cluster from entity level map")
path_prior_map = {}
path_prior_map_fallback = {0: {}}
for e1, _ in per_entity_prior_map.items():
c = cluster_assignments[self.entity_vocab[e1]]
if c not in path_prior_map:
path_prior_map[c] = {}
for r, path_counts in per_entity_prior_map[e1].items():
if r not in path_prior_map[c]:
path_prior_map[c][r] = {}
if r not in path_prior_map_fallback[0]:
path_prior_map_fallback[0][r] = {}
for p, p_c in path_counts.items():
if p not in path_prior_map[c][r]:
path_prior_map[c][r][p] = 0
if p not in path_prior_map_fallback[0][r]:
path_prior_map_fallback[0][r][p] = 0
path_prior_map[c][r][p] += p_c
path_prior_map_fallback[0][r][p] += p_c
path_prior_map_normed = {}
for c, _ in path_prior_map.items():
for r, path_counts in path_prior_map[c].items():
sum_path_counts = 0
for p, p_c in path_counts.items():
sum_path_counts += p_c
if c not in path_prior_map_normed:
path_prior_map_normed[c] = {}
if r not in path_prior_map_normed[c]:
path_prior_map_normed[c][r] = {}
for p, p_c in path_counts.items():
path_prior_map_normed[c][r][p] = p_c / sum_path_counts
path_prior_map_normed_fallback = {0: {}}
for r, path_counts in path_prior_map_fallback[0].items():
if r not in path_prior_map_normed_fallback[0]:
path_prior_map_normed_fallback[0][r] = {}
sum_path_counts = 0
for p, p_c in path_counts.items():
sum_path_counts += p_c
for p, p_c in path_counts.items():
                path_prior_map_normed_fallback[0][r][p] = p_c / sum_path_counts
return path_prior_map_normed, path_prior_map_normed_fallback, path_prior_map, path_prior_map_fallback
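    # Illustrative sketch of the aggregation above (entity and path names are invented): if entities
    # e_a and e_b both belong to cluster 5 and their entity-level counts for relation r are
    #   e_a: {path_1: 2}   and   e_b: {path_1: 1, path_2: 1},
    # then path_prior_map[5][r] accumulates {path_1: 3, path_2: 1} and the normalized map returns
    # {path_1: 0.75, path_2: 0.25}; the fallback map does the same over all entities as one cluster.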
def update_prior_path_count_entity2cluster(self, per_entity_prior_counts, per_entity_prior_path_count_updates,
path_prior_map, path_prior_map_fallback,
cluster_adds, cluster_dels):
logger.info("Updating prior map for cluster from entity level map")
# For points moving out of a cluster, delete their contribution to prior map of old cluster
skip_counter = 0
for c, e_changes in cluster_dels.items():
if c not in path_prior_map:
logger.debug(f"Unusual condition: Cluster {c} in cluster_dels but not in path_prior_map")
continue
for e1 in e_changes:
if e1 not in per_entity_prior_counts:
skip_counter += 1
continue
for r, path_counts in per_entity_prior_counts[e1].items():
for p, p_c in path_counts.items():
path_prior_map[c][r][p] -= p_c
assert path_prior_map[c][r][p] >= 0
assert path_prior_map_fallback[0][r][p] >= 0
if path_prior_map[c][r][p] == 0:
del path_prior_map[c][r][p]
if path_prior_map_fallback[0][r][p] == 0:
del path_prior_map_fallback[0][r][p]
logging.info(f"Skipped {skip_counter} deletes")
# For points moving into a cluster, add their contribution to prior map of new cluster
skip_counter = 0
for c, e_changes in cluster_adds.items():
if c not in path_prior_map:
path_prior_map[c] = {}
for e1 in e_changes:
if e1 in per_entity_prior_path_count_updates:
                    # Use new counts: either because the entity is new or its neighborhood changed
e_count_map = per_entity_prior_path_count_updates[e1]
elif e1 not in per_entity_prior_counts:
skip_counter += 1
continue
else:
# Use old counts
e_count_map = per_entity_prior_counts[e1]
for r, path_counts in e_count_map.items():
if r not in path_prior_map[c]:
path_prior_map[c][r] = {}
if r not in path_prior_map_fallback[0]:
path_prior_map_fallback[0][r] = {}
for p, p_c in path_counts.items():
if p not in path_prior_map[c][r]:
path_prior_map[c][r][p] = 0
if p not in path_prior_map_fallback[0][r]:
path_prior_map_fallback[0][r][p] = 0
path_prior_map[c][r][p] += p_c
path_prior_map_fallback[0][r][p] += p_c
logging.info(f"Skipped {skip_counter} additions")
path_prior_map_normed = {}
for c, _ in path_prior_map.items():
for r, path_counts in path_prior_map[c].items():
sum_path_counts = 0
for p, p_c in path_counts.items():
sum_path_counts += p_c
if c not in path_prior_map_normed:
path_prior_map_normed[c] = {}
if r not in path_prior_map_normed[c]:
path_prior_map_normed[c][r] = {}
for p, p_c in path_counts.items():
path_prior_map_normed[c][r][p] = p_c / sum_path_counts
path_prior_map_normed_fallback = {0: {}}
for r, path_counts in path_prior_map_fallback[0].items():
if r not in path_prior_map_normed_fallback[0]:
path_prior_map_normed_fallback[0][r] = {}
sum_path_counts = 0
for p, p_c in path_counts.items():
sum_path_counts += p_c
for p, p_c in path_counts.items():
                path_prior_map_normed_fallback[0][r][p] = p_c / sum_path_counts
return path_prior_map_normed, path_prior_map_normed_fallback, path_prior_map, path_prior_map_fallback
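    # Descriptive summary of the incremental update above: entities listed in cluster_dels first have
    # their cached per-entity counts subtracted from their old cluster's map (zero entries are pruned),
    # entities listed in cluster_adds have their (possibly refreshed) counts added to the new cluster's
    # map, and both the per-cluster and fallback maps are then re-normalized into probability
    # distributions.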
def main_step(args, entity_vocab, rev_entity_vocab, rel_vocab, rev_rel_vocab, adj_mat, train_map, dev_map, dev_entities,
new_dev_map, new_dev_entities, test_map, test_entities, new_test_map, new_test_entities, all_paths,
rel_ent_map):
# Lets put adj_mat to GPU
adj_mat = torch.from_numpy(adj_mat)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info(f'Using device: {device.__str__()}')
adj_mat = adj_mat.to(device)
######################################
# Perform evaluation on full dev set #
######################################
if not args.only_test:
logger.info("Begin evaluation on full dev set ...")
eval_map = dev_map
# get the unique entities in eval set, so that we can calculate similarity in advance.
eval_entities = dev_entities
eval_vocab, eval_rev_vocab = {}, {}
query_ind = []
e_ctr = 0
for e in eval_entities:
try:
query_ind.append(entity_vocab[e])
except KeyError:
continue
eval_vocab[e] = e_ctr
eval_rev_vocab[e_ctr] = e
e_ctr += 1
prob_cbr_agent = ProbCBR(args, train_map, eval_map, entity_vocab, rev_entity_vocab,
rel_vocab, rev_rel_vocab, eval_vocab, eval_rev_vocab, all_paths,
rel_ent_map)
query_ind = torch.LongTensor(query_ind).to(device)
# Calculate similarity
sim = prob_cbr_agent.calc_sim(adj_mat,
query_ind) # n X N (n== size of dev_entities, N: size of all entities)
nearest_neighbor_1_hop = np.argsort(-sim.cpu(), axis=-1)
prob_cbr_agent.set_nearest_neighbor_1_hop(nearest_neighbor_1_hop)
prob_cbr_agent.do_symbolic_case_based_reasoning()
######################################
# Perform evaluation on new dev set #
######################################
if not args.only_test and new_dev_map is not None:
logger.info("Begin evaluation on new dev set ...")
eval_map = new_dev_map
# get the unique entities in eval set, so that we can calculate similarity in advance.
eval_entities = new_dev_entities
eval_vocab, eval_rev_vocab = {}, {}
query_ind = []
e_ctr = 0
for e in eval_entities:
try:
query_ind.append(entity_vocab[e])
except KeyError:
continue
eval_vocab[e] = e_ctr
eval_rev_vocab[e_ctr] = e
e_ctr += 1
prob_cbr_agent = ProbCBR(args, train_map, eval_map, entity_vocab, rev_entity_vocab,
rel_vocab, rev_rel_vocab, eval_vocab, eval_rev_vocab,
all_paths, rel_ent_map)
query_ind = torch.LongTensor(query_ind).to(device)
# Calculate similarity
sim = prob_cbr_agent.calc_sim(adj_mat,
query_ind) # n X N (n== size of dev_entities, N: size of all entities)
nearest_neighbor_1_hop = np.argsort(-sim.cpu(), axis=-1)
prob_cbr_agent.set_nearest_neighbor_1_hop(nearest_neighbor_1_hop)
prob_cbr_agent.do_symbolic_case_based_reasoning()
#######################################
# Perform evaluation on full test set #
#######################################
if args.test:
logger.info("Begin evaluation on full test set ...")
eval_map = test_map
# get the unique entities in eval set, so that we can calculate similarity in advance.
eval_entities = test_entities
eval_vocab, eval_rev_vocab = {}, {}
query_ind = []
e_ctr = 0
for e in eval_entities:
try:
query_ind.append(entity_vocab[e])
except KeyError:
continue
eval_vocab[e] = e_ctr
eval_rev_vocab[e_ctr] = e
e_ctr += 1
prob_cbr_agent = ProbCBR(args, train_map, eval_map, entity_vocab, rev_entity_vocab, rel_vocab,
rev_rel_vocab, eval_vocab, eval_rev_vocab, all_paths, rel_ent_map)
query_ind = torch.LongTensor(query_ind).to(device)
# Calculate similarity
sim = prob_cbr_agent.calc_sim(adj_mat,
query_ind) # n X N (n== size of dev_entities, N: size of all entities)
nearest_neighbor_1_hop = np.argsort(-sim.cpu(), axis=-1)
prob_cbr_agent.set_nearest_neighbor_1_hop(nearest_neighbor_1_hop)
prob_cbr_agent.do_symbolic_case_based_reasoning()
######################################
# Perform evaluation on new test set #
######################################
if args.test and new_test_map is not None:
logger.info("Begin evaluation on new test set ...")
eval_map = new_test_map
# get the unique entities in eval set, so that we can calculate similarity in advance.
eval_entities = new_test_entities
eval_vocab, eval_rev_vocab = {}, {}
query_ind = []
e_ctr = 0
for e in eval_entities:
try:
query_ind.append(entity_vocab[e])
except KeyError:
continue
eval_vocab[e] = e_ctr
eval_rev_vocab[e_ctr] = e
e_ctr += 1
prob_cbr_agent = ProbCBR(args, train_map, eval_map, entity_vocab, rev_entity_vocab,
rel_vocab, rev_rel_vocab, eval_vocab, eval_rev_vocab, all_paths,
rel_ent_map)
query_ind = torch.LongTensor(query_ind).to(device)
# Calculate similarity
sim = prob_cbr_agent.calc_sim(adj_mat,
query_ind) # n X N (n== size of dev_entities, N: size of all entities)
nearest_neighbor_1_hop = np.argsort(-sim.cpu(), axis=-1)
prob_cbr_agent.set_nearest_neighbor_1_hop(nearest_neighbor_1_hop)
prob_cbr_agent.do_symbolic_case_based_reasoning()
class CBRWrapper:
def __init__(self, args, total_n_entity, total_n_relation):
self.args = args
# Create GRINCH clustering object
self.clustering_model = GrinchWithDeletes(np.zeros((total_n_entity, total_n_relation)))
self.seen_entities = set()
self.entity_representation = np.zeros((total_n_entity, total_n_relation))
self.all_paths = {}
self.per_entity_prior_path_count = {}
self.per_cluster_path_prior_count, self.per_cluster_path_prior_count_fallback = {}, {}
self.per_entity_prec_success_counts, self.per_entity_prec_total_counts = {}, {}
self.per_entity_prec_success_counts_fallback, self.per_entity_prec_total_counts_fallback = {}, {}
self.per_cluster_prec_success_counts, self.per_cluster_prec_total_counts = {}, {}
self.per_cluster_prec_success_counts_fallback, self.per_cluster_prec_total_counts_fallback = {}, {}
self.per_cluster_precision_map, self.fallback_precision_map = {}, {}
self.cluster_assignments = np.zeros(total_n_entity)
def process_seed_kb(self, entity_vocab, rev_entity_vocab, rel_vocab, rev_rel_vocab,
known_true_triples, train_triples, valid_triples, test_triples):
if self.args.just_preprocess and not (self.args.process_num == -1 or self.args.process_num == 0):
# Important for later batches
self.seen_entities = set(entity_vocab.keys())
return
self.args.output_dir = os.path.join(self.args.expt_dir, "outputs", self.args.dataset_name,
self.args.name_of_run, 'stream_step_0')
if not os.path.exists(self.args.output_dir):
os.makedirs(self.args.output_dir)
logger.info(f"Output directory: {self.args.output_dir}")
# 1. Load all maps
logger.info("Loading train map")
train_map = load_data_from_triples(train_triples)
rel_ent_map = get_entities_group_by_relation_from_triples(train_triples)
logger.info("Loading dev map")
dev_map = load_data_from_triples(valid_triples)
dev_entities = get_unique_entities_from_triples(valid_triples)
logger.info("Loading test map")
test_map = load_data_from_triples(test_triples)
test_entities = get_unique_entities_from_triples(test_triples)
logger.info("Loading combined train/dev/test map for filtered eval")
all_kg_map = load_data_from_triples(known_true_triples)
self.args.all_kg_map = all_kg_map
logger.info("Dumping vocabs")
with open(os.path.join(self.args.output_dir, "entity_vocab.pkl"), "wb") as fout:
pickle.dump(entity_vocab, fout)
with open(os.path.join(self.args.output_dir, "rel_vocab.pkl"), "wb") as fout:
pickle.dump(rel_vocab, fout)
# 2. Sample subgraph around entities
logger.info("Load train adacency map")
train_adj_map = create_adj_list_from_triples(train_triples)
if self.args.warm_start:
logger.info("[WARM_START] Load paths around graph entities")
with open(os.path.join(self.args.output_dir, f'paths_{self.args.num_paths_to_collect}.pkl'), "rb") as fin:
self.all_paths = pickle.load(fin)
else:
logger.info("Sample paths around graph entities")
for ctr, e1 in enumerate(tqdm(entity_vocab.keys())):
self.all_paths[e1] = get_paths(self.args, train_adj_map, e1, max_len=3)
with open(os.path.join(self.args.output_dir, f'paths_{self.args.num_paths_to_collect}.pkl'), "wb") as fout:
pickle.dump(self.all_paths, fout)
self.args.all_paths = self.all_paths
# 3. Obtain entity cluster assignments
# Calculate adjacency matrix
logger.info("Calculate adjacency matrix")
adj_mat = read_graph_from_triples(train_triples, entity_vocab, rel_vocab)
adj_mat = np.sqrt(adj_mat)
l2norm = np.linalg.norm(adj_mat, axis=-1)
        l2norm = np.clip(l2norm, np.finfo(float).eps, None)
adj_mat = adj_mat / l2norm.reshape(l2norm.shape[0], 1)
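        # Descriptive note: each row of adj_mat now holds the square-rooted relation counts of one
        # entity, rescaled to unit L2 norm (the eps clip guards against all-zero rows), so a dot
        # product between two rows behaves like a cosine similarity over their relation profiles.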
self.seen_entities.update(entity_vocab.keys())
self.entity_representation[:len(entity_vocab), :len(rel_vocab)] = adj_mat
if not self.args.just_preprocess:
logger.info("Cluster entities")
for i in trange(adj_mat.shape[0]):
                # The first argument is the point id, the second is the point vector.
                # If the second argument is omitted, the vector is taken from the points
                # passed to the constructor.
self.clustering_model.insert(i=i, i_vec=self.entity_representation[i])
cluster_assignments = self.clustering_model.flat_clustering(threshold=self.args.cluster_threshold).astype(
int)
assert np.all(cluster_assignments[:len(entity_vocab)] != -1) and \
np.all(cluster_assignments[len(entity_vocab):] == -1)
self.args.cluster_assignments = cluster_assignments[:len(entity_vocab)]
self.cluster_assignments = cluster_assignments[:len(entity_vocab)].copy()
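            # Descriptive note: entities are inserted into the GRINCH tree one at a time using their
            # normalized representations, then a flat clustering is cut at args.cluster_threshold;
            # rows beyond len(entity_vocab) were never inserted, which is why the assert above
            # expects -1 for them.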
cluster_population = Counter(self.args.cluster_assignments)
logger.info(f"Found {len(cluster_population)} flat clusters")
logger.info(f"Cluster stats :: Most common: {cluster_population.most_common(5)}")
logger.info(f"Cluster stats :: Avg Size: {np.mean(list(cluster_population.values()))}")
logger.info(f"Cluster stats :: Min Size: {np.min(list(cluster_population.values()))}")
logger.info("Dumping cluster assignments")
with open(os.path.join(self.args.output_dir, "cluster_assignments.pkl"), "wb") as fout:
pickle.dump(self.cluster_assignments, fout)
# 4. Create solver
prob_cbr_agent = ProbCBR(args, train_map, {}, entity_vocab, rev_entity_vocab, rel_vocab,
rev_rel_vocab, {}, {}, self.args.all_paths, rel_ent_map)
# 5. Compute path prior map
if self.args.warm_start:
logger.info("[WARM_START] Load per entity prior map")
with open(os.path.join(self.args.output_dir, f'per_entity_prior_path_count.pkl'), "rb") as fin:
self.per_entity_prior_path_count = pickle.load(fin)
else:
self.per_entity_prior_path_count = prob_cbr_agent.calc_per_entity_prior_path_count()
with open(os.path.join(self.args.output_dir, 'per_entity_prior_path_count.pkl'), "wb") as fout:
pickle.dump(self.per_entity_prior_path_count, fout)
if not self.args.just_preprocess:
self.args.path_prior_map_per_relation, self.args.path_prior_map_per_relation_fallback, \
self.per_cluster_path_prior_count, self.per_cluster_path_prior_count_fallback = \
prob_cbr_agent.get_prior_path_count_entity2cluster(self.per_entity_prior_path_count,
self.args.cluster_assignments)
dir_name = os.path.join(self.args.output_dir, "t_{}".format(self.args.cluster_threshold))
if not os.path.exists(dir_name):
os.makedirs(dir_name)
logger.info("Dumping path prior map at {}".format(dir_name))
with open(os.path.join(dir_name, "path_prior_map.pkl"), "wb") as fout:
pickle.dump(self.args.path_prior_map_per_relation, fout)
dir_name = os.path.join(self.args.output_dir, "K_1")
if not os.path.exists(dir_name):
os.makedirs(dir_name)
logger.info("Dumping fallback path prior map at {}".format(dir_name))
with open(os.path.join(dir_name, "path_prior_map.pkl"), "wb") as fout:
pickle.dump(self.args.path_prior_map_per_relation_fallback, fout)
# 6. Compute path precision map
if self.args.warm_start:
logger.info("[WARM_START] Load per entity precision count maps")
with open(os.path.join(self.args.output_dir, f'per_entity_prec_success_counts.pkl'), "rb") as fin:
self.per_entity_prec_success_counts = pickle.load(fin)
with open(os.path.join(self.args.output_dir, f'per_entity_prec_total_counts.pkl'), "rb") as fin:
self.per_entity_prec_total_counts = pickle.load(fin)
else:
self.per_entity_prec_success_counts, self.per_entity_prec_total_counts = \
prob_cbr_agent.calc_per_entity_precision_components(self.per_entity_prior_path_count)
with open(os.path.join(self.args.output_dir, f'per_entity_prec_success_counts.pkl'), "wb") as fout:
pickle.dump(self.per_entity_prec_success_counts, fout)
with open(os.path.join(self.args.output_dir, f'per_entity_prec_total_counts.pkl'), "wb") as fout:
pickle.dump(self.per_entity_prec_total_counts, fout)
if not self.args.just_preprocess:
self.args.precision_map, self.per_cluster_prec_success_counts, self.per_cluster_prec_total_counts = \
prob_cbr_agent.get_precision_map_entity2cluster(self.per_entity_prec_success_counts,
self.per_entity_prec_total_counts,
self.args.cluster_assignments,
self.args.path_prior_map_per_relation)
self.per_cluster_precision_map = self.args.precision_map
self.per_entity_prec_success_counts_fallback, self.per_entity_prec_total_counts_fallback = \
copy.deepcopy(self.per_entity_prec_success_counts), copy.deepcopy(self.per_entity_prec_total_counts)
self.args.precision_map_fallback, self.per_cluster_prec_success_counts_fallback, \
self.per_cluster_prec_total_counts_fallback = \
prob_cbr_agent.get_precision_map_entity2cluster(self.per_entity_prec_success_counts_fallback,
self.per_entity_prec_total_counts_fallback,
|
np.zeros_like(self.args.cluster_assignments)
|
numpy.zeros_like
|
# -*- coding: utf-8 -*-
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of EntropyBottleneck class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
import tensorflow_compression as tfc
class EntropyBottleneckTest(test.TestCase):
def test_noise(self):
# Tests that the noise added is uniform noise between -0.5 and 0.5.
inputs = tf.placeholder(tf.float32, (None, 1))
layer = tfc.EntropyBottleneck()
noisy, _ = layer(inputs, training=True)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
values = np.linspace(-50, 50, 100)[:, None]
noisy, = sess.run([noisy], {inputs: values})
self.assertFalse(np.allclose(values, noisy, rtol=0, atol=.49))
self.assertAllClose(values, noisy, rtol=0, atol=.5)
def test_quantization(self):
# Tests that inputs are quantized to full integer values, even after
# quantiles have been updated.
inputs = tf.placeholder(tf.float32, (None, 1))
layer = tfc.EntropyBottleneck(optimize_integer_offset=False)
quantized, _ = layer(inputs, training=False)
opt = tf.train.GradientDescentOptimizer(learning_rate=1)
self.assertTrue(len(layer.losses) == 1)
step = opt.minimize(layer.losses[0])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(step)
values = np.linspace(-50, 50, 100)[:, None]
quantized, = sess.run([quantized], {inputs: values})
self.assertAllClose(np.around(values), quantized, rtol=0, atol=1e-6)
def test_quantization_optimized_offset(self):
# Tests that inputs are not quantized to full integer values after quantiles
# have been updated. However, the difference between input and output should
# be between -0.5 and 0.5, and the offset must be consistent.
inputs = tf.placeholder(tf.float32, (None, 1))
layer = tfc.EntropyBottleneck(optimize_integer_offset=True)
quantized, _ = layer(inputs, training=False)
opt = tf.train.GradientDescentOptimizer(learning_rate=1)
self.assertTrue(len(layer.losses) == 1)
step = opt.minimize(layer.losses[0])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(step)
values = np.linspace(-50, 50, 100)[:, None]
quantized, = sess.run([quantized], {inputs: values})
self.assertAllClose(values, quantized, rtol=0, atol=.5)
diff = np.ravel(np.around(values) - quantized) % 1
self.assertAllClose(diff, np.full_like(diff, diff[0]), rtol=0, atol=5e-6)
self.assertNotEqual(diff[0], 0)
def test_codec(self):
# Tests that inputs are compressed and decompressed correctly, and quantized
# to full integer values, even after quantiles have been updated.
inputs = tf.placeholder(tf.float32, (1, None, 1))
layer = tfc.EntropyBottleneck(
data_format="channels_last", init_scale=60,
optimize_integer_offset=False)
bitstrings = layer.compress(inputs)
decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
opt = tf.train.GradientDescentOptimizer(learning_rate=1)
self.assertTrue(len(layer.losses) == 1)
step = opt.minimize(layer.losses[0])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(step)
self.assertTrue(len(layer.updates) == 1)
sess.run(layer.updates[0])
values = np.linspace(-50, 50, 100)[None, :, None]
decoded, = sess.run([decoded], {inputs: values})
self.assertAllClose(np.around(values), decoded, rtol=0, atol=1e-6)
def test_codec_optimized_offset(self):
# Tests that inputs are compressed and decompressed correctly, and not
# quantized to full integer values after quantiles have been updated.
# However, the difference between input and output should be between -0.5
# and 0.5, and the offset must be consistent.
inputs = tf.placeholder(tf.float32, (1, None, 1))
layer = tfc.EntropyBottleneck(
data_format="channels_last", init_scale=60,
optimize_integer_offset=True)
bitstrings = layer.compress(inputs)
decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
opt = tf.train.GradientDescentOptimizer(learning_rate=1)
self.assertTrue(len(layer.losses) == 1)
step = opt.minimize(layer.losses[0])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(step)
self.assertTrue(len(layer.updates) == 1)
sess.run(layer.updates[0])
values = np.linspace(-50, 50, 100)[None, :, None]
decoded, = sess.run([decoded], {inputs: values})
self.assertAllClose(values, decoded, rtol=0, atol=.5)
diff = np.ravel(np.around(values) - decoded) % 1
self.assertAllClose(diff, np.full_like(diff, diff[0]), rtol=0, atol=5e-6)
self.assertNotEqual(diff[0], 0)
def test_codec_clipping(self):
# Tests that inputs are compressed and decompressed correctly, and clipped
# to the expected range.
inputs = tf.placeholder(tf.float32, (1, None, 1))
layer = tfc.EntropyBottleneck(
data_format="channels_last", init_scale=40)
bitstrings = layer.compress(inputs)
decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertTrue(len(layer.updates) == 1)
sess.run(layer.updates[0])
values = np.linspace(-50, 50, 100)[None, :, None]
decoded, = sess.run([decoded], {inputs: values})
expected = np.clip(np.around(values), -40, 40)
self.assertAllClose(expected, decoded, rtol=0, atol=1e-6)
def test_channels_last(self):
# Test the layer with more than one channel and multiple input dimensions,
# with the channels in the last dimension.
inputs = tf.placeholder(tf.float32, (None, None, None, 2))
layer = tfc.EntropyBottleneck(
data_format="channels_last", init_scale=50)
noisy, _ = layer(inputs, training=True)
quantized, _ = layer(inputs, training=False)
bitstrings = layer.compress(inputs)
decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertTrue(len(layer.updates) == 1)
sess.run(layer.updates[0])
values = 5 * np.random.normal(size=(7, 5, 3, 2))
noisy, quantized, decoded = sess.run(
[noisy, quantized, decoded], {inputs: values})
self.assertAllClose(values, noisy, rtol=0, atol=.5)
self.assertAllClose(values, quantized, rtol=0, atol=.5)
self.assertAllClose(values, decoded, rtol=0, atol=.5)
def test_channels_first(self):
# Test the layer with more than one channel and multiple input dimensions,
# with the channel dimension right after the batch dimension.
inputs = tf.placeholder(tf.float32, (None, 3, None, None))
layer = tfc.EntropyBottleneck(
data_format="channels_first", init_scale=50)
noisy, _ = layer(inputs, training=True)
quantized, _ = layer(inputs, training=False)
bitstrings = layer.compress(inputs)
decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertTrue(len(layer.updates) == 1)
sess.run(layer.updates[0])
values = 5 *
|
np.random.normal(size=(2, 3, 5, 7))
|
numpy.random.normal
|
import gc
from logging import warning
from time import sleep, perf_counter
from typing import Optional, Union, Dict, List, Tuple, Callable
import numpy as np
import pandas as pd
from numpy import ndarray
from rdkit.Chem import AddHs, CanonSmiles, MolToSmiles, MolFromSmiles, MolFromInchi, Kekulize, SanitizeMol
from rdkit.Chem.rdchem import Mol, RWMol
from sklearn.base import BaseEstimator
from .AbstractModel import AbstractModel
from .BaseCreator import getRadicalsByBondIdx, getBondIndex, getBondIndexByAtom
from .Creator import DataGenerator
from .Dataset import FeatureData, allocateFeatureLocation
from .DatasetAPI import DatasetTransmitterWrapper
from .Preprocessing import inputFullCheck, inputCheckRange, inputCheckIterableInRange, MeasureExecutionTime, FixPath, \
ReadFile, ExportFile, SortWithIdx, ArraySorting, EvaluateInputPosition, GetIndexOnArrangedData, ArrayEqual
from .coreConfig import getPrebuiltInfoLabels
BASE_TEMPLATE = Optional[ndarray]
INPUT_FOR_DATABASE = Union[BASE_TEMPLATE, pd.DataFrame]
ITERABLE_TEMPLATE = Union[INPUT_FOR_DATABASE, List, Tuple]
SINGLE_INPUT_TEMPLATE = Union[BASE_TEMPLATE, str]
MULTI_INPUT_TEMPLATE = Union[BASE_TEMPLATE, List[str]]
MULTI_COLUMNS = Union[List, Tuple]
def _FixData_(database: INPUT_FOR_DATABASE) -> ndarray:
if not isinstance(database, ndarray):
database: ndarray = np.asarray(database)
if database.ndim != 2:
database: ndarray = np.atleast_2d(database)
return database
def _tuneFinalDataset_(InfoData: ndarray, FalseLine: List[int], TrueReference: Optional[ndarray] = None) \
-> Tuple[ndarray, Optional[ndarray]]:
inputFullCheck(value=FalseLine, name='FalseLine', dtype='List')
if len(FalseLine) != 0:
FalseLine.sort()
warning(f" These are the error lines needed to be removed: {FalseLine}")
InfoData: ndarray = np.delete(InfoData, obj=FalseLine, axis=0)
if TrueReference is not None:
TrueReference: ndarray = np.delete(TrueReference, obj=FalseLine, axis=0)
return InfoData, TrueReference
class PredictModel(DatasetTransmitterWrapper):
"""
    A Python implementation of the AIP-BDET model (Bit2Edge framework). This class is used to make predictions
    and provides helpers for data visualization.
    - AIP-BDET is a low-cost, multi-purpose deep-learning tool that predicts Bond Dissociation Energy with
      superior accuracy and strong classification power & interpretability on ordinary atoms (C, H, O, N).
    - AIP-BDET is built on a feed-forward network architecture inspired by graph neural networks in chemistry,
      using hashed bit-type molecular fingerprints.
"""
_SavedPredLabels: List[str] = ["Reference", "AIP-BDET: Prediction", "AIP-BDET: Error"]
@MeasureExecutionTime
def __init__(self, DatasetObject: FeatureData, GeneratorObject: DataGenerator, TrainedModelMode: Optional[int] = 1,
InputKey: Optional[int] = None, dataType: np.dtype=np.uint8, TrainedModelInputFileName: str = None,
CFile: str = None, SFile: str = None, dataConfiguration: str = None, gpu_memory: bool = False):
print("-" * 33, "INITIALIZATION", "-" * 33)
from .config import MODEL_INIT, updateDataConfig
# [1.0]: Main Attribute for Data
if not isinstance(DatasetObject, FeatureData) or DatasetObject is None:
InputKey: int = MODEL_INIT[TrainedModelMode][0] if InputKey is None else InputKey
self._dataset: FeatureData = FeatureData(InputKey=InputKey, trainable=False, retraining=False,
dataType=dataType)
else:
if DatasetObject.isTrainable():
raise TypeError("This dataset is invalid")
self._dataset: FeatureData = DatasetObject
        super(PredictModel, self).__init__(dataset=self._dataset, priority_key="Test")
if not isinstance(GeneratorObject, DataGenerator) or GeneratorObject is None:
self._generator: DataGenerator = \
DataGenerator(DatasetObject=self.getDataset(), priorityKey=self.getKey(), boostMode=True,
simplifySmilesEnvironment=True, showInvalidStereochemistry=False,
environmentCatching=False)
else:
if GeneratorObject.getDataset() is not self._dataset:
raise TypeError("This generator is invalid is invalid")
self._generator: DataGenerator = GeneratorObject
self._PrebuiltInfoLabels: List[str] = getPrebuiltInfoLabels()
self._dataset.setRequestLabels(value=self._PrebuiltInfoLabels, request="Info")
# [1.1]: Cache Attribute
self._mol, self._radical, self._bondIndex, self._bondType = self._dataset.getPropertiesRespectToColumn()
self.dataType: np.dtype = self._dataset.dataType
# [1.2]: Main Attribute for Model
if True: # Re-initialize Hyper-parameter
TrainedModel: str = MODEL_INIT[TrainedModelMode][1] if TrainedModelInputFileName is None else TrainedModelInputFileName
CColPath: str = MODEL_INIT[TrainedModelMode][2] if CFile is None else CFile
SColPath: str = MODEL_INIT[TrainedModelMode][3] if SFile is None else SFile
if TrainedModel is not None and isinstance(TrainedModel, str):
TrainedModel: str = FixPath(FileName=TrainedModel, extension=".h5")
dataConfiguration: str = MODEL_INIT[TrainedModelMode][4] if dataConfiguration is None else dataConfiguration
if dataConfiguration is not None:
updateDataConfig(dataConfiguration)
if gpu_memory:
from tensorflow import config
dev = config.list_physical_devices('GPU')
config.experimental.set_memory_growth(dev[0], True)
pass
self._dataset.setTrainedSavedLabels(CTrainedLabelsDirectory=CColPath, STrainedLabelsDirectory=SColPath)
self._generator.setBondTypeForInferenceOrFineTuning(bondTypeSaved=self._getBondTypeSaved_())
# print(TrainedModel)
self.TF_model: AbstractModel = AbstractModel(DatasetObject=self._dataset, model=TrainedModel, sparseMode=False)
# [2]: Model Calculation
self._MolArray: List[str] = []
self._RecordedMolArray: Optional[ndarray] = None
self.y_pred: BASE_TEMPLATE = None
self.InterOutput: BASE_TEMPLATE = None
self.BuiltDataFrame: Optional[pd.DataFrame] = None
self.DensityDataFrame: Optional[pd.DataFrame] = None
# [3]: Status Attribute
self._inplace: bool = True # In-place Standardization
self._isStandardized: bool = False # Prevent Multiple Standardization
self._timer: Dict[str, Union[int, float]] = {'add': 0, 'create': 0, 'process': 0, 'predictMethod': 0,
'predictFunction': 0, 'visualize': 0}
# [0]: Indirect/Hidden Method: -----------------------------------------------------------------------------------
# [0.1]: Preprocessing - Timer: ----------------------------------------------------------------------------------
def _resetTime_(self) -> None:
self._timer: Dict[str, Union[int, float]] = {'add': 0, 'create': 0, 'process': 0, 'predictMethod': 0,
'predictFunction': 0, 'visualize': 0}
def _getFullTime_(self) -> float:
return self._timer['add'] + self._getProcessTime_() + self._timer['visualize']
def _getProcessTime_(self) -> float:
return self._timer['create'] + self._timer['process'] + self._timer['predictMethod']
def exportProcessTime(self) -> pd.DataFrame:
""" Return a DataFrame that recorded execution time """
index = ["#1: Adding Data", "#2: Create Data", "#3: Process Data", "#4: Prediction Method",
"#5: Prediction Speed", "#6: Visualization Speed", "#7: Total Time"]
column = ["Full Timing (secs)", "Timing per Unit (ms/bond)"]
numsSamples: int = self._dataset.getRequestData(requests=("Train", "Info")).shape[0]
value = [[self._timer['add'], 1e3 * self._timer['add'] / numsSamples],
[self._timer["create"], 1e3 * self._timer["create"] / numsSamples],
[self._timer["process"], 1e3 * self._timer["process"] / numsSamples],
[self._timer["predictMethod"], 1e3 * self._timer["predictMethod"] / numsSamples],
[self._timer["predictFunction"], 1e3 * self._timer["predictFunction"] / numsSamples],
[self._timer["visualize"], 1e3 * self._timer["visualize"] / numsSamples],
[self._getFullTime_(), 1e3 * self._getFullTime_() / numsSamples]]
x = pd.DataFrame(data=value, index=index, columns=column, dtype=np.float32)
x.index.name = "Method"
print(x)
return x
# [1]: Rarely-Called Method: -------------------------------------------------------------------------------------
def summary(self) -> None:
self.TF_model.summary()
def getFlops(self, batch_size: Optional[int] = None, specific: bool = False) -> int:
return self.TF_model.getFlops(batch_size=batch_size, specific=specific)
@MeasureExecutionTime
def getFingerprintData(self, CFileName: str = None, SFileName: str = None) -> None:
"""
Implementation of retrieving the fingerprint database
:param CFileName: Directory of fingerprint from the C-Model
:type CFileName: str
:param SFileName: Directory of fingerprint in the S-Model
:type SFileName: str
:return: None
"""
if CFileName is None and SFileName is None:
warning("CFileName / CFileName must not be None (NoneType Object)")
if CFileName is not None:
inputFullCheck(value=CFileName, name="CFileName", dtype='str')
data, labels = self.getDataLabels(request="CData")
if data is not None:
ExportFile(DataFrame=pd.DataFrame(data=data, columns=labels, index=None, dtype=self.dataType),
FilePath=CFileName)
else:
warning("No data can be found")
if SFileName is not None:
if self._dataset.getIsSharedInput():
warning("Your CModel's Input = SModel's Input. Thus, we don't export your file here")
return None
inputFullCheck(value=SFileName, name="SFileName", dtype='str')
data, labels = self.getDataLabels(request="SData")
if data is not None:
ExportFile(DataFrame=pd.DataFrame(data=data, columns=labels, index=None, dtype=self.dataType),
FilePath=SFileName)
else:
warning("No data can be found")
return None
@MeasureExecutionTime
def searchEnvironment(self, OutputFileName: str, MoleculeSearch: bool = False) -> pd.DataFrame:
"""
        Check whether the searched environments appear in the training set across the 3 inputs. Note that this
        function does not check whether a full reaction has been trained on, as that would cost O(3 * N * K) time,
        where K is the number of rows in the new input and N is the number of reactions in the training set.
:param OutputFileName: The directory of the file
:type OutputFileName: str
        :param MoleculeSearch: Whether the parent molecule itself is also searched
:type MoleculeSearch: bool
:return: pd.DataFrame
"""
# Hyper-parameter Verification
if True:
if not self.isDataAvailable(request="Env"):
raise ValueError("There are no data available to check the function")
inputFullCheck(value=OutputFileName, name='OutputFileName', dtype='str')
inputFullCheck(value=MoleculeSearch, name='MoleculeSearch', dtype='bool')
from .config import TRAINED_PATH
from typing import Set
print("-" * 80)
print("The object is now searching whether those environments were in the training set with 3 inputs")
# [1]: Initialize Data
numsInput: int = self._dataset.getNumsInput()
notations: Tuple[str, ...] = self._dataset.getFingerprintNotation()
useCols: List[int] = [len(self._PrebuiltInfoLabels) + i for i in range(numsInput)]
SearchEnvLabels: List[str] = []
for idx, notation in enumerate(notations):
SearchEnvLabels.append(f"{notation}: Environment")
SearchEnvLabels.append(f"{notation}: Available")
if MoleculeSearch:
SearchEnvLabels.append(self._PrebuiltInfoLabels[0])
SearchEnvLabels.append(f"Mol: Available")
useCols.append(self._mol)
TrainedData: ndarray = ReadFile(FilePath=TRAINED_PATH, header=0, get_values=True, usecols=useCols,
get_columns=False)
useCols: List[int] = [i for i in range(len(SearchEnvLabels) // 2)]
EnvData: ndarray = self.getData(request="Env")
SearchResult: ndarray = np.zeros(shape=(EnvData.shape[0], len(SearchEnvLabels)), dtype=np.object_)
SearchResult[:, [2 * i for i in range(numsInput)]] = EnvData[:, :numsInput]
if MoleculeSearch:
SearchResult[:, 2 * useCols[-1]] = self.getData(request="Info")[:, self._mol]
# [2]: Evaluate Environment Array
for col in range(0, SearchResult.shape[1] // 2):
UniqueTrainedMatrix: Set = set(np.unique(TrainedData[:, col], axis=None).tolist())
CurrentEnvData: List[str] = SearchResult[:, 2 * col].tolist()
SearchResult[:, 2 * col + 1] = [bool(CurrentEnvData[row] in UniqueTrainedMatrix)
for row in range(0, SearchResult.shape[0])]
DataFrame = pd.DataFrame(data=SearchResult, columns=SearchEnvLabels, index=None)
ExportFile(DataFrame=DataFrame, FilePath=OutputFileName)
return DataFrame
# [2]: Informational Method: -------------------------------------------------------------------------------------
def BuildFullInfoData(self, InfoData: bool = True, EnvironmentData: bool = True,
TargetData: bool = True) -> pd.DataFrame:
return self._dataset.buildInformationDataFrame(request=self.getKey(), InfoData=InfoData,
EnvironmentData=EnvironmentData, TargetData=TargetData)
def ExportInfoDataToCsv(self, Storage: str, FileName: str, InfoData: bool = True,
EnvironmentData: bool = True, TargetData: bool = True) -> None:
if Storage != '':
Storage = FixPath(FileName=Storage, extension='/')
ExportFile(DataFrame=self.BuildFullInfoData(InfoData=InfoData, EnvironmentData=EnvironmentData,
TargetData=TargetData),
FilePath=FixPath(FileName=f"{Storage}{FileName}", extension=".csv"))
return None
# [2]: Adding Data: ----------------------------------------------------------------------------------------------
# [2.0]: Adding Data: --------------------------------------------------------------------------------------------
def _refreshAttribute_(self):
self._dataset.cleanAttribute()
self._dataset.setRequestLabels(value=self._PrebuiltInfoLabels, request="Info")
self._generator.refreshAttribute()
self.ConvertedSmiles: ndarray = np.array([[None, None]], dtype=np.object_)
self.y_pred, self.InterOutput, self.BuiltDataFrame, self.DensityDataFrame = None, None, None, None
self._isStandardized: bool = False # Prevent Multiple Standardization
self._resetTime_()
gc.collect()
def _startNewProcess_(self) -> float:
self._refreshAttribute_()
return perf_counter()
def _readCsvFile_(self, path: str, Molecule: int = 0, Radicals: Optional[Union[List[int], Tuple[int, ...]]] = None,
BondIndex: Optional[int] = None, BondType: Optional[int] = None, Target: Optional[int] = None,
sorting: bool = False, ascending: bool = True) -> ndarray:
inputFullCheck(value=sorting, name='sorting', dtype='bool')
inputFullCheck(value=ascending, name='ascending', dtype='List-bool-Tuple', delimiter='-')
useCols: List[int] = allocateFeatureLocation(MoleculeCol=Molecule, RadicalCols=Radicals, BondIdxCol=BondIndex,
BondTypeCol=BondType, TargetCol=Target)
if sorting:
DataFrame: pd.DataFrame = ReadFile(FilePath=path, header=0, usecols=useCols, get_values=False,
get_columns=False)
if BondIndex is not None:
modified_ascending: List[bool] = [ascending, ascending] if isinstance(ascending, bool) else ascending
DataFrame.sort_values(by=[DataFrame.columns[self._mol], DataFrame.columns[self._bondIndex]],
ascending=modified_ascending, inplace=True)
else:
DataFrame.sort_values(by=DataFrame.columns[self._mol], ascending=ascending, inplace=True)
return DataFrame.values
return ReadFile(FilePath=path, header=0, usecols=useCols, get_values=True, get_columns=False)
def _displayAfterReadCsv_(self) -> None:
gc.collect()
print(f'Adding Time: {self._timer["add"]:.6f}s')
return None
# [3.1]: Single-Adding Method: ---------------------------------------------------------------------------------
def addMolArray(self, MolArray: Union[List[str], Tuple[str, ...]], mode: str = "SMILES", **kwargs: bool) -> None:
"""
Implementation of Reading Molecule by either Smiles or InChi Key
:param MolArray: Array of Molecules by String
:type MolArray: List[str] or Tuple[str, ...]
        :param mode: Either 'SMILES' or 'InChi'
        :type mode: str
:return: None
"""
if True:
request: Tuple[str, ...] = ("SMILES", "InChi")
if mode not in request:
raise TypeError("The mode is in-valid, which should be either 'SMILES' or 'InChi'.")
request_index: int = request.index(mode)
inputFullCheck(value=MolArray, name="Array of Molecules", dtype='List-Tuple', delimiter='-')
if 'canonicalize' not in kwargs:
kwargs['canonicalize']: bool = False
if 'useChiral' not in kwargs:
kwargs['useChiral']: bool = False
if request_index == 0 and kwargs['canonicalize']:
                warning(' Your input SMILES will all be re-canonicalized to avoid shifting of the bond index')
def SmilesToMol(smiles: str, chiral: bool = kwargs['useChiral']):
return str(CanonSmiles(smiles, useChiral=chiral))
def InchiToMol(inchi: str):
return MolToSmiles(MolFromInchi(inchi))
_FunctionWrapper_: Callable = SmilesToMol if request_index == 0 else InchiToMol
pass
# [1]: Initialization
print("-", self.addMolArray, "-" * 30)
max_size: int = len(MolArray)
self._RecordedMolArray = np.zeros(shape=(len(MolArray), 2), dtype=np.object_)
FalseLine: Dict[str, List[Union[int, str]]] = {'Index': [], 'Mol': []}
# [2]: Testing Smiles
timer: float = self._startNewProcess_()
for line in range(max_size):
try:
self._RecordedMolArray[line, :] = [MolArray[line], _FunctionWrapper_(MolArray[line])]
except (ValueError, TypeError):
FalseLine['Index'].append(line)
FalseLine['Mol'].append(MolArray[line])
warning(f" Input #{line} - Molecule: {MolArray[line]} is invalid")
if FalseLine['Index']:
print(f"Totally, these {mode} will be deleted: {FalseLine['Mol']}")
self._RecordedMolArray = np.delete(self._RecordedMolArray, obj=FalseLine['Index'], axis=0)
self._MolArray = self._RecordedMolArray[:, 1].tolist() if request_index == 1 or kwargs['canonicalize'] \
else self._RecordedMolArray[:, 0].tolist()
self._timer['add'] = perf_counter() - timer
def addMol(self, mol: str, mode: str = "SMILES", **kwargs: bool) -> None:
if mode not in ["SMILES", "InChi"]:
raise TypeError("The mode is in-valid, which should be either 'SMILES' or 'InChi'.")
inputFullCheck(value=mol, name="Molecule", dtype='str')
return self.addMolArray(MolArray=[mol], mode=mode, **kwargs)
def addMolFile(self, FilePath: str, mode: str = "SMILES", MoleculeCol: int = 0, sorting: bool = False,
ascending: bool = True, **kwargs: bool) -> None:
if mode not in ["SMILES", "InChi"]:
raise TypeError("The mode is in-valid, which should be either 'SMILES' or 'InChi'.")
database: ndarray = self._readCsvFile_(path=FilePath, Molecule=MoleculeCol, sorting=sorting,
ascending=ascending)
self.addMolArray(MolArray=database.tolist(), mode=mode, **kwargs)
return self._displayAfterReadCsv_()
def createInformation(self, verbose: bool = False, useAtoms: Optional[Union[List[str], str]] = None,
useBonds: Optional[Union[List[str], str]] = None, useDoubleTriple: bool = False) -> None:
"""
Implementation of converting SMILES into pre-defined data
:param verbose: Tracking progress
:type verbose: bool
:param useAtoms: Specify atom in the considered domain
:type useAtoms: str or List[str] or Tuple[str]
:param useBonds: Specify bond in the considered domain
:type useBonds: str or List[str] or Tuple[str]
        :param useDoubleTriple: Whether to take double and triple bonds into consideration. Since the trained
                                model was trained only on non-ring single bonds, setting this to True is risky
                                (default: False).
:type useDoubleTriple: bool
:return: None
"""
# Hyper-parameter Verification
if True:
if len(self._MolArray) == 0:
warning(" NotImplementedError: This function cannot be executed")
return None
if self.isDataAvailable(request="Info"):
warning(" NotImplementedError: InfoData has been initialized.")
return None
if useAtoms is not None:
if isinstance(useAtoms, str):
useAtoms = [useAtoms]
elif isinstance(useAtoms, (List, Tuple)):
useAtoms = list(sorted(set(useAtoms)))
else:
raise TypeError("useAtoms must be either string, or List, or Tuple, or None")
for idx, value in enumerate(useAtoms):
if not isinstance(value, str):
raise TypeError(f"useAtoms[{idx}] ({value}) must be a string")
if not value.isalpha():
raise TypeError(f"useAtoms[{idx}] ({value}) contains character only")
useAtoms[idx] = "".join(value.split()) # Remove all space
if useBonds is not None:
if isinstance(useBonds, str):
useBonds = [useBonds]
elif isinstance(useBonds, (List, Tuple)):
useBonds = sorted(list(set(useBonds)))
else:
raise TypeError("useBonds must be either string, or List, or Tuple, or None")
false_bond: List[str] = []
new_bonds: List[str] = []
for idx, value in enumerate(useBonds):
if not isinstance(value, str):
raise TypeError(f"useBonds[{idx}] ({value}) must be a string")
for character in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]:
if value.find(character) != -1:
raise TypeError(f"useBonds[{idx}] ({value}) contains character only")
useBonds[idx] = "".join(value.split()) # Remove all space
useBonds[idx] = value.replace("-", "-") # Converting all False Bond
useBonds[idx] = value.replace("#", "-") # Converting all False Bond
connection = value.find('-')
r_connection = value.rfind('-')
if connection == -1 or connection != r_connection:
warning(f"useBonds[{idx}] ({value}) is in-valid")
false_bond.append(value)
continue
new_bonds.append(f'{value[(connection + 1):]}-{value[0:connection]}')
useBonds = sorted(list(set(useBonds + new_bonds)))
if false_bond:
print("False Bond has been added:", false_bond)
from .Preprocessing import BinarySearch
for value in false_bond:
del useBonds[BinarySearch(array_1d=useBonds, value=value, getIndex=True)]
inputFullCheck(value=verbose, name="verbose", dtype='bool')
inputFullCheck(value=useDoubleTriple, name="useDoubleTriple", dtype='bool')
# [1]: Initialization
print("-" * 30, self.createInformation, "-" * 30)
start: float = perf_counter()
MolArrayWithHs: List[Mol] = [None] * len(self._MolArray)
BondCountWithHs: List[int] = [0] * len(self._MolArray)
for idx, smiles in enumerate(self._MolArray):
MolArrayWithHs[idx] = AddHs(MolFromSmiles(smiles))
BondCountWithHs[idx] = MolArrayWithHs[idx].GetNumBonds()
initTime: float = perf_counter() - start
InfoData: ndarray = np.zeros(shape=(sum(BondCountWithHs), len(self._PrebuiltInfoLabels)), dtype=np.object_)
FalseLine: List[int] = []
currentLine: int = 0
        cleaningRequestLoop: int = 1000  # Garbage collection is triggered every 1000 processed bonds
# [2]: Acquire true information
for idx, mol in enumerate(MolArrayWithHs):
if currentLine % cleaningRequestLoop == 0:
gc.collect()
if verbose:
print('Molecule:',self._MolArray[idx])
Kekulize(mol, clearAromaticFlags=True)
for bond in mol.GetBonds():
# [2.1]: Bond Validation
if bond.IsInRing() or (not useDoubleTriple and str(bond.GetBondType()) != 'SINGLE'):
FalseLine.append(currentLine)
currentLine += 1
continue
# [2.2]: Separate Bond & Re-Validation
TempMol = RWMol(mol)
BeginAtom, EndAtom = bond.GetBeginAtom(), bond.GetEndAtom()
atomType: List[str] = list(sorted([BeginAtom.GetSymbol(), EndAtom.GetSymbol()]))
bondType = f'{atomType[0]}-{atomType[1]}'
if useAtoms is not None:
if atomType[0] not in useAtoms and atomType[1] not in useAtoms:
FalseLine.append(currentLine)
currentLine += 1
continue
if useBonds is not None:
if bondType not in useBonds:
FalseLine.append(currentLine)
currentLine += 1
continue
TempMol.RemoveBond(BeginAtom.GetIdx(), EndAtom.GetIdx())
TempMol.GetAtomWithIdx(BeginAtom.GetIdx()).SetNoImplicit(True)
TempMol.GetAtomWithIdx(EndAtom.GetIdx()).SetNoImplicit(True)
SanitizeMol(TempMol) # Call SanitizeMol to update radicals (Used when kekulize before)
# Convert the two molecules into a SMILES string
FragA, FragB = sorted(MolToSmiles(TempMol).split('.'))
FragA = MolToSmiles(MolFromSmiles(MolToSmiles(MolFromSmiles(FragA))))
FragB = MolToSmiles(MolFromSmiles(MolToSmiles(MolFromSmiles(FragB))))
InfoData[currentLine, :] = [self._MolArray[idx], FragA, FragB, bond.GetIdx(), bondType]
currentLine += 1
del TempMol
if FalseLine:
FalseLine.sort()
InfoData: ndarray = np.delete(InfoData, obj=FalseLine, axis=0)
self.setData(value=InfoData, request='Info')
self._timer['add'] += perf_counter() - start
self._MolArray: List[str] = []
self._RecordedMolArray: Optional[ndarray] = None
print(f'Executing Time: {self._timer["add"]:.6f} (s) with Initialization Time: {initTime:.6f} (s)')
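    # Illustrative example of a single InfoData row produced above (values are indicative only):
    #   ["CC", "[CH3]", "[CH3]", 0, "C-C"]
    # i.e. the parent SMILES, the two radical fragments obtained by breaking the bond, the zero-based
    # bond index, and the bond type label.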
def preprocess(self, showConnection: bool = True, removeIdenticalRadicals: bool = True,
removeTwoSameFragments: bool = True) -> None:
"""
Implementation of converting self.infoData for adaptation
:param showConnection: Whether to make a duplication of A -> B + C to be A -> C + B
:type showConnection: bool
:param removeIdenticalRadicals: Whether to remove identical molecule in the close-range searching domain
:type removeIdenticalRadicals: bool
        :param removeTwoSameFragments: Applies together with showConnection. If True, reactions where B == C are removed
        :type removeTwoSameFragments: bool
:return: None
"""
# Hyper-parameter Verification
inputFullCheck(value=showConnection, name="showConnection", dtype='bool')
inputFullCheck(value=removeIdenticalRadicals, name="removeIdenticalRadicals", dtype='bool')
if not showConnection and not removeIdenticalRadicals:
warning(" No execution is implemented")
return None
inputFullCheck(value=removeTwoSameFragments, name="removeTwoSameFragments", dtype='bool')
print("-" * 35, self.preprocess, "-" * 35)
timer: float = perf_counter()
        InfoData: ndarray = self.getData(request="Info") if not self.isTargetReferenceAvailable() \
            else np.concatenate((self.getData(request="Info"), self.getTargetReference()), axis=1)
if removeIdenticalRadicals:
from .Preprocessing import RemoveRepeatedRadicals
InfoData: ndarray = RemoveRepeatedRadicals(database=InfoData, RadicalCols=self._radical,
MoleculeCol=self._mol, RemoveConnection=False)
if showConnection:
from .Preprocessing import DuplicateRadical
InfoData: ndarray = DuplicateRadical(database=InfoData, RadicalCols=self._radical,
RemoveTwoSameFragments=removeTwoSameFragments)
        if self.isTargetReferenceAvailable():
            self.setTargetReference(value=InfoData[:, InfoData.shape[1] - 1:].copy())
            self.setData(value=InfoData[:, :InfoData.shape[1] - 1].copy(), request="Info")
else:
self.setData(value=InfoData, request="Info")
self._timer['add'] += perf_counter() - timer
gc.collect()
return None
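    # Minimal usage sketch of the data-preparation path above (constructor arguments are assumed
    # defaults; only the method names come from this class):
    #   model = PredictModel(DatasetObject=None, GeneratorObject=None, TrainedModelMode=1)
    #   model.addMol("CCO", mode="SMILES")
    #   model.createInformation(useAtoms=["C", "H", "O", "N"])
    #   model.preprocess(showConnection=True, removeIdenticalRadicals=True)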
def addDefinedArray(self, database: ndarray, MoleculeCol: int = 0, RadicalCols: MULTI_COLUMNS = (1, 2),
BondIdxCol: int = 3, BondTypeCol: int = 4, TargetCol: int = None, isZeroIndex: bool = True,
sorting: bool = False) -> None:
"""
Implementation of Reading Molecule with Complete Information
:param database: The array of database
:type database: ndarray
:param MoleculeCol: The position of molecule in database
:type MoleculeCol: int
:param RadicalCols: The position of radical columns in database
:type RadicalCols: List[int] or Tuple[int]
:param BondIdxCol: The position of bond index in database
:type BondIdxCol: int
        :param BondTypeCol: The position of the bond type in database
:type BondTypeCol: int
:param TargetCol: The position of bond dissociation energy in database
:type TargetCol: int or None
        :param isZeroIndex: Whether the bond indices already start from zero.
                            If False, every bond index will be decreased by one (1)
        :type isZeroIndex: bool
        :param sorting: Whether to sort the data. Sorting reduces the time complexity from O(N*K) to
                        O(N + N log N), where K is the number of bonds and N is the number of rows
        :type sorting: bool
:return: None
"""
# Hyper-parameter Verification
if True:
database: ndarray = _FixData_(database=database)
maxSize: int = database.shape[1]
EvaluateInputPosition(maxSize=maxSize, MoleculeCol=MoleculeCol, RadicalCols=RadicalCols,
BondIdxCol=BondIdxCol, BondTypeCol=BondTypeCol, TargetCol=TargetCol)
column: List[int] = allocateFeatureLocation(MoleculeCol=MoleculeCol, RadicalCols=RadicalCols,
BondIdxCol=BondIdxCol, BondTypeCol=BondTypeCol)
inputFullCheck(value=isZeroIndex, name='isZeroIndex', dtype='bool')
inputFullCheck(value=sorting, name='sorting', dtype='bool')
print("-" * 31, self.addDefinedArray, "-" * 32)
timer: float = self._startNewProcess_()
if not isZeroIndex:
database[:, BondIdxCol] = np.array(database[:, BondIdxCol], dtype=np.uint16) - 1
if sorting:
database = SortWithIdx(database=database, IndexCol=MoleculeCol, SortCol=BondIdxCol, IndexSorting=sorting)
self.setData(value=np.array(database[:, column], dtype=np.object_), request="Info")
if TargetCol is not None:
self.setData(value=np.array(database[:, TargetCol:TargetCol + 1], dtype=np.float32), request="Target")
self._timer['add']: float = perf_counter() - timer
def addDefinedFile(self, FilePath: str, MoleculeCol: int = 0, RadicalCols: MULTI_COLUMNS = (1, 2),
BondIdxCol: int = 3, BondTypeCol: int = 4, TargetCol: int = None, isZeroIndex: bool = True,
sorting: bool = False, ascending: bool = True) -> None:
database: ndarray = self._readCsvFile_(path=FilePath, Molecule=MoleculeCol, Radicals=RadicalCols,
BondIndex=BondIdxCol, BondType=BondTypeCol, Target=TargetCol,
sorting=sorting, ascending=ascending)
self.addDefinedArray(database=database, MoleculeCol=MoleculeCol, RadicalCols=RadicalCols, BondIdxCol=BondIdxCol,
BondTypeCol=BondTypeCol, TargetCol=TargetCol, isZeroIndex=isZeroIndex, sorting=False)
return self._displayAfterReadCsv_()
# [3.2]: Multi-Adding Method: ----------------------------------------------------------------------------------
def addArray_BondIndex(self, database: ndarray, MoleculeCol: int, BondIdxCol: int, TargetCol: int = None,
isZeroIndex: bool = True, simplifyHydro: bool = True, doubleTriple: bool = False,
sorting: bool = False, ascending: bool = True) -> None:
"""
Implementation of Reading Molecule by SMILES and Bond Index
:param database: The array of database
:type database: ndarray or pd.DataFrame
:param MoleculeCol: The position of molecule in database
:type MoleculeCol: int
:param BondIdxCol: The position of bond index in database
:type BondIdxCol: int
:param TargetCol: The position of bond dissociation energy in database
:type TargetCol: int or None
        :param isZeroIndex: Whether the bond indices already start from zero.
                            If False, every bond index will be decreased by one (1)
        :type isZeroIndex: bool
        :param sorting: Whether to sort the data. Sorting reduces the time complexity from O(N*K) to
                        O(N + N log N), where K is the number of bonds and N is the number of rows
        :type sorting: bool
:param ascending: Whether to sort ascending sorting
:type ascending: bool
        :param simplifyHydro: Whether to omit ordinary hydrogens from the generated SMILES (default: True)
        :type simplifyHydro: bool
        :param doubleTriple: Whether to allow double and triple bonds to be treated as breakable (default: False)
        :type doubleTriple: bool
:return: None
"""
# Hyper-parameter Verification
if True:
database: ndarray = _FixData_(database=database)
maxSize: int = database.shape[1]
EvaluateInputPosition(maxSize=maxSize, MoleculeCol=MoleculeCol, BondIdxCol=BondIdxCol, TargetCol=TargetCol)
label: List[int] = allocateFeatureLocation(MoleculeCol=MoleculeCol, RadicalCols=None, BondIdxCol=BondIdxCol,
BondTypeCol=None, TargetCol=None)
inputFullCheck(value=isZeroIndex, name="isZeroIndex", dtype='bool')
inputFullCheck(value=sorting, name="sorting", dtype='bool')
inputFullCheck(value=simplifyHydro, name="simplifyHydro", dtype='bool')
inputFullCheck(value=doubleTriple, name="doubleTriple", dtype='bool')
print("-" * 30, self.addArray_BondIndex, "-" * 30)
# [1]: Initialization
timer: float = self._startNewProcess_()
if sorting:
inputFullCheck(value=ascending, name="ascending", dtype='bool')
database = SortWithIdx(database=database, IndexCol=MoleculeCol, SortCol=BondIdxCol, IndexSorting=sorting,
IndexReverse=not ascending)
InfoData: ndarray = np.zeros(shape=(database.shape[0], len(self._PrebuiltInfoLabels)), dtype=np.object_)
InfoData[:, [self._mol, self._bondIndex]] = database[:, label]
if not isZeroIndex:
InfoData[:, self._bondIndex] -= 1
IndexData: List[Tuple[int, str]] = GetIndexOnArrangedData(database=InfoData, column=self._mol, get_last=True)
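# Note (added): GetIndexOnArrangedData is assumed to return (start_row, SMILES) boundaries for each
# contiguous block of identical molecules, so the loop below processes one molecule block at a time.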
FalseLine: List[int] = []
AddLine: Tuple[int, ...] = (self._radical[0], self._radical[1], self._bondType)
# [2]: Retrieving radicals and bond type
for molSet in range(0, len(IndexData) - 1):
begin, end = IndexData[molSet][0], IndexData[molSet + 1][0]
try:
mol: Mol = AddHs(MolFromSmiles(str(InfoData[begin, self._mol])))
except (ValueError, TypeError, RuntimeError):
warning(f" At row #[{begin},{end - 1}]: Molecule: {InfoData[begin, self._mol]} (False Molecule)")
for line in range(begin, end):
FalseLine.append(line)
continue
InfoData[begin:end, AddLine], Error = \
getRadicalsByBondIdx(ParentMol=mol, bondIdx=InfoData[begin:end, self._bondIndex],
simplifyHydro=simplifyHydro, reverse=False, doubleTriple=doubleTriple)
if len(Error[0]) == 0:
continue
for idx, value in enumerate(Error[0]):
FalseLine.append(begin + value)
warning(f" False Bond Index (={InfoData[begin + value, self._bondIndex]} at row #{begin + value}: "
f"Molecule: {InfoData[begin, self._mol]}")
if TargetCol is not None:
TrueReference: ndarray = np.array(database[:, TargetCol:TargetCol + 1], dtype=np.float32)
InfoData, TrueReference = _tuneFinalDataset_(InfoData=InfoData, FalseLine=FalseLine,
TrueReference=TrueReference)
else:
InfoData, TrueReference = _tuneFinalDataset_(InfoData=InfoData, FalseLine=FalseLine, TrueReference=None)
self.setData(value=InfoData, request='Info')
self.setTargetReference(value=TrueReference)
self._timer['add'] = perf_counter() - timer
def addFile_BondIndex(self, FilePath: str, MoleculeCol: int, BondIdxCol: int, TargetCol: int = None,
isZeroIndex: bool = True, simplifyHydro: bool = True, doubleTriple: bool = False,
sorting: bool = False, ascending: bool = True) -> None:
database: ndarray = self._readCsvFile_(path=FilePath, Molecule=MoleculeCol, BondIndex=BondIdxCol,
Target=TargetCol, sorting=sorting, ascending=ascending)
self.addArray_BondIndex(database=database, MoleculeCol=self._mol, BondIdxCol=self._bondIndex,
TargetCol=TargetCol, isZeroIndex=isZeroIndex, simplifyHydro=simplifyHydro,
doubleTriple=doubleTriple, sorting=False, ascending=ascending)
return self._displayAfterReadCsv_()
def addArray_AtomIndex(self, database: INPUT_FOR_DATABASE, MoleculeCol: int, AtomCols: MULTI_COLUMNS,
TargetCol: int = None, isZeroIndex: Union[bool, List[bool], Tuple[bool, ...]] = True,
simplifyHydro: bool = True, doubleTriple: bool = False, sorting: bool = False,
ascending: bool = True) -> None:
"""
Implementation of Reading Molecule by SMILES and Atomic Index
:param database: The array of database
:type database: ndarray or pd.DataFrame
:param MoleculeCol: The position of molecule in database
:type MoleculeCol: int
:param AtomCols: The position of atom columns in database
:type AtomCols: List[int] or Tuple[int]
:param TargetCol: The position of bond dissociation energy in database
:type TargetCol: int or None
:param isZeroIndex: Whether the atom indices already start from zero; either a single bool or a pair of
bools (one per atom column). If False, the corresponding atom indices are decreased by one (1)
:type isZeroIndex: bool or List[bool] or Tuple[bool, ...]
:param simplifyHydro: Whether to omit ordinary hydrogens from the radical SMILES (default: True)
:type simplifyHydro: bool
:param doubleTriple: Whether to treat double and triple bonds as breakable
(default: False)
:type doubleTriple: bool
:param sorting: Whether to sort the rows. Sorting reduces the time complexity from O(N*K) to
O(N + N*logN), where K is the number of bonds and N is the number of rows
:type sorting: bool
:param ascending: Whether to sort in ascending order
:type ascending: bool
:return: None
"""
# Hyper-parameter Verification
if True:
database: ndarray = _FixData_(database=database)
maxSize: int = database.shape[1]
EvaluateInputPosition(maxSize=maxSize, MoleculeCol=MoleculeCol, AtomCols=AtomCols, TargetCol=TargetCol)
inputFullCheck(value=isZeroIndex, name='isZeroIndex', dtype='List-bool-Tuple', delimiter='-')
if isinstance(isZeroIndex, bool):
modified_isZeroIndex: Tuple[bool, bool] = (isZeroIndex, isZeroIndex)
else:
if len(isZeroIndex) != 2:
raise TypeError('isZeroIndex must have two boolean inputs')
inputFullCheck(value=isZeroIndex[0], name='isZeroIndex[0]', dtype='bool')
inputFullCheck(value=isZeroIndex[1], name='isZeroIndex[1]', dtype='bool')
modified_isZeroIndex: Tuple[bool, bool] = isZeroIndex
inputFullCheck(value=sorting, name='sorting', dtype='bool')
inputFullCheck(value=simplifyHydro, name='simplifyHydro', dtype='bool')
inputFullCheck(value=doubleTriple, name='doubleTriple', dtype='bool')
warning(f" This method is not robust, please re-use {self.addArray_Radicals} to validate your bond index")
# [1]: Initialization
print("-" * 30, self.addArray_AtomIndex, "-" * 30)
timer: float = self._startNewProcess_()
if sorting:
inputFullCheck(value=ascending, name='ascending', dtype='bool')
database = ArraySorting(database=database, column=MoleculeCol, reverse=not ascending)
InfoData: ndarray = np.zeros(shape=(database.shape[0], len(self._PrebuiltInfoLabels)), dtype=np.object_)
InfoData[:, self._mol] = database[:, MoleculeCol]
for index in range(0, len(AtomCols)):
if not modified_isZeroIndex[index]:
database[:, AtomCols[index]] = np.array(database[:, AtomCols[index]], dtype=np.uint16) - 1
FalseLine: List[int] = []
IndexData: List[Tuple[int, str]] = GetIndexOnArrangedData(database=database, column=self._mol, get_last=True)
AddLine: Tuple[int, ...] = (self._radical[0], self._radical[1], self._bondType)
# [2]: Retrieving radicals
for molSet in range(0, len(IndexData) - 1):
begin, end = IndexData[molSet][0], IndexData[molSet + 1][0]
try:
mol: Mol = AddHs(MolFromSmiles(str(InfoData[begin, self._mol])))
except (ValueError, TypeError, RuntimeError):
warning(f" At row #[{begin},{end - 1}]: Molecule: {InfoData[begin, self._mol]} (False Molecule)")
for line in range(begin, end):
FalseLine.append(line)
continue
BondIndex, Error = getBondIndexByAtom(ParentMol=mol, atomicIndex=database[begin:end, AtomCols])
if len(Error[0]) != 0:
for idx, value in enumerate(Error[0]):
FalseLine.append(begin + value)
warning(f" False Atomic Index {Error[1][idx]} at row {begin + value} with "
f"molecule: {InfoData[begin, self._mol]}")
InfoData[begin:end, AddLine], NewError = \
getRadicalsByBondIdx(ParentMol=mol, bondIdx=BondIndex, simplifyHydro=simplifyHydro, reverse=False,
doubleTriple=doubleTriple)
InfoData[begin:end, self._bondIndex] = BondIndex
if len(NewError[0]) == 0:
continue
for idx, value in enumerate(NewError[0]):
FalseLine.append(begin + value)
warning(f" False Bond Index (={InfoData[begin + value, self._bondIndex]} at row #{begin + value}: "
f"Molecule: {InfoData[begin, self._mol]}")
if TargetCol is not None:
TrueReference: ndarray = np.array(database[:, TargetCol:TargetCol + 1], dtype=np.float32)
InfoData, TrueReference = _tuneFinalDataset_(InfoData=InfoData, FalseLine=FalseLine,
TrueReference=TrueReference)
if sorting:
total: ndarray = SortWithIdx(database=np.concatenate(arrays=(InfoData, TrueReference), axis=1),
IndexCol=self._mol, SortCol=self._bondIndex, IndexSorting=sorting,
IndexReverse=not ascending)
TrueReference = total[:, total.shape[1] - 1:].copy()
InfoData = np.delete(arr=total, obj=[total.shape[1] - 1], axis=1)
del total
else:
InfoData, TrueReference = _tuneFinalDataset_(InfoData=InfoData, FalseLine=FalseLine, TrueReference=None)
if sorting:
InfoData = SortWithIdx(database=InfoData, IndexCol=self._mol, SortCol=self._bondIndex,
IndexSorting=sorting, IndexReverse=not ascending)
self.setData(value=InfoData, request='Info')
self.setTargetReference(value=TrueReference)
self._timer['add'] = perf_counter() - timer
def addFile_AtomIndex(self, FilePath: str, MoleculeCol: int, AtomCols: MULTI_COLUMNS, TargetCol: int = None,
isZeroIndex: Union[bool, List, Tuple] = True, simplifyHydro: bool = True,
doubleTriple: bool = False, sorting: bool = False, ascending: bool = True) -> None:
database: ndarray = self._readCsvFile_(path=FilePath, Molecule=MoleculeCol, Radicals=AtomCols, Target=TargetCol,
sorting=sorting, ascending=ascending)
self.addArray_AtomIndex(database=database, MoleculeCol=self._mol, AtomCols=self._radical, TargetCol=TargetCol,
isZeroIndex=isZeroIndex, simplifyHydro=simplifyHydro, doubleTriple=doubleTriple,
sorting=sorting, ascending=ascending)
return self._displayAfterReadCsv_()
def addArray_Radicals(self, database: INPUT_FOR_DATABASE, MoleculeCol: int, RadicalCols: MULTI_COLUMNS,
TargetCol: int = None, sorting: bool = False, rematch: bool = True,
ascending: bool = True) -> None:
"""
Implementation of Reading Molecule by SMILES and Radicals
:param database: The array of database
:type database: ndarray or pd.DataFrame
:param MoleculeCol: The position of molecule in database
:type MoleculeCol: int
:param RadicalCols: The position of radical columns in database
:type RadicalCols: List[int] or Tuple[int]
:param TargetCol: The position of bond dissociation energy in database
:type TargetCol: int or None
:param rematch: Whether to re-verify that the two matched radicals are correct (default: True).
:type rematch: bool
:param sorting: Whether to sort the rows. Sorting reduces the time complexity from O(N*K) to
O(N + N*logN), where K is the number of bonds and N is the number of rows
:type sorting: bool
:param ascending: Whether to sort in ascending order
:type ascending: bool
:return: None
"""
# Hyper-parameter Verification
if True:
database: ndarray = _FixData_(database=database)
maxSize: int = database.shape[1]
EvaluateInputPosition(maxSize=maxSize, MoleculeCol=MoleculeCol, RadicalCols=RadicalCols, TargetCol=TargetCol)
label: List[int] = allocateFeatureLocation(MoleculeCol=MoleculeCol, RadicalCols=RadicalCols, BondIdxCol=None,
BondTypeCol=None, TargetCol=None)
inputFullCheck(value=rematch, name='rematch', dtype='bool')
inputFullCheck(value=sorting, name='sorting', dtype='bool')
# [1]: Initialization
print("-" * 30, self.addArray_Radicals, "-" * 30)
timer: float = self._startNewProcess_()
if sorting:
inputFullCheck(value=ascending, name='ascending', dtype='bool')
database = ArraySorting(database=database, column=MoleculeCol, reverse=not ascending)
InfoData: ndarray = np.zeros(shape=(database.shape[0], len(self._PrebuiltInfoLabels)), dtype=np.object_)
InfoData[:, [self._mol, self._radical[0], self._radical[1]]] = database[:, label]
# [2]: Retrieving radicals
IndexData: List[Tuple[int, str]] = GetIndexOnArrangedData(database=database, column=MoleculeCol, get_last=True)
ReversedRadicalCols: Tuple[int, int] = (RadicalCols[1], RadicalCols[0])
ReversedLocation: List[int] = [0, 2, 1, 3, 4]
FalseLine: List[int] = []
AddLine: List[int] = [self._bondIndex, self._bondType]
for row in range(0, len(IndexData) - 1):
begin, end = IndexData[row][0], IndexData[row + 1][0]
try:
mol: Mol = AddHs(MolFromSmiles(str(InfoData[begin, self._mol])))
except (RuntimeError, ValueError, TypeError):
warning(f" At row #[{begin},{end - 1}]: Molecule: {InfoData[begin, self._mol]} (False Molecule)")
for line in range(begin, end):
FalseLine.append(line)
continue
gc.collect()
for current in range(begin, end):
try:
FragX = AddHs(MolFromSmiles(str(InfoData[current, self._radical[0]])))
FragY = AddHs(MolFromSmiles(str(InfoData[current, self._radical[1]])))
except (RuntimeError, ValueError, TypeError):
warning(f" At row #{current}: Two Fragments are incorrect: "
f"{InfoData[current, self._radical[0]]} <-> {InfoData[current, self._radical[1]]}")
FalseLine.append(current)
continue
if current != begin:
if ArrayEqual(database[current, RadicalCols], database[current - 1, RadicalCols]):
InfoData[current, self._bondIndex:] = InfoData[current - 1, self._bondIndex:]
elif ArrayEqual(database[begin, RadicalCols], database[current - 1, ReversedRadicalCols]):
InfoData[current, self._radical[0]:] = InfoData[begin - 1, ReversedLocation]
BondIndex: int = getBondIndex(ParentMol=mol, FragMolX=FragX, FragMolY=FragY, current=0,
maxBonds=int(mol.GetNumBonds()), rematch=rematch)
if BondIndex == -1:
warning(f'At row {current}: Remove ChiralTag Specification.')
x, y, z = \
str(CanonSmiles(str(database[begin, MoleculeCol]), useChiral=False)), \
str(CanonSmiles(str(database[current, RadicalCols[0]]), useChiral=False)), \
str(CanonSmiles(str(database[current, RadicalCols[1]]), useChiral=False))
mol, FragX, FragY = AddHs(MolFromSmiles(x)), AddHs(MolFromSmiles(y)), AddHs(MolFromSmiles(z))
BondIndex: int = getBondIndex(ParentMol=mol, FragMolX=FragX, FragMolY=FragY, current=0,
maxBonds=int(mol.GetNumBonds()), rematch=rematch)
if BondIndex == -1:
warning(f" No bond was found at molecule {InfoData[begin, self._mol]} with this fragments: "
f" {InfoData[current, self._radical[0]]} <-> {InfoData[current, self._radical[1]]}")
FalseLine.append(current)
continue
bond = mol.GetBondWithIdx(BondIndex)
atomType = sorted([bond.GetBeginAtom().GetSymbol(), bond.GetEndAtom().GetSymbol()])
InfoData[current, AddLine] = [BondIndex, f'{atomType[0]}-{atomType[1]}']
if TargetCol is not None:
TrueReference: ndarray = np.array(database[:, TargetCol:TargetCol + 1], dtype=np.float32)
InfoData, TrueReference = _tuneFinalDataset_(InfoData=InfoData, FalseLine=FalseLine,
TrueReference=TrueReference)
if sorting:
total = SortWithIdx(database=np.concatenate(arrays=(InfoData, TrueReference), axis=1),
IndexCol=self._mol, SortCol=self._bondIndex, IndexSorting=sorting,
IndexReverse=not ascending)
TrueReference = total[:, total.shape[1] - 1:].copy()
InfoData = np.delete(arr=total, obj=[total.shape[1] - 1], axis=1)
del total
else:
InfoData, TrueReference = _tuneFinalDataset_(InfoData=InfoData, FalseLine=FalseLine, TrueReference=None)
if sorting:
InfoData = SortWithIdx(database=InfoData, IndexCol=self._mol, SortCol=self._bondIndex,
IndexSorting=sorting, IndexReverse=not ascending)
self.setData(value=InfoData, request='Info')
self.setTargetReference(value=TrueReference)
self._timer['add'] = perf_counter() - timer
def addFile_Radicals(self, FilePath: str, MoleculeCol: int, RadicalCols: MULTI_COLUMNS, TargetCol: int = None,
sorting: bool = False, rematch: bool = True, ascending: bool = True) -> None:
database: ndarray = self._readCsvFile_(path=FilePath, Molecule=MoleculeCol, Radicals=RadicalCols,
Target=TargetCol, sorting=sorting, ascending=ascending)
self.addArray_Radicals(database=database, MoleculeCol=self._mol, RadicalCols=self._radical,
TargetCol=TargetCol, sorting=False, rematch=rematch, ascending=ascending)
return self._displayAfterReadCsv_()
# [4]: Prediction Data: ------------------------------------------------------------------------------------------
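# Workflow note (added): a typical (hypothetical) prediction run chains the steps in this section,
# e.g. addFile_BondIndex(...) or addFile_Radicals(...) to load the bonds, createData() to build the
# feature matrices, and predict(output='result.csv') to export the BDE estimates ('result.csv' is an
# illustrative file name, not part of the original API).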
def createData(self, cleanMemoryLoop: int = 1000, activateBondTypeData: bool = True,
activateExtraData: bool = True, activateFingerprintData: bool = True,
activateSpecificFingerprint: Tuple[bool, ...] = (True, True)) -> None:
"""
Implementation of create data and features
:param cleanMemoryLoop: Number of iterations between memory cleaning / progress updates (default: 1000)
:type cleanMemoryLoop: int
:param activateBondTypeData: Whether to generate the bond-type data block.
:type activateBondTypeData: bool
:param activateExtraData: Whether to generate the extra (non-bond-type) data block.
:type activateExtraData: bool
:param activateFingerprintData: Whether to generate the fingerprint data block.
:type activateFingerprintData: bool
:param activateSpecificFingerprint: Whether to activate/deactivate specific fingerprint blocks
:type activateSpecificFingerprint: Tuple[bool, bool]
:return: None
"""
# Hyper-parameter Verification
if True:
if not self.isDataAvailable(request='Info'):
if self._MolArray is not None:
warning(' No data available. Calling createInformation() automatically.')
self.createInformation()
else:
raise TypeError("No data available. Need to reconsider your code")
else:
if self.isTargetReferenceAvailable():
if self.getData(request='Info').shape[0] != self.getTargetReference().shape[0]:
raise ValueError("Error Source Code: The number of observations is not compatible.")
if self._dataset.getInputKey() == 1:
warning(" We are assuming you try to predict with pre-defined environment. However, the result proposed"
" later is not accurately robust. \nMAE and RMSE would probably be extra 10% - 20% higher, as"
"stereochemistry in this case will be denoted as zero as well as some cis-trans encoding.")
warning(" This implementation is only works if that molecule is extremely small such as CH4 or C2H6.")
self._generator.refreshAttribute()
# [1]: Generate Features
print("-" * 80)
print('The trained model is trying to create data and features. Please wait for a sec ...')
timer: float = perf_counter()
self._generator.activate(FingerprintData=activateFingerprintData, ExtraData=activateExtraData,
BondTypeData=activateBondTypeData, SpecificFingerprint=activateSpecificFingerprint)
self._generator.createData(cleaningLoopMemory=cleanMemoryLoop, DataCleaning=False)
self._timer["create"] = perf_counter() - timer
print(f'Executing Time for Data Creation Time: {self._timer["create"]:.6f}s')
# [2]: Pre-processing data
timer: float = perf_counter()
self._dataset.testPrep(UnlockKey=self.getKey())
self._timer["process"] = perf_counter() - timer
print(f'Executing Time for Processing Time: {self._timer["process"]:.6f}s')
gc.collect()
def _getLabelsOfLastLayer_(self) -> List[str]:
return self.TF_model.getLabelsOfLastLayer()
def _verifyPredict_(self, standardize: bool, getLastLayer: bool, force: bool, Sfs: int) -> None:
if not (self.isDataAvailable(request="Info") and self.isDataAvailable(request="CData") and
self.isDataAvailable(request="SData")):
raise ValueError("No pre-defined value or information was found")
inputFullCheck(value=standardize, name="standardize", dtype='bool')
if not self._dataset.getIsContainedFragmentedRadicals() and standardize:
warning("The following input key does not allow standardization, thus disabling the standardization.")
standardize = False
inputFullCheck(value=getLastLayer, name="getLastLayer", dtype='bool')
if self.getData(request='CData').shape[0] > int(3e5) and getLastLayer:
warning(' Your data file contains too many samples; retrieving the full last-layer output may be slow.')
sleep(1e-4)
if self._isStandardized and standardize:
warning(" Your current database has been standardized once. Please be careful.")
inputFullCheck(value=force, name="force", dtype='bool')
inputCheckRange(value=Sfs, name="Sfs", maxValue=64, minValue=0)
def _predictFirstTime_(self, force: bool, standardize: bool, get_last_layer: bool):
CData, SData = self.getData(request="CData"), self.getData(request="SData")
if force or (not force and self.y_pred is None):
self.y_pred = self.TF_model.predict(CData=CData, SData=SData, reverse=False)
else:
warning(" We already have the prediction. Disable prediction")
sleep(1e-4)
if standardize:
print("However, standardization can also be allowed")
if not get_last_layer:
return None
if force or (not force and self.InterOutput is None):
print("AIP-BDET is retrieved the last layer value: ...")
self.InterOutput = self.TF_model.getLastLayerInput(CData=CData, SData=SData, reverse=False)
else:
warning(" We already have the result at the final layer. Disable prediction")
sleep(1e-4)
if standardize:
print("However, standardization can also be allowed")
return None
def _standardize(self, get_last_layer: bool) -> None:
"""
Implementation of standardizing predictions: A -> B + C is treated the same as A -> C + B
:param get_last_layer: Whether to retrieve the last training layer (attached to self.predict())
:type get_last_layer: bool
:return: None
"""
start: float = perf_counter()
CData, SData, TargetReference = self.getFeatures()
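# Standardization note (added): the forward prediction and the fragment-swapped (reverse=True)
# prediction are averaged, so predicting A -> B + C and A -> C + B yields the same value.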
self.y_pred /= 2
self.y_pred += self.TF_model.predict(CData=CData, SData=SData, reverse=True) / 2
if get_last_layer:
print("AIP-BDET is retrieved the last layer value: ...")
self.InterOutput /= 2
self.InterOutput += self.TF_model.getLastLayerInput(CData=CData, SData=SData, reverse=True) / 2
gc.collect()
exec_time: float = perf_counter() - start
self._timer["predictFunction"] += exec_time
self._timer["predictMethod"] += exec_time
def _exportPrediction_(self, output: str, getLastLayer: bool, convertKJ_eV: int, sorting: bool, bde_sorting: bool,
ascending: bool, Sfs: Optional[int], display: bool) -> None:
if True:
inputFullCheck(value=output, name="output", dtype='str-None', delimiter='-')
inputFullCheck(value=bde_sorting, name="bde_sorting", dtype='bool')
inputFullCheck(value=sorting, name="molecule_sorting", dtype='bool')
inputCheckRange(value=Sfs, name="Sfs", maxValue=64, minValue=0)
inputCheckRange(value=convertKJ_eV, name="convertKJ_eV", maxValue=4, minValue=0)
print("Result Converting: ...")
# [0]: Preparation
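# Unit-conversion note (added): the factors below assume the raw prediction is in kcal/mol.
# Mode 1 multiplies by 4.184 (thermochemical kcal -> kJ), mode 2 by 4.1868 (international-table
# kcal -> kJ), and mode 3 divides by ~23.0605 (kcal/mol per eV) to obtain eV.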
if convertKJ_eV == 1:
self.y_pred *= 4.184
elif convertKJ_eV == 2:
self.y_pred *= 4.1868
elif convertKJ_eV == 3:
# self.y_pred *= 1 / 23.0605476253
self.y_pred *= 1 / 23.0605
# [1]: Sort position: Calculate position needed for sorting BDE only
InfoData, EnvData, TargetData = self.getInformationData()
InfoLabels, EnvLabels, TargetLabels = self.getInformationLabels()
max_uint8: int = np.iinfo(np.uint8).max
self.BuiltDataFrame: pd.DataFrame = pd.DataFrame(data=InfoData, index=None, columns=InfoLabels)
SortingPosition: int = InfoData.shape[1] + 1
maxValue = np.uint8 if self.BuiltDataFrame[InfoLabels[self._bondIndex]].max() < max_uint8 else np.uint16
self.BuiltDataFrame[InfoLabels[self._bondIndex]] = \
self.BuiltDataFrame[InfoLabels[self._bondIndex]].astype(maxValue)
if EnvData is not None:
for idx, sorting_label in enumerate(EnvLabels):
self.BuiltDataFrame[sorting_label] = EnvData[:, idx]
SortingPosition += EnvData.shape[1]
maxValue = np.uint8 if self.BuiltDataFrame[EnvLabels[-1]].max() < max_uint8 else np.uint16
self.BuiltDataFrame[EnvLabels[-1]] = self.BuiltDataFrame[EnvLabels[-1]].astype(maxValue)
# [2]: Generate DataFrame
if self.isTargetReferenceAvailable():
target: ndarray = self.getTargetReference().astype(np.float32)
self.BuiltDataFrame[self._SavedPredLabels[0]] = target if Sfs is None else target.round(Sfs)
self.BuiltDataFrame[self._SavedPredLabels[1]] = self.y_pred if Sfs is None else self.y_pred.round(Sfs)
self.BuiltDataFrame[self._SavedPredLabels[2]] = np.absolute(target - self.y_pred)
else:
self.BuiltDataFrame[self._SavedPredLabels[1]] = self.y_pred if Sfs is None else self.y_pred.round(Sfs)
if getLastLayer:
self.BuiltDataFrame[self._getLabelsOfLastLayer_()] = \
self.InterOutput if Sfs is None else self.InterOutput.astype(np.float32)
# [2.x]: Extra Work in needed
if sorting:
sorting_label = [InfoLabels[self._mol], self.BuiltDataFrame.columns[SortingPosition]] \
if bde_sorting else [InfoLabels[self._mol], InfoLabels[self._bondIndex]]
ascend: List[bool] = [ascending] * len(sorting_label)
if bde_sorting:
ascend[0] = input("Do you want the molecule to be sorted ascending (Yes/No)").lower()[0] in ('y', 't')
self.BuiltDataFrame.sort_values(sorting_label, inplace=True, kind='mergesort', ascending=ascend)
if output is not None:
ExportFile(DataFrame=self.BuiltDataFrame, FilePath=output)
if self.y_pred.shape[0] <= 125 and display:
from sys import maxsize
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
np.set_printoptions(threshold=maxsize)
print(self.BuiltDataFrame[self._PrebuiltInfoLabels + [self._SavedPredLabels[1]]])
return None
def _displayPredictionTiming_(self, build_dataframe: bool) -> None:
print('-' * 80)
print('The AIP-BDET has finished prediction. Please give us some credit')
size: int = self.getData(request="Info").shape[0]
convertToBondTime: Callable = lambda t: 1e3 * t / size
speed: str = '-~-~-> Speed'
print(f'Adding Dataset: {self._timer["add"]:.6f} (s) '
f'{speed}: {convertToBondTime(self._timer["add"]):.6f} (ms/bond)')
print(f'Create Features: {self._timer["create"]:.6f} (s) '
f'{speed}: {convertToBondTime(self._timer["create"]):.6f} (ms/bond)')
print(f'Process Dataset: {self._timer["process"]:.6f} (s) '
f'{speed}: {convertToBondTime(self._timer["process"]):.6f} (ms/bond)')
print(f'Prediction Time Only: {self._timer["predictFunction"]:.6f} (s) '
f'{speed}: {convertToBondTime(self._timer["predictFunction"]):.6f} (ms/bond)')
if build_dataframe:
timing: float = self._timer["predictMethod"] - self._timer["predictFunction"]
print(f'Constructing DataFrame: {timing:.6f} (s) {speed}: {convertToBondTime(timing):.6f} (ms/bond)')
print(f'Full Process: {self._getProcessTime_():.6f} (s) '
f'{speed}: {convertToBondTime(self._getProcessTime_()):.6f} (ms/bond)')
return None
def predict(self, output: str = None, display: bool = True, standardize: bool = False, getLastLayer: bool = False,
force: bool = False, Sfs: int = 6, bde_sorting: bool = False, sorting: bool = False,
ascending: bool = True, convertKJ_eV: int = 0, build_dataframe: bool = True) -> Optional[pd.DataFrame]:
"""
Implementation of predicting values
:param output: The path of the output file (None to skip export)
:type output: str
:param display: Whether to print the prediction DataFrame (only active for at most 125 prediction rows)
:type display: bool
:param standardize: Whether to standardize all prediction (BDE)
:type standardize: bool
:param bde_sorting: Whether to sort BDE by molecule
:type bde_sorting: bool
:param sorting: Whether to sort by molecule along with index
:type sorting: bool
:param ascending: Whether to sort in ascending order
:type ascending: bool
:param force: Force the result to be updated
:type force: bool
:param getLastLayer: Whether to retrieve last layer
:type getLastLayer: bool
:param Sfs: Number of decimal places used when rounding the results
:type Sfs: int
:param convertKJ_eV: The mode for conversion to other unit
:type convertKJ_eV: int
:param build_dataframe: Whether to export dataframe
:type build_dataframe: bool
:return: pd.DataFrame
"""
# [0]: Hyper-parameter Verification
self._verifyPredict_(standardize=standardize, getLastLayer=getLastLayer, force=force, Sfs=Sfs)
# [1]: Predict target
print("-" * 30, self.predict, "-" * 30)
print('AIP-BDET is predicting data. Please wait a few seconds ...')
start: float = perf_counter()
self._predictFirstTime_(force=force, standardize=standardize, get_last_layer=getLastLayer)
if not self._dataset.getIsContainedFragmentedRadicals():
warning(f" There are no benefits or performance gained from standardization "
f"if not contained radical fragments. Thus, disable standardization")
standardize: bool = False
# [2]: Standardize target
if standardize:
print("AIP-BDET is standardizing: ...")
self._isStandardized: bool = True
self._standardize(get_last_layer=getLastLayer)
self._timer["predictFunction"]: float = perf_counter() - start
if build_dataframe:
self._exportPrediction_(output=output, getLastLayer=getLastLayer, convertKJ_eV=convertKJ_eV,
sorting=sorting, bde_sorting=bde_sorting, ascending=ascending, Sfs=Sfs,
display=display)
self._timer["predictMethod"] = perf_counter() - start
self._displayPredictionTiming_(build_dataframe=build_dataframe)
return self.BuiltDataFrame
# [5]: Other Methods ----------------------------------------------------------------------------------------------
# [5.1]: Density Estimation & Visualization -----------------------------------------------------------------------
def _getCurrentCPosition_(self):
return self._dataset.getCurrentCLabelsInfo()[0], self._dataset.getCurrentCLabelsInfo()[1]
def _getCurrentSPosition_(self):
return self._dataset.getCurrentSLabelsInfo()[0], self._dataset.getCurrentSLabelsInfo()[1]
def _getSavedCPosition_(self):
return self._dataset.SavedCLabelsInfo[0], self._dataset.SavedCLabelsInfo[1]
def _getSavedSPosition_(self):
return self._dataset.SavedSLabelsInfo[0], self._dataset.SavedSLabelsInfo[1]
def _getExtraSaved_(self) -> Union[ndarray, List[str]]:
return self._dataset.getSavedCRemainderLabels()[0]
def _getBondTypeSaved_(self) -> Union[ndarray, List[str]]:
return self._dataset.getSavedCRemainderLabels()[1]
def _verifyLastVisualizer_(self, mode: str, marker_trace: Optional[str]) -> Tuple[ndarray, str, str]:
# [1]: Data Checker
if self.InterOutput is None or self.y_pred is None:
warning('No prediction is available yet; running predict() with the last layer included.')
self.predict(standardize=True, getLastLayer=True, force=False, build_dataframe=True)
# [2]: Hyper-parameter Verification
if marker_trace is not None:
inputFullCheck(value=marker_trace, name='marker_trace', dtype='str')
if marker_trace.lower() not in ['random', 'circle', 'circle-open', 'square', 'square-open',
'diamond', 'diamond-open', 'cross', 'x']:
link: str = "https://plotly.com/python/reference/scattergl/#scattergl-marker-symbol"
warning(f" See all plotly markers at here: {link}. Switch back to default (='circle'")
marker_trace: str = "circle"
else:
marker_trace: str = "circle"
inputFullCheck(value=mode, name='mode', dtype='str')
if mode not in ['pred', 'true', 'error', 'abs_error']:
raise TypeError("Your mode should be one of these values: "
"['pred', 'true', 'error', 'abs_error']")
if mode == 'true':
if self.getTargetReference() is None:
raise ValueError("No comparing reference in this mode")
target, associated_labels = self.getTargetReference(), self._SavedPredLabels[0]
elif mode == 'pred':
if self.y_pred is None:
raise ValueError("No prediction in this mode")
target, associated_labels = self.y_pred, self._SavedPredLabels[1]
else:
if self.getTargetReference() is None or self.y_pred is None:
raise ValueError("No error could be found in this mode")
associated_labels = self._SavedPredLabels[2]
target: ndarray = self.getTargetReference() - self.y_pred
if mode == 'abs_error':
target = np.absolute(target)
return target, associated_labels, marker_trace
def getLastLayerDataframeForVisualize(self, mode: str):
# [1]: Update the result
target, targetLabel, marker_trace = self._verifyLastVisualizer_(mode=mode, marker_trace=None)
datatype = np.object_
# [2]: Initialize data
self._timer["visualize"] = perf_counter()
print("-" * 35, self.visualize, "-" * 35)
print('[1]: Generate Features for Visualization')
WeightsMultiplier = bool(input("Allowing multiply your last layer result (Y/N): ").lower()[0] == 'y')
data: ndarray = self._ConcatenateData_(features=self.InterOutput, target=target)
if WeightsMultiplier and self._getLabelsOfLastLayer_() is not None:
weight: ndarray = self.TF_model.get_layer(name=self.TF_model.getLastLayerName()).get_weights()[0]
for col in range(data.shape[1] - 2):
data[:, col + 1:col + 2] *= weight[col, 0]
elif WeightsMultiplier:
warning(" Unable to find the label")
if data.shape[1] - 2 not in [2, 3]:
warning(" Bit2Edge structure may not be incompatible for all methods.")
df_col = self._labelsForVisualization_(visualizerMode='last', n_components=data.shape[1] - 2,
targetLabel=targetLabel)
DF: pd.DataFrame = pd.DataFrame(data=data, index=None, columns=df_col)
DF[df_col[-1]], DF[df_col[0]] = DF[df_col[-1]].astype(datatype), DF[df_col[0]].astype(str)
gc.collect()
print(DF.head(5))
return DF
def visualizeLastLayer(self, mode: str = 'true', marker_trace: str = "circle",
dropBonds: Optional[List[str]] = None) -> pd.DataFrame:
# [1]: Update the result
target, targetLabel, marker_trace = self._verifyLastVisualizer_(mode=mode, marker_trace=marker_trace)
# [2]: Initialize data
self._timer["visualize"] = perf_counter()
print("-" * 35, self.visualize, "-" * 35)
print('[1]: Generate Features for Visualization')
data: ndarray = self._ConcatenateData_(features=self.InterOutput, target=target, dropBonds=dropBonds)
# WeightsMultiplier = bool(input("Allowing multiply your last layer result (Y/N): ").lower()[0] == 'y')
WeightsMultiplier = False
if WeightsMultiplier:
weight: ndarray = self.TF_model.get_layer(name=self.TF_model.getLastLayerName()).get_weights()[0]
for col in range(data.shape[1] - 2):
data[:, col + 1:col + 2] *= weight[col, 0]
if data.shape[1] - 2 not in [2, 3]:
warning(" Bit2Edge structure may not be incompatible for all methods.")
COLS: List[str] = self._labelsForVisualization_(visualizerMode='last', n_components=data.shape[1] - 2,
targetLabel=targetLabel)
DF: pd.DataFrame = pd.DataFrame(data=data, index=None, columns=COLS)
DF[COLS[-1]], DF[COLS[0]] = DF[COLS[-1]].astype(np.object_), DF[COLS[0]].astype(str)
print(DF.head(5), '\nShape: ', DF.shape)
gc.collect()
print('[2]: Start to Visualize')
import plotly
import plotly.io as pio
pio.renderers.default = "browser"
import plotly.express as px
from .config import VISUALIZE
print("PLOTLY Version:", plotly.__version__)
print(f"PLOTLY is drawing via the selection key (last)")
figure = px.scatter_3d(data_frame=DF, x=COLS[1], y=COLS[2], z=COLS[3], opacity=VISUALIZE['opacity'],
color=COLS[0 - int(self.InterOutput.shape[1] == 3)], symbol=COLS[0])
print('[2.9]: Finish the figure. Preparing to upload.')
if marker_trace.lower() != "random":
figure.update_traces(marker_symbol=marker_trace)
# figure.write_html('resources/Figure.html', auto_open=True)
figure.show()
self._timer["visualize"] = perf_counter() - self._timer["visualize"]
print(f'Executing Times for Data Visualization: {self._timer["visualize"]:.6f}s')
return DF
def visualize(self, visualizerMode: str = 'last', decomposeMethod: str = 'UMAP', n_components: int = 2,
mode: int = 0, marker_trace: str = "circle", CModel: bool = True, n_jobs: int = -1,
*args, **kwargs) -> pd.DataFrame:
"""
Implementation of data visualization
:param visualizerMode: The data used for visualization. If 'last', use the last-layer output. If 'full-fingerprint',
dimensionality reduction is applied on the connection or significant fingerprint data.
If 'single', DRA is applied per fingerprint set; if 'reaction', DRA is applied on reactant and product environments;
if 'density' or 'sparsity', the dataframe from self.estimateDensity is used; 'full-struct' applies DRA on the full feature matrix.
:type visualizerMode: str
:param decomposeMethod: The dimensionality reduction method (Default to be UMAP)
:type decomposeMethod: str
:param n_components: Number of components kept by the dimensionality reduction
:type n_components: int
:param mode: The coloring mode
:type mode: int
:param marker_trace: The marker symbol used for plotting (default: "circle")
:type marker_trace: str
:param CModel: If True, use the connection (C) fingerprints; if False, use the significant (S) fingerprints
:type CModel: bool
:param n_jobs: Number of parallel jobs passed to scikit-learn / UMAP (-1 uses all cores)
:type n_jobs: int
:return: pd.DataFrame
"""
# Hyper-parameter Verification
if True:
inputFullCheck(value=mode, name='mode', dtype='int')
inputFullCheck(value=marker_trace, name='marker_trace', dtype='str')
inputFullCheck(value=visualizerMode, name="visualizerMode", dtype='str')
if marker_trace.lower() not in ['random', 'circle', 'circle-open', 'square', 'square-open',
'diamond', 'diamond-open', 'cross', 'x']:
link: str = "https://plotly.com/python/reference/scattergl/#scattergl-marker-symbol"
warning(f" See all plotly markers at here: {link}. Switch back to default (='circle'")
marker_trace: str = "circle"
visualizerMode: str = visualizerMode.lower()
x = ("last", 'full-fingerprint', 'single', 'reaction', 'density', 'sparsity', 'full-struct')
if visualizerMode not in x:
raise ValueError(f"Un-identified method. Please choose again: {x}")
if visualizerMode == "last":
if self.InterOutput is None or self.y_pred is None:
warning('No prediction is available yet; running predict() with the last layer included.')
self.predict(standardize=True, getLastLayer=True, force=False, build_dataframe=True)
if len(self._getLabelsOfLastLayer_()) == 2:
while mode not in (0, 1, 5):
mode: int = int(input("Re-choose your mode of these values (0, 1, 5): "))
else:
if self.y_pred is None:
warning('No prediction is available yet; running predict() without the last layer.')
self.predict(standardize=True, getLastLayer=False, force=False, build_dataframe=True)
if self.BuiltDataFrame is None:
# Assumed defaults mirroring predict(): no file export (output=None) and no unit conversion.
self._exportPrediction_(output=None, getLastLayer=self.InterOutput is not None, convertKJ_eV=0,
sorting=False, bde_sorting=False, ascending=True, Sfs=6, display=True)
inputCheckRange(value=mode, name="mode", minValue=0, maxValue=5, rightBound=True)
datatype = np.object_
if mode == 0:
if self.getTargetReference() is None:
raise ValueError("No comparing reference in this mode")
target, associated_labels = self.getTargetReference(), self._SavedPredLabels[0]
elif mode == 1:
if self.y_pred is None:
raise ValueError("No prediction in this mode")
target, associated_labels = self.y_pred, self._SavedPredLabels[1]
elif mode == 2:
if self.getTargetReference() is None or self.y_pred is None:
raise ValueError("No error could be found in this mode")
target, associated_labels = self.getTargetReference() - self.y_pred, self._SavedPredLabels[2]
elif mode == 3:
if self.getTargetReference() is None or self.y_pred is None:
raise ValueError("No absolute error could be found in this mode")
target, associated_labels = np.absolute(self.getTargetReference() - self.y_pred), \
self._SavedPredLabels[2]
elif mode == 4:
target, associated_labels = self.getData(request="Info")[:, self._bondIndex], \
self._PrebuiltInfoLabels[self._bondIndex]
else:
print("Notation:", self._dataset.getFingerprintNotation())
print("Extra Features:", self._getExtraSaved_())
associated_labels: str = input(f"Choose your coloring labels in your feature: ")
try:
datatype = self.dataType
label, feature = self.getDataLabels(request="CData" if CModel else "SData")
if not isinstance(label, List):
label: List = label.tolist()
location: int = label.index(associated_labels)
target = feature[:, location:location + 1]
except (IndexError, TypeError, ValueError):
raise ValueError("No compatible labels found")
inputFullCheck(value=decomposeMethod, name="decomposeMethod", dtype='str')
decomposeMethod: str = decomposeMethod.lower()
inputCheckRange(value=n_components, name="n_components", maxValue=4, minValue=2)
inputFullCheck(value=CModel, name="CModel", dtype='bool')
inputFullCheck(value=n_jobs, name="n_jobs", dtype='int')
import plotly.express as px
from .config import VISUALIZE
print("-" * 35, self.visualize, "-" * 35)
self._timer["visualize"] = perf_counter()
print('[0]: Choose the data for visualization')
# Intention: At the end, we would have this 2D-array:
# 1st column: bond type; the next n_components (2-4) columns: reduced coordinates; last column: the target values
print('[1]: Generate Features for Visualization')
data: Optional[ndarray] = None
if visualizerMode == 'last':
# WeightsMultiplier = bool(input("Allowing multiply your last layer result (Y/N): ").lower()[0] == 'y')
n_components, data = self._visualizeLastLayerMode_(target=target, WeightsMultiplication=False)
else:
dra_time: float = perf_counter()
if visualizerMode in ['full-fingerprint']:
n_components, data = \
self._visualizeFullFingerprintsMode_(target, CModel, decomposeMethod, n_components, n_jobs, *args,
**kwargs)
elif visualizerMode in ['single']:
n_components, data = self._visualizeSingleMode_(target, CModel, decomposeMethod, n_jobs, *args,
**kwargs)
elif visualizerMode in ['reaction']:
n_components, data = self._visualizeReactionMode_(target, CModel, decomposeMethod, n_jobs, *args,
**kwargs)
elif visualizerMode in ["density", "sparsity"]:
n_components, data = self._visualizeSparseDenseMode_(target=target, CModel=CModel,
densityMode=visualizerMode == "density")
elif visualizerMode in ['full-struct']:
n_components, data = \
self._visualizeFullInputMode_(target, CModel, decomposeMethod, n_components, n_jobs, *args,
**kwargs)
print(f'Dimensionality Reduction Time: {perf_counter() - dra_time:.6f}s')
print('[2]: Start to Visualize')
df_col = self._labelsForVisualization_(visualizerMode=visualizerMode, n_components=n_components,
targetLabel=associated_labels)
if len(df_col) != data.shape[1]:
raise ValueError("Input Error or Source Code Error: Incompatible Function Called")
DataFrame: pd.DataFrame = pd.DataFrame(data=data, index=None, columns=df_col)
DataFrame[df_col[-1]] = DataFrame[df_col[-1]].astype(datatype)
DataFrame[df_col[0]] = DataFrame[df_col[0]].astype(str)
print(f"PLOTLY is drawing via the selection key ({visualizerMode})")
print(DataFrame.head(5))
if n_components == 2:
figure = px.scatter_3d(DataFrame, x=df_col[1], y=df_col[2], z=df_col[3], opacity=VISUALIZE['opacity'],
color=df_col[0], symbol=associated_labels)
else:
figure = px.scatter_3d(DataFrame, x=df_col[1], y=df_col[2], z=df_col[3], opacity=VISUALIZE['opacity'],
color=associated_labels, symbol=associated_labels)
if marker_trace.lower() != "random":
figure.update_traces(marker_symbol=marker_trace)
figure.show()
self._timer["visualize"] = perf_counter() - self._timer["visualize"]
print(f'Executing Times for Data Visualization: {self._timer["visualize"]:.6f}s')
return DataFrame
def _buildVisualizer_(self, decomposeMethod: str = 'UMAP', components: int = 2, n_jobs: int = -1, *args,
**kwargs) -> BaseEstimator:
from .config import VISUALIZE
from sklearn.decomposition import (NMF, PCA, DictionaryLearning, FactorAnalysis, IncrementalPCA,
KernelPCA, LatentDirichletAllocation, MiniBatchDictionaryLearning,
MiniBatchSparsePCA, SparsePCA, TruncatedSVD)
from sklearn.manifold import TSNE, SpectralEmbedding, Isomap, LocallyLinearEmbedding, MDS
from umap import UMAP
bondTypeSaved = self._getBondTypeSaved_()
if decomposeMethod == 'pca':
dra = PCA(n_components=components, *args, **kwargs)
elif decomposeMethod == 'k-pca' or decomposeMethod == 'kpca':
dra = KernelPCA(n_components=components, *args, **kwargs)
elif decomposeMethod == 's-pca' or decomposeMethod == 'spca':
dra = SparsePCA(n_components=components, verbose=1, *args, **kwargs)
elif decomposeMethod == 'mini s-pca' or decomposeMethod == 'mini spca':
dra = MiniBatchSparsePCA(n_components=components, n_jobs=n_jobs, *args, **kwargs)
elif decomposeMethod == 'i-pca' or decomposeMethod == 'ipca':
dra = IncrementalPCA(n_components=components)
elif decomposeMethod == 't-svd' or decomposeMethod == 'tsvd':
dra = TruncatedSVD(n_components=components)
elif decomposeMethod == 'lda':
dra = LatentDirichletAllocation(n_components=components, n_jobs=n_jobs, *args, **kwargs)
elif decomposeMethod == 'fa':
dra = FactorAnalysis(n_components=components, *args, **kwargs)
elif decomposeMethod == 'mini dict-learn':
dra = MiniBatchDictionaryLearning(n_components=components, n_jobs=n_jobs, *args, **kwargs)
elif decomposeMethod == 'dict-learn':
dra = DictionaryLearning(n_components=components, n_jobs=n_jobs, *args, **kwargs)
elif decomposeMethod == 'nmf':
dra = NMF(n_components=components, *args, **kwargs)
elif decomposeMethod == 'spectralEmbedding':
dra = SpectralEmbedding(n_neighbors=len(bondTypeSaved) * VISUALIZE["neighbors_coef"],
n_components=components, n_jobs=n_jobs, *args, **kwargs)
elif decomposeMethod == 'localEmbedding':
dra = LocallyLinearEmbedding(n_neighbors=len(bondTypeSaved) * VISUALIZE["neighbors_coef"],
n_components=components, max_iter=250, n_jobs=n_jobs, *args, **kwargs)
elif decomposeMethod == 'isomap':
dra = Isomap(n_neighbors=len(bondTypeSaved) * VISUALIZE["neighbors_coef"],
n_components=components, n_jobs=n_jobs, *args, **kwargs)
elif decomposeMethod == 'mds':
dra = MDS(n_components=components, verbose=1, n_jobs=n_jobs, *args, **kwargs)
elif decomposeMethod == 't-sne' or decomposeMethod == 'tsne':
dra = TSNE(n_components=components, perplexity=VISUALIZE["tsne_perplexity"],
learning_rate=VISUALIZE["learning_rate"], n_jobs=n_jobs, *args, **kwargs)
else:
print("Default to Uniform Manifold Approximation and Projection (UMAP)")
dra = UMAP(n_neighbors=len(bondTypeSaved) * VISUALIZE["neighbors_coef"],
n_components=components, min_dist=VISUALIZE["min_dist"],
learning_rate=VISUALIZE["learning_rate"], *args, **kwargs)
print(dra)
return dra
def _labelsForVisualization_(self, visualizerMode: str, n_components: int, targetLabel: str) -> List[str]:
labels: List[str] = [str(self.getLabels(request="Info")[self._bondType])]
if visualizerMode in ["last"]:
labels.extend(self._getLabelsOfLastLayer_())
elif n_components == 3:
labels.extend([f"{unit}-axis" for unit in ["X", "Y", "Z"]])
else:
labels.extend([f"{unit}-axis" for unit in ["X", "Y"]])
labels.append(targetLabel)
return labels
def _ConcatenateData_(self, features: ndarray, target: ndarray, dropBonds: Optional[List[str]] = None) -> ndarray:
result: ndarray = np.concatenate((self.getData(request="Info")[:, self._bondType:self._bondType + 1],
features, target), axis=1)
if dropBonds is None:
return result
if len(dropBonds) >= 4: # Hash for faster search
dropBonds = set(dropBonds)
deleteLine: List[int] = []
bondType = result[:, 0].tolist()
for idx, value in enumerate(bondType):
if value in dropBonds:
deleteLine.append(idx)
return np.delete(result, obj=deleteLine, axis=0)
def _visualizeLastLayerMode_(self, target: ndarray, WeightsMultiplication: bool = False) -> Tuple[int, ndarray]:
print('[1]: Loading Last Layer')
data = self._ConcatenateData_(features=self.InterOutput, target=target)
if WeightsMultiplication and self._getLabelsOfLastLayer_() is not None:
weight: ndarray = self.TF_model.get_layer(name=self.TF_model.getLastLayerName()).get_weights()[0]
for col in range(data.shape[1] - 2):
data[:, col + 1:col + 2] *= weight[col, 0]
if data.shape[1] - 2 not in [2, 3]:
warning(" Bit2Edge structure may not be incompatible for all methods.")
return data.shape[1] - 2, data
def _visualizeFullFingerprintsMode_(self, target: ndarray, CModel: bool, decomposeMethod: str = 'UMAP',
components: int = 2, n_jobs: int = -1, *args, **kwargs) -> Tuple[int, ndarray]:
print("[#1]: Loading Connection // Significant Fingerprints")
model = self._buildVisualizer_(decomposeMethod, components, n_jobs, *args, **kwargs)
if CModel:
feature: ndarray = self.getData(request='CData')[:, :self._getCurrentCPosition_()[1][-1]]
else:
feature: ndarray = self.getData(request='SData')[:, :self._getCurrentSPosition_()[1][-1]]
return components, self._ConcatenateData_(features=model.fit_transform(feature), target=target)
def _visualizeSingleMode_(self, target: ndarray, CModel: bool, decomposeMethod: str = 'UMAP',
n_jobs: int = -1, *args, **kwargs) -> Tuple[int, ndarray]:
print("[#1]: Loading Connection // Significant Fingerprints")
if CModel:
input_: ndarray = self.getData(request='CData')
Starting, Ending = self._getCurrentCPosition_()
else:
input_: ndarray = self.getData(request='SData')
Starting, Ending = self._getCurrentSPosition_()
X, Y, Z = input_[:, Starting[0]:Ending[-3]], input_[:, Starting[-2]:Ending[-2]], \
input_[:, Starting[-1]:Ending[-1]]
array: ndarray = np.zeros(shape=(input_.shape[0], 3), dtype=np.float32)
for idx, feature in enumerate([X, Y, Z]):
model = self._buildVisualizer_(decomposeMethod, 1, n_jobs, *args, **kwargs)
array[:, idx:idx + 1] = model.fit_transform(feature)
return 3, self._ConcatenateData_(features=array, target=target)
def _visualizeReactionMode_(self, target: ndarray, CModel: bool, decomposeMethod: str = 'UMAP',
n_jobs: int = -1, *args, **kwargs) -> Tuple[int, ndarray]:
print("[#1]: Loading Connection // Significant Fingerprints")
if CModel:
input_: ndarray = self.getData(request='CData')
Starting, Ending = self._getCurrentCPosition_()
else:
input_: ndarray = self.getData(request='SData')
Starting, Ending = self._getCurrentSPosition_()
outer: ndarray = input_[:, Starting[0]:Ending[-3]]
key = input("Do you want to build summation operation (True/False): ")
if key.lower() in ["true", 't', 'yes', 'y']:
inner: ndarray = np.add(input_[:, Starting[-2]:Ending[-2]], input_[:, Starting[-1]:Ending[-1]])
gc.collect()
else:
inner: ndarray = input_[:, Starting[-2]:Ending[-1]]
array: ndarray = np.zeros(shape=(input_.shape[0], 2), dtype=np.float32)
for idx, feature in enumerate((outer, inner)):
model = self._buildVisualizer_(decomposeMethod, 1, n_jobs, *args, **kwargs)
array[:, idx:idx + 1] = model.fit_transform(feature)
return 2, self._ConcatenateData_(features=array, target=target)
def _visualizeSparseDenseMode_(self, target: ndarray, CModel: bool, densityMode: bool) -> Tuple[int, ndarray]:
print("[#1]: Loading Connection // Significant Density")
if self.BuiltDataFrame is None:
self.estimateDensity(output=None, sparsity=not densityMode, percentage=False, useMean=False)
else:
warning(" The density has been saved. Thus it would be used for visualizing instead of re-creating.")
print("Dimensionality Reduction Analysis Mode: "
"\nSingle: Decompose everything environment into one value"
"\nReaction: Decompose everything environment into one value, except radical environments"
"\nFull-Fingerprint: Decompose all fingerprints features into 2-value vectors at both models."
"\nFull-Struct: Decompose full vectors into 2-value vectors")
answer: str = input("Choose your dimensionality reduction analysis method: ").lower()
while answer not in ["single", "reaction", "full-fingerprint", "full-struct"]:
answer: str = input("Choose your dimensionality reduction analysis method: ").lower()
columns = self.BuiltDataFrame.columns
Starting: int = 0 if CModel else len(columns) // 2
Ending: int = len(columns) // 2 if CModel else len(columns)
size: int = self.getData(request="Info").shape[0]
if answer == "single" or answer == "reaction":
array: ndarray = np.zeros(shape=(size, 3 if answer == 'single' else 2), dtype=np.float32)
if self._dataset.getNumsInput() == 3:
array[:, 0] = self.BuiltDataFrame[columns[Starting]].values
else:
warning("Single matrix addition with four input may not result in robust representation")
array[:, 0] = (self.BuiltDataFrame[columns[Starting]].values +
self.BuiltDataFrame[columns[Starting + 1]].values) / 2
if answer == 'single':
array[:, 1] = self.BuiltDataFrame[columns[Ending - 4]].values
array[:, 2] = self.BuiltDataFrame[columns[Ending - 3]].values
else:
array[:, 1] = (self.BuiltDataFrame[columns[Ending - 4]].values +
self.BuiltDataFrame[columns[Ending - 3]].values) / 2
return array.shape[1], self._ConcatenateData_(features=array, target=target)
array: ndarray = np.zeros(shape=(size, 2), dtype=np.float32)
array[:, 0] = self.BuiltDataFrame[columns[len(columns) // 2 - 1]].values
if answer == "full-fingerprint":
CFullSize, CExtraSize = self.getData("CData").shape[1], self._dataset.getCurrentCLabelsInfo()[2]
CFpSize = CFullSize - CExtraSize
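# Note (added): assuming the stored density columns are means over the full C/S blocks, this rescaling
# subtracts the extra-feature contribution so the value reflects fingerprint bits only.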
CDensityExtra = self.BuiltDataFrame[columns[len(columns) // 2 - 2]].values
array[:, 0] = (array[:, 0] * CFullSize - CDensityExtra * CExtraSize) / CFpSize
if self._dataset.getIsSharedInput():
array[:, 1] = array[:, 0]
else:
SFullSize, SExtraSize = self.getData("SData").shape[1], self._dataset.getCurrentSLabelsInfo()[2]
SFpSize = SFullSize - SExtraSize
SDensityExtra = self.BuiltDataFrame[columns[len(columns) - 2]].values
array[:, 1] = (array[:, 1] * SFullSize - SDensityExtra * SExtraSize) / SFpSize
return array.shape[1], self._ConcatenateData_(features=array, target=target)
def _visualizeFullInputMode_(self, target: ndarray, CModel: bool, decomposeMethod: str = 'UMAP',
components: int = 2, n_jobs: int = -1, *args, **kwargs) -> Tuple[int, ndarray]:
print("[#1]: Loading Connection // Significant Matrix")
model = self._buildVisualizer_(decomposeMethod, components, n_jobs, *args, **kwargs)
data: ndarray = model.fit_transform(self.getData("CData") if CModel else self.getData("SData"))
return components, self._ConcatenateData_(features=data, target=target)
@MeasureExecutionTime
def estimateDensity(self, output: str = None, sparsity: bool = False, percentage: bool = False,
dataType=np.float32, useMean: bool = False) -> pd.DataFrame:
"""
Implementation of estimate data density
:param output: The path of the output file
:type output: str
:param sparsity: Whether to report sparsity (1 - density) instead of density
:type sparsity: bool
:param percentage: Whether to report the values as percentages (multiplied by 100)
:type percentage: bool
:param useMean: If True, compute the mean of each feature block; if False, compute the density (fraction of non-zero entries)
:type useMean: bool
:param dataType: numpy datatype used for the computation
:type dataType: numpy dtype
:return: pd.DataFrame
"""
# Hyper-parameter Verification
if True:
if not (self.isDataAvailable(request="CData") and self.isDataAvailable(request="SData")):
raise ValueError("No pre-defined value or information was found")
inputFullCheck(value=output, name="output", dtype='str-None', delimiter='-')
inputFullCheck(value=sparsity, name="sparsity", dtype='bool')
inputFullCheck(value=percentage, name="percentage", dtype='bool')
inputFullCheck(value=useMean, name="useMean", dtype='bool')
def MeanMethod(arr) -> ndarray:
    return np.mean(arr, axis=1, dtype=dataType)
from copy import deepcopy
from scipy.sparse import diags
import numpy as np
from time import time
from yaglm.linalg_utils import leading_sval, euclid_norm
# TODO: handle matrix shaped parameters
# TODO: allow A1 and or A2 to be None for the identity
# TODO: allow g1 and or g2 to be None for zero
def solve(g1, g2, A1, A2,
primal_init=None, dual_init=None,
D_mat='diag',
rho=1,
rho_update=True,
atol=1e-4,
rtol=1e-4,
eta=2,
mu=10,
max_iter=1000,
tracking_level=0
):
"""
Uses the ADMM algorithm described in Section 2.4 of (Zhu, 2017) to solve a problem of the form
min_theta g1(A_1 theta) + g2(A_2 theta)
We only need to assume g1 and g2 have easy-to-evaluate proximal operators.
Parameters
----------
g1, g2: yaglm.opt.base.Func
The two functions that make up the objective. Both must implement conj_prox, which evaluates the proximal operator of the conjugate function; this is easily obtained from the proximal operator of the original function via Moreau's identity.
A1, A2: array-like
The two matrices in the objective function. Both matrices must have same number of columns, d.
primal_init: None, array-like shape (d, )
Optional initialization for the primal variable.
dual_init: None, list of lists of array-like
Optional initialization for the dual variables.
The first list is the dual variables; the second list is the dual_bar variables.
The first dual variable has shape (n_row(A_1), ) and the second
has shape (n_row(A_2), ).
D_mat: str, yaglm.addm.addm.DMatrix
The D matrix. If str, must be one of ['prop_id', 'diag'].
If 'prop_id' then D will be ||A||_op * I_d.
If 'diag', then D will be the diagonal matrix whose ith element is given by sum_{j=1}^d |A^TA|_{ij}.
rho: float
The ADMM penalty parameter.
rho_update: bool
Whether or not to adaptively update the rho parameter.
atol, rtol: float
The absolute and relative stopping criteria.
eta: float
Amount to increase/decrease rho by.
mu: float
Parameter for deciding whether or not to increase rho. See (15) from (Zhu, 2017).
max_iter: int
Maximum number of iterations.
tracking_level: int
How much data to track.
Output
------
solution, admm_data, opt_info
solution: array-like
The solution.
admm_data: dict
Data related to ADMM e.g. the dual variables.
opt_info: dict
Optimization tracking data, e.g. the dual/primal residual history.
References
----------
Zhu, Y., 2017. An augmented ADMM algorithm with application to the generalized lasso problem. Journal of Computational and Graphical Statistics, 26(1), pp.195-204.
"""
start_time = time()
# shape data
n_row_1 = A1.shape[0]
n_row_2 = A2.shape[0]
d = A1.shape[1]
assert A2.shape[1] == d
########################
# initialize variables #
########################
# primal = np.zeros(d)
# dual_1 = np.zeros(n_row_1)
# dual_1_bar = np.zeros(n_row_1)
# dual_2 = np.zeros(n_row_2)
# dual_2_bar = np.zeros(n_row_2)
if primal_init is None:
primal = np.zeros(d)
else:
primal = deepcopy(primal_init)
# dual variables
if dual_init is not None:
dual_1, dual_2 = dual_init[0]
dual_1_bar, dual_2_bar = dual_init[1]
else:
# technically this initializes from 0 and takes one ADMM step
dual_1 = g1.prox(rho * A1 @ primal, step=rho)
dual_2 = g2.prox(rho * A2 @ primal, step=rho)
dual_1_bar = 2 * dual_1
dual_2_bar = 2 * dual_2
# make sure we have correct shapes
assert dual_1.shape[0] == n_row_1
assert dual_2.shape[0] == n_row_2
dual_cat = np.concatenate([dual_1, dual_2])
dual_cat_prev = deepcopy(dual_cat)
#################################
# setup D matrix and A matrices #
#################################
if D_mat == 'prop_id':
D_mat = DMatrixPropId()
elif D_mat == 'diag':
D_mat = DMatrixDiag()
D_mat.setup(A1=A1, A2=A2)
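# Note (added): in Zhu's augmented ADMM the D matrix is chosen to majorize A^T A (with A = [A1; A2]);
# both built-in choices ('prop_id', 'diag') only require cheap inverse-vector products.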
# Other setup
# TODO: represent this lazily to avoid copying
# A = np.vstack([A1, A2])
A_mat = AMat(A1=A1, A2=A2)
##########################
# setup history tracking #
##########################
history = {}
if tracking_level >= 1:
history['primal_resid'] = []
history['dual_resid'] = []
history['rho'] = [rho]
history['primal_tol'] = []
history['dual_tol'] = []
if tracking_level >= 2:
history['primal'] = [primal]
for it in range(int(max_iter)):
# primal update
primal_new = primal - \
(1 / rho) * D_mat.inv_prod(A1.T @ dual_1_bar + A2.T @ dual_2_bar)
# update dual variables
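# Note (added): conj_prox evaluates the prox of the conjugate function. By Moreau's identity it can
# be derived from the original prox, roughly prox_{rho * g*}(v) = v - rho * prox_{g / rho}(v / rho),
# so only the prox of g1 / g2 needs to be known in closed form.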
dual_1_new = g1.conj_prox(rho * (A1 @ primal_new) + dual_1, step=rho)
dual_2_new = g2.conj_prox(rho * (A2 @ primal_new) + dual_2, step=rho)
dual_1_bar_new = 2 * dual_1_new - dual_1
dual_2_bar_new = 2 * dual_2_new - dual_2
# check stopping
dual_cat_new = np.concatenate([dual_1_new, dual_2_new])
primal_resid_norm = euclid_norm(dual_cat_new - dual_cat) / rho
dual_resid = rho * A_mat.AtA_prod(primal_new - primal) + \
A_mat.At_prod(2 * dual_cat - dual_cat_prev - dual_cat_new)
dual_resid_norm = euclid_norm(dual_resid)
# check stopping criteria
# TODO: the relative part is not quite right, but I can't quite tell what it should be from the paper. Probably need to stare at it longer
primal_tol = np.sqrt(A_mat.shape[0]) * atol + \
rtol * euclid_norm(A_mat.A_prod(primal))
dual_tol = np.sqrt(A_mat.shape[1]) * atol + \
rtol * euclid_norm(A_mat.At_prod(dual_cat))
# possibly track history
if tracking_level >= 1:
history['primal_resid'].append(primal_resid_norm)
history['dual_resid'].append(dual_resid_norm)
history['rho'].append(rho)
# TODO: do we want to track these?
history['primal_tol'].append(primal_tol)
history['dual_tol'].append(dual_tol)
if tracking_level >= 2:
history['primal'].append(primal_new)
if primal_resid_norm <= primal_tol and dual_resid_norm <= dual_tol:
break
# update variables if not stopping
primal = primal_new
dual_1 = dual_1_new
dual_2 = dual_2_new
dual_cat_prev = deepcopy(dual_cat)
dual_cat = dual_cat_new
dual_1_bar = dual_1_bar_new
dual_2_bar = dual_2_bar_new
# update rho
# TODO: dont do every iteration
if rho_update:
primal_ratio = (primal_resid_norm / primal_tol)
dual_ratio = (dual_resid_norm / dual_tol)
if primal_ratio >= mu * dual_ratio:
rho *= eta
elif dual_ratio >= mu * primal_ratio:
rho /= eta
#################
# Format output #
#################
# optimization data
opt_info = {'iter': it,
'runtime': time() - start_time,
'history': history
}
# other data
admm_data = {'dual_vars': [[dual_1_new, dual_2_new],
[dual_1_bar_new, dual_2_bar_new]],
'rho': rho,
'D_mat': D_mat
}
return primal_new, admm_data, opt_info
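# A minimal standalone sketch of the conjugate-prox step used in the dual
# updates above: for a penalty g with a known prox, the prox of its conjugate
# can be obtained from the Moreau decomposition
#     prox_{t g*}(v) = v - t * prox_{g/t}(v / t).
# The helpers below (l1_prox, l1_conj_prox) are illustrative stand-ins, not
# part of yaglm's API; the L1 norm is used because its prox (soft-thresholding)
# and conjugate prox (projection onto the infinity ball) are both known in
# closed form, which lets the identity be checked numerically.
import numpy as np

def l1_prox(v, step=1.0):
    """Prox of step * ||.||_1, i.e. soft-thresholding."""
    return np.sign(v) * np.maximum(np.abs(v) - step, 0.0)

def l1_conj_prox(v, step=1.0):
    """Prox of step * (||.||_1)^* via the Moreau decomposition."""
    return v - step * l1_prox(v / step, step=1.0 / step)

v = np.array([-3.0, -0.2, 0.0, 0.5, 2.5])
# The conjugate of ||.||_1 is the indicator of the L-infinity unit ball, so its
# prox is projection onto [-1, 1] regardless of the step; both routes agree.
assert np.allclose(l1_conj_prox(v, step=2.0), np.clip(v, -1.0, 1.0))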
def solve_path(prob_data_iter, **kws):
"""
An iterator that computes the solution path over a sequence of problems using warm starts.
Parameters
----------
prob_data_iter: iterator
Iterator yielding the sequence of problems to solve.
Each element should be the tuple (g1, g2, A1, A2).
**kws:
Keyword arguments to yaglm.opt.zhu_admm.solve
Output
------
soln, opt_data, admm_data
"""
for (g1, g2, A1, A2) in prob_data_iter:
# note: solve() returns (solution, admm_data, opt_info)
soln, admm_data, opt_data = solve(g1=g1, g2=g2, A1=A1, A2=A2, **kws)
yield soln, opt_data, admm_data
kws['dual_init'] = admm_data['dual_vars']
kws['rho'] = admm_data['rho']
kws['D_mat'] = admm_data['D_mat']
kws['primal_init'] = soln
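# A self-contained toy illustrating the warm-start pattern used by solve_path:
# each problem along the path is solved starting from the previous solution,
# with state carried forward through the keyword dict. The quadratic
# "toy_solve" below is a stand-in for illustration only, not yaglm's solve().
def toy_solve(target, primal_init=None, step=0.5, n_steps=25):
    x = 0.0 if primal_init is None else primal_init
    for _ in range(n_steps):
        x -= step * (x - target)   # gradient step on 0.5 * (x - target)**2
    return x

def toy_solve_path(targets, **kws):
    for target in targets:
        soln = toy_solve(target, **kws)
        yield target, soln
        kws['primal_init'] = soln  # warm start the next problem in the path

for target, soln in toy_solve_path([1.0, 1.1, 1.2]):
    print(target, round(soln, 4))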
class DMatrix:
def setup(self, A1, A2):
"""
Sets up the D matrix from the A1, A2 matrices.
Parameters
----------
A1, A2: array-like
The two matrices in the objective function.
Output
------
self
"""
pass
def inv_prod(self, v):
"""
Computes the product D_mat^{-1} @ v
Parameters
----------
v: array-like, shape (d, )
The vector to multiply by.
Output
------
D^{-1} v
"""
pass
class DMatrixPropId(DMatrix):
"""
Represents the matrix ||A||_op^2 * I_d
"""
def setup(self, A1, A2):
"""
Sets up the D matrix from the A1, A2 matrices.
Parameters
----------
A1, A2: array-like
The two matrices in the objective function.
Output
------
self
"""
AtA = A1.T @ A1 + A2.T @ A2
self.sval_sq = leading_sval(AtA) ** 2
# self.val = leading_sval(A1) + leading_sval(A2)
def inv_prod(self, v):
"""
Computes the product D_mat^{-1} @ v
Parameters
----------
v: array-like, shape (d, )
The vector to multiply by.
Output
------
D^{-1} v
"""
return v / self.sval_sq
class DMatrixDiag(DMatrix):
"""
Represents the diagonal matrix whose diagonal elements are given by sum_{j=1}^d |A^TA|_{ij}.
"""
def setup(self, A1, A2):
"""
Sets up the D matrix from the A1, A2 matrices.
Parameters
----------
A1, A2: array-like
The two matrices in the objective function.
Output
------
self
"""
AtA = A1.T @ A1 + A2.T @ A2
row_sums = abs(AtA).sum(axis=1)
row_sums = np.array(row_sums).reshape(-1) # annoying issue with sparse matrices
self.diag_mat_inv = diags(1 / row_sums)
def inv_prod(self, v):
"""
Computes the product D_mat^{-1} @ v
Parameters
----------
v: array-like, shape (d, )
The vector to mulitply by.
Output
------
D^{-1} v
"""
return self.diag_mat_inv @ v
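# A quick numeric sanity check of the diagonal preconditioner above: with
# A = [A1; A2], the ith diagonal entry of D is the ith row sum of |A^T A|, so
# inv_prod is just elementwise division by those row sums. Dense toy matrices,
# NumPy only; nothing here depends on yaglm internals.
import numpy as np

rng = np.random.default_rng(0)
A1 = rng.normal(size=(5, 3))
A2 = rng.normal(size=(4, 3))
AtA = A1.T @ A1 + A2.T @ A2
row_sums = np.abs(AtA).sum(axis=1)

v = rng.normal(size=3)
assert np.allclose(np.linalg.solve(np.diag(row_sums), v), v / row_sums)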
class DMatrixAtA(DMatrix):
"""
D = A.T @ A
"""
def setup(self, A1, A2):
"""
Sets up the D matrix from the A1, A2 matrices.
Parameters
----------
A1, A2: array-like
The two matrices in the objective function.
Output
------
self
"""
A = np.vstack([A1, A2])
self.AtA_inv = np.linalg.pinv(A.T @ A)  # completion (api: numpy.linalg.pinv)
import random
import numpy as np
from random import randrange, gauss
import note_seq
from note_seq.sequences_lib import (
stretch_note_sequence,
transpose_note_sequence,
NegativeTimeError
)
def train_test_split(dataset, split=0.90):
train = list()
train_size = split * len(dataset)
dataset_copy = list(dataset)
while len(train) < train_size:
index = randrange(len(dataset_copy))
train.append(dataset_copy.pop(index))
return train, dataset_copy
def load_seq_files(files):
res = []
for fname in files:
with open(fname, 'rb') as f:
ns = note_seq.NoteSequence()
ns.ParseFromString(f.read())
res.append(ns)
return res
class Data:
def __init__(self, sequences, midi_encoder, token_eos, pad_token):
self.token_eos = token_eos
self.pad_token = pad_token
self.sequences = sequences
self.midi_encoder = midi_encoder
def __len__(self):
return sum(len(s) for s in self.sequences.values())
def batch(self, batch_size, length, mode='train'):
batch_data = [
self._get_seq(seq, length, mode)
for seq in random.sample(self.sequences[mode], k=batch_size)
]
return np.array(batch_data) # batch_size, seq_len
def slide_seq2seq_batch(self, batch_size, length, mode='train'):
data = self.batch(batch_size, length + 1, mode)
assert(data.shape == (batch_size, length + 1))
x = data[:, :-1]
y = data[:, 1:]
return x, y
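# Standalone illustration (to be read outside the class) of the shift done by
# slide_seq2seq_batch above: a window of length + 1 tokens yields the model
# input x and the next-token targets y, offset by one position.
import numpy as np

window = np.array([[11, 12, 13, 14, 15]])  # batch of 1, length + 1 = 5 tokens
x, y = window[:, :-1], window[:, 1:]
print(x)  # [[11 12 13 14]]  -> input
print(y)  # [[12 13 14 15]]  -> targets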
def augment(self, ns):
stretch_factor = gauss(1.0, 0.5)
velocity_factor = gauss(1.0, 0.2)
transpose = randrange(-5, 7)
ns = stretch_note_sequence(ns, stretch_factor)
for note in ns.notes:
note.velocity = max(1, min(127, int(note.velocity * velocity_factor)))
return transpose_note_sequence(ns, transpose, in_place=True)[0]
def _get_seq(self, ns, max_length, mode):
if mode == 'train':
try:
data = self.midi_encoder.encode_note_sequence(self.augment(ns))
except NegativeTimeError:
data = self.midi_encoder.encode_note_sequence(ns)
else:
data = self.midi_encoder.encode_note_sequence(ns)
if len(data) > max_length:
start = random.randrange(0, len(data) - max_length)
data = data[start:start + max_length]
elif len(data) < max_length:
data = np.append(data, self.token_eos)  # completion (api: numpy.append)
"""
Implementation of a class for the analysis of hyperfine structure spectra.
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import copy
from fractions import Fraction
import lmfit as lm
from satlas.models.basemodel import BaseModel, SATLASParameters
from satlas.models.summodel import SumModel
from satlas.loglikelihood import poisson_llh
from satlas.utilities import poisson_interval
import satlas.profiles as p
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as optimize
from sympy.physics.wigner import wigner_6j, wigner_3j
W6J = wigner_6j
W3J = wigner_3j
__all__ = ['HFSModel']
class HFSModel(BaseModel):
r"""Constructs a HFS spectrum, consisting of different
peaks described by a certain profile. The number of peaks is governed by
the nuclear spin and the atomic spins of the levels."""
__shapes__ = {'gaussian': p.Gaussian,
'lorentzian': p.Lorentzian,
'crystalball': p.Crystalball,
'voigt': p.Voigt,
'pseudovoigt': p.PseudoVoigt,
'asymmlorentzian': p.AsymmLorentzian}
def __init__(self, I, J, ABC, centroid, fwhm=[50.0, 50.0], scale=1.0, background_params=[0.001], shape='voigt', use_racah=False, use_saturation=False, saturation=0.001, shared_fwhm=True, sidepeak_params={'N': 0, 'Poisson': 0.68, 'Offset': 0}, crystalballparams={'Taillocation': 1, 'Tailamplitude': 1}, pseudovoigtparams={'Eta': 0.5, 'A': 0}, asymmetryparams={'a': 0}):
"""Builds the HFS with the given atomic and nuclear information.
Parameters
----------
I: float
The nuclear spin.
J: list of 2 floats
The spins of the fine structure levels.
ABC: list of 6 floats
The hyperfine structure constants A, B and C for ground- and excited
fine level. The list should be given as [A :sub:`lower`,
A :sub:`upper`, B :sub:`lower`, B :sub:`upper`, C :sub:`lower`,
C :sub:`upper`].
centroid: float
Centroid of the spectrum.
Other parameters
----------------
fwhm: float or list of 2 floats, optional
Depending on the used shape, the FWHM is defined by one or two floats.
Defaults to [50.0, 50.0]
scale: float, optional
Sets the strength of the spectrum, defaults to 1.0. Comparable to the
amplitude of the spectrum.
background_params: list of float, optional
Sets the coefficients of the polynomial background to the given values.
Order of polynomial is equal to the number of parameters given minus one.
Highest order coefficient is the first element, etc.
shape : string, optional
Sets the transition shape. String is converted to lowercase. For
possible values, see *HFSModel.__shapes__*.keys().
Defaults to Voigt if an incorrect value is supplied.
use_racah: boolean, optional
If True, fixes the relative peak intensities to the Racah intensities.
Otherwise, gives them equal intensities and allows them to vary during
fitting.
use_saturation: boolean, optional
If True, uses the saturation parameter to calculate relative intensities.
saturation: float, optional
If different from 0, calculates the saturation effect on the transition
intensity. This is done by an exponential transition between the Racah
intensities and the saturated intensities.
shared_fwhm: boolean, optional
If True, the same FWHM is used for all peaks. Otherwise, give them all
the same initial FWHM and let them vary during the fitting.
sidepeak_params: dict
A dictionary with the following keys and values:
N: int
Sets the number of sidepeaks that are present in the spectrum.
Defaults to 0.
Poisson: float
Sets the relative intensity of the first sidepeak. The intensity of the
other sidepeaks is calculated from the Poisson factor.
Offset: float
Sets the distance (in MHz) between subsequent sidepeaks in the spectrum.
crystalballparams: dict
A dictionary with the following keys and values:
Tailamplitude: float
Sets the relative amplitude of the tail for the Crystalball shape function.
Taillocation: float
Sets the location of the tail for the Crystalball shape function.
pseudovoigtparams: dict
A dictionary with the following keys and values:
Eta: float between 0 and 1
Describes the mixing percentage of the Gaussian and Lorentzian shapes
A: float
Describes the asymmetry of the peak.
Note
----
The list of parameter keys is:
* *FWHM* (only for profiles with one float for the FWHM)
* *FWHMG* (only for profiles with two floats for the FWHM)
* *FWHML* (only for profiles with two floats for the FWHM)
* *Al*
* *Au*
* *Bl*
* *Bu*
* *Cl*
* *Cu*
* *Centroid*
* *Background*
* *Poisson* (only if the parameter *N* is greater than 0)
* *Offset* (only if the parameter *N* is greater than 0)
* *Amp* (with the correct labeling of the transition)
* *scale*"""
super(HFSModel, self).__init__()
shape = shape.lower()
if shape not in self.__shapes__:
print("""Given profile shape not yet supported.
Defaulting to Voigt lineshape.""")
shape = 'voigt'
fwhm = [50.0, 50.0]
self.I_value = {0.0: ((False, 0), (False, 0), (False, 0),
(False, 0), (False, 0), (False, 0)),
0.5: ((True, 1), (True, 1),
(False, 0), (False, 0), (False, 0), (False, 0)),
1.0: ((True, 1), (True, 1),
(True, 1), (True, 1),
(False, 0), (False, 0))
}
self.J_lower_value = {0.0: ((False, 0), (False, 0), (False, 0)),
0.5: ((True, 1),
(False, 0), (False, 0)),
1.0: ((True, 1),
(True, 1), (False, 0))
}
self.J_upper_value = {0.0: ((False, 0), (False, 0), (False, 0)),
0.5: ((True, 1),
(False, 0), (False, 0)),
1.0: ((True, 1),
(True, 1), (False, 0))
}
self.shape = shape
self._use_racah = use_racah
self._use_saturation = use_saturation
self.shared_fwhm = shared_fwhm
self.I = I
self.J = J
self._calculate_F_levels()
self._calculate_energy_coefficients()
self._calculate_transitions()
self._vary = {}
self._constraints = {}
self.ratioA = (None, 'lower')
self.ratioB = (None, 'lower')
self.ratioC = (None, 'lower')
self._roi = (-np.inf, np.inf)
self._populateparams(ABC, centroid, fwhm, scale, saturation,
background_params,
sidepeak_params,
crystalballparams,
pseudovoigtparams,
asymmetryparams)
@property
def locations(self):
"""Contains the locations of the peaks."""
return self._locations
@locations.setter
def locations(self, locations):
self._locations = np.array(locations)
for p, l in zip(self.parts, locations):
p.mu = l
@property
def use_racah(self):
"""Boolean to set the behaviour to Racah intensities (True)
or to individual amplitudes (False)."""
return self._use_racah
@use_racah.setter
def use_racah(self, value):
self._use_racah = value
self.params['Scale'].vary = self._use_racah or self._use_saturation
for label in self.ftof:
self.params['Amp' + label].vary = not (self._use_racah or self._use_saturation)
@property
def use_saturation(self):
"""Boolean to set the behaviour to the saturation model (True)
or not (False)."""
return self._use_saturation
@use_saturation.setter
def use_saturation(self, value):
self._use_saturation = value
self.params['Saturation'].vary = value
self.params['Scale'].vary = self._use_racah or self._use_saturation
for label in self.ftof:
self.params['Amp' + label].vary = not (self._use_racah or self._use_saturation)
@property
def params(self):
"""Instance of lmfit.Parameters object characterizing the
shape of the HFS."""
return self._parameters
@params.setter
def params(self, params):
p = params.copy()
p._prefix = self._prefix
self._parameters = self._check_variation(p)
# self._parameters.pretty_print()
# print(self._prefix)
# When changing the parameters, the energies and
# the locations have to be recalculated
self._calculate_energies()
self._calculate_transition_locations()
if not self.use_racah and not self.use_saturation:
# When not using set amplitudes, they need
# to be changed after every iteration
self._set_amplitudes()
elif self.use_saturation:
self._set_transitional_amplitudes()
else:
pass
# Finally, the fwhm of each peak needs to be set
self._set_fwhm()
if self.shape.lower() == 'crystalball':
for part in self.parts:
part.alpha = self.params['Taillocation'].value
part.n = self.params['Tailamplitude'].value
if self.shape.lower() == 'pseudovoigt':
for label, part in zip(self.ftof, self.parts):
if self.shared_fwhm:
part.n = self.params['Eta'].value
part.a = self.params['Asym'].value
else:
part.n = self.params['Eta'+label].value
part.a = self.params['Asym'+label].value
elif self.shape.lower() == 'asymmlorentzian':
for label, part in zip(self.ftof, self.parts):
if self.shared_fwhm:
part.asymm = self.params['Asym'].value
else:
part.asymm = self.params['Asym'+label].value
def _set_transitional_amplitudes(self):
values = self._calculate_transitional_intensities(self.params['Saturation'].value)
for p, l, v in zip(self.parts, self.ftof, values):
self.params['Amp' + l].value = v
p.amp = v
@property
def ftof(self):
"""List of transition labels, of the form *Flow__Fhigh* (half-integers
have an underscore instead of a division sign), same ordering
as given by the attribute :attr:`.locations`."""
return self._ftof
@ftof.setter
def ftof(self, value):
self._ftof = value
def _calculate_energies(self):
r"""The hyperfine addition to a central frequency (attribute *centroid*)
for a specific level is calculated. The formula comes from
:cite:`Schwartz1955` and in a simplified form, reads
.. math::
C_F &= F(F+1) - I(I+1) - J(J+1)
D_F &= \frac{3 C_F (C_F + 1) - 4 I (I + 1) J (J + 1)}{2 I (2 I - 1)
J (2 J - 1)}
E_F &= \frac{10 (\frac{C_F}{2})^3 + 20(\frac{C_F}{2})^2 + C_F(-3I(I
+ 1)J(J + 1) + I(I + 1) + J(J + 1) + 3) - 5I(I + 1)J(J + 1)}{I(I -
1)(2I - 1)J(J - 1)(2J - 1)}
E &= centroid + \frac{A C_F}{2} + \frac{B D_F}{4} + C E_F
A, B and C are the dipole, quadrupole and octupole hyperfine
parameters. Octupole contributions are calculated when both the
nuclear and electronic spin is greater than 1, quadrupole contributions
when they are greater than 1/2, and dipole contributions when they are
greater than 0.
Note
----
The energies are computed for every F level of both fine-structure
levels, using the current values in *params*, and stored in MHz in the
*energies* attribute."""
A = np.append(np.ones(self.num_lower) * self.params['Al'].value,
np.ones(self.num_upper) * self.params['Au'].value)
B = np.append(np.ones(self.num_lower) * self.params['Bl'].value,
np.ones(self.num_upper) * self.params['Bu'].value)
C = np.append(np.ones(self.num_lower) * self.params['Cl'].value,
np.ones(self.num_upper) * self.params['Cu'].value)
centr = np.append(np.zeros(self.num_lower),
np.ones(self.num_upper) * self.params['Centroid'].value)
self.energies = centr + self.C * A + self.D * B + self.E * C
def _calculate_transition_locations(self):
self.locations = [self.energies[ind_high] - self.energies[ind_low] for (ind_low, ind_high) in self.transition_indices]
def _set_amplitudes(self):
for p, label in zip(self.parts, self.ftof):
p.amp = self.params['Amp' + label].value
def _set_fwhm(self):
if self.shape.lower() == 'voigt':
fwhm = [[self.params['FWHMG'].value, self.params['FWHML'].value] for _ in self.ftof] if self.shared_fwhm else [[self.params['FWHMG' + label].value, self.params['FWHML' + label].value] for label in self.ftof]
else:
fwhm = [self.params['FWHM'].value for _ in self.ftof] if self.shared_fwhm else [self.params['FWHM' + label].value for label in self.ftof]
for p, f in zip(self.parts, fwhm):
p.fwhm = f
####################################
# INITIALIZATION METHODS #
####################################
def _populateparams(self, ABC, centroid, fwhm, scale, saturation, background_params, sidepeak_params, crystalballparams, pseudovoigtparams, asymmetryparams):
# Prepares the params attribute with the initial values
par = SATLASParameters()
if not self.shape.lower() == 'voigt':
if self.shared_fwhm:
par.add('FWHM', value=fwhm, vary=True, min=0)
if self.shape.lower() == 'pseudovoigt':
Eta = pseudovoigtparams['Eta']
A = pseudovoigtparams['A']
par.add('Eta', value=Eta, vary=True, min=0, max=1)
par.add('Asym', value=A, vary=True)
if self.shape.lower() == 'asymmlorentzian':
par.add('Asym', value=asymmetryparams['a'], vary=True)
else:
fwhm = [fwhm for _ in range(len(self.ftof))]
for label, val in zip(self.ftof, fwhm):
par.add('FWHM' + label, value=val, vary=True, min=0)
if self.shape.lower() == 'pseudovoigt':
Eta = pseudovoigtparams['Eta']
A = pseudovoigtparams['A']
par.add('Eta' + label, value=Eta, vary=True, min=0, max=1)
par.add('Asym' + label, value=A, vary=True)
if self.shape.lower() == 'asymmlorentzian':
par.add('Asym' + label, value=asymmetryparams['a'], vary=True)
else:
if self.shared_fwhm:
par.add('FWHMG', value=fwhm[0], vary=True, min=1)
par.add('FWHML', value=fwhm[1], vary=True, min=1)
val = 0.5346 * fwhm[1] + np.sqrt(0.2166 * fwhm[1] ** 2 + fwhm[0] ** 2)
par.add('TotalFWHM', value=val, vary=False,
expr='0.5346*FWHML+(0.2166*FWHML**2+FWHMG**2)**0.5')
else:
fwhm = np.array(fwhm)
fwhm = np.array([[fwhm[0], fwhm[1]] for _ in range(len(self.ftof))])
for label, val in zip(self.ftof, fwhm):
par.add('FWHMG' + label, value=val[0], vary=True, min=0)
par.add('FWHML' + label, value=val[1], vary=True, min=0)
val = 0.5346 * val[1] + np.sqrt(0.2166 * val[1] ** 2
+ val[0] ** 2)
par.add('TotalFWHM' + label, value=val, vary=False,
expr='0.5346*FWHML' + label +
'+(0.2166*FWHML' + label +
'**2+FWHMG' + label + '**2)**0.5')
if self.shape.lower() == 'crystalball':
taillocation = crystalballparams['Taillocation']
tailamplitude = crystalballparams['Tailamplitude']
par.add('Taillocation', value=taillocation, vary=True)
par.add('Tailamplitude', value=tailamplitude, vary=True)
for part in self.parts:
part.alpha = taillocation
part.n = tailamplitude
par.add('Scale', value=scale, vary=self.use_racah or self.use_saturation, min=0)
par.add('Saturation', value=saturation * self.use_saturation, vary=self.use_saturation, min=0)
amps = self._calculate_transitional_intensities(saturation)
for label, amp in zip(self.ftof, amps):
label = 'Amp' + label
par.add(label, value=amp, vary=not (self.use_racah or self.use_saturation), min=0)
par.add('Al', value=ABC[0], vary=True)
par.add('Au', value=ABC[1], vary=True)
par.add('Bl', value=ABC[2], vary=True)
par.add('Bu', value=ABC[3], vary=True)
par.add('Cl', value=ABC[4], vary=True)
par.add('Cu', value=ABC[5], vary=True)
ratios = (self.ratioA, self.ratioB, self.ratioC)
labels = (('Al', 'Au'), ('Bl', 'Bu'), ('Cl', 'Cu'))
for r, (l, u) in zip(ratios, labels):
if r[0] is not None:
if r[1].lower() == 'lower':
fixed, free = l, u
else:
fixed, free = u, l
par[fixed].expr = str(r[0]) + '*' + free
par[fixed].vary = False
par.add('Centroid', value=centroid, vary=True)
for i, val in enumerate(reversed(background_params)):
par.add('Background' + str(i), value=val, vary=True)
self.background_degree = i
n, poisson, offset = sidepeak_params['N'], sidepeak_params['Poisson'], sidepeak_params['Offset']
par.add('N', value=n, vary=False)
if n > 0:
par.add('Poisson', value=poisson, vary=True, min=0, max=1)
par.add('Offset', value=offset, vary=False, min=None, max=None)
self.params = self._check_variation(par)
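# The TotalFWHM expression registered above is the commonly used approximation
# of a Voigt profile's FWHM from its Gaussian and Lorentzian widths,
# f_V ~ 0.5346*f_L + sqrt(0.2166*f_L**2 + f_G**2). A quick standalone look at
# the default initial widths of 50 MHz each:
import math

fwhm_g, fwhm_l = 50.0, 50.0
total = 0.5346 * fwhm_l + math.sqrt(0.2166 * fwhm_l ** 2 + fwhm_g ** 2)
print(round(total, 2))  # ~81.88 MHz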
def _set_ratios(self, par):
# Process the set ratios for the hyperfine parameters.
ratios = (self.ratioA, self.ratioB, self.ratioC)
labels = (('Al', 'Au'), ('Bl', 'Bu'), ('Cl', 'Cu'))
for r, (l, u) in zip(ratios, labels):
if r[0] is not None:
if r[1].lower() == 'lower':
fixed, free = l, u
else:
fixed, free = u, l
par[fixed].expr = str(r[0]) + '*' + free
par[fixed].vary = False
par[free].vary = True
return par
def _check_variation(self, par):
par = super(HFSModel, self)._check_variation(par)
# Make sure the variations in the params are set correctly.
for key in self._vary.keys():
if key in par.keys():
par[key].vary = self._vary[key]
par['N'].vary = False
if self.I in self.I_value:
Al, Au, Bl, Bu, Cl, Cu = self.I_value[self.I]
if not Al[0]:
par['Al'].vary, par['Al'].value = Al
if not Au[0]:
par['Au'].vary, par['Au'].value = Au
if not Bl[0]:
par['Bl'].vary, par['Bl'].value = Bl
if not Bu[0]:
par['Bu'].vary, par['Bu'].value = Bu
if not Cl[0]:
par['Cl'].vary, par['Cl'].value = Cl
if not Cu[0]:
par['Cu'].vary, par['Cu'].value = Cu
if self.J[0] in self.J_lower_value:
Al, Bl, Cl = self.J_lower_value[self.J[0]]
if not Al[0]:
par['Al'].vary, par['Al'].value = Al
if not Bl[0]:
par['Bl'].vary, par['Bl'].value = Bl
if not Cl[0]:
par['Cl'].vary, par['Cl'].value = Cl
if self.J[self.num_lower] in self.J_upper_value:
Au, Bu, Cu = self.J_upper_value[self.J[self.num_lower]]
if not Au[0]:
par['Au'].vary, par['Au'].value = Au
if not Bu[0]:
par['Bu'].vary, par['Bu'].value = Bu
if not Cu[0]:
par['Cu'].vary, par['Cu'].value = Cu
for key in self._constraints.keys():
for bound in self._constraints[key]:
if bound.lower() == 'min':
par[key].min = self._constraints[key][bound]
elif bound.lower() == 'max':
par[key].max = self._constraints[key][bound]
else:
pass
return par
def _calculate_F_levels(self):
F1 = np.arange(abs(self.I - self.J[0]), self.I+self.J[0]+1, 1)
self.num_lower = len(F1)
F2 = np.arange(abs(self.I - self.J[1]), self.I+self.J[1]+1, 1)
self.num_upper = len(F2)
F = np.append(F1, F2)
self.J = np.append(np.ones(len(F1)) * self.J[0],
np.ones(len(F2)) * self.J[1])
self.F = F
def _calculate_transitions(self):
f_f = []
indices = []
amps = []
sat_amp = []
for i, F1 in enumerate(self.F[:self.num_lower]):
for j, F2 in enumerate(self.F[self.num_lower:]):
if abs(F2 - F1) <= 1 and not F2 == F1 == 0.0:
sat_amp.append(2*F1+1)
j += self.num_lower
intensity = self._calculate_racah_intensity(self.J[i],
self.J[j],
self.F[i],
self.F[j])
if intensity > 0:
amps.append(intensity)
indices.append([i, j])
s = ''
temp = Fraction(F1).limit_denominator()
if temp.denominator == 1:
s += str(temp.numerator)
else:
s += str(temp.numerator) + '_' + str(temp.denominator)
s += '__'
temp = Fraction(F2).limit_denominator()
if temp.denominator == 1:
s += str(temp.numerator)
else:
s += str(temp.numerator) + '_' + str(temp.denominator)
f_f.append(s)
self.ftof = f_f # Stores the labels of all transitions, in order
self.transition_indices = indices # Stores the indices in the F and energy arrays for the transition
self.racah_amplitudes = np.array(amps) # Sets the initial amplitudes to the Racah intensities
self.racah_amplitudes = self.racah_amplitudes / self.racah_amplitudes.max()
self.saturated_amplitudes = np.array(sat_amp)
self.saturated_amplitudes = self.saturated_amplitudes / self.saturated_amplitudes.max()
self.parts = tuple(self.__shapes__[self.shape](amp=a) for a in self.racah_amplitudes)
def _calculate_transitional_intensities(self, s):
if s <= 0:
return self.racah_amplitudes
else:
sat = self.saturated_amplitudes
rac = self.racah_amplitudes
transitional = -sat*np.expm1(-rac * s / sat)
return transitional / transitional.max()
def _calculate_racah_intensity(self, J1, J2, F1, F2, order=1.0):
return float((2 * F1 + 1) * (2 * F2 + 1) * \
W6J(J2, F2, self.I, F1, J1, order) ** 2) # DO NOT REMOVE CAST TO FLOAT!!!
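# Standalone check (outside the class) of the Racah intensity computed above,
# (2*F1 + 1) * (2*F2 + 1) * {J2 F2 I; F1 J1 1}^2, for one illustrative
# transition of an I = 3/2, J = 1/2 -> 3/2 system, using sympy's wigner_6j
# directly, mirroring the W6J call in _calculate_racah_intensity.
from sympy import Rational
from sympy.physics.wigner import wigner_6j

I_spin, J1, J2 = Rational(3, 2), Rational(1, 2), Rational(3, 2)
F1, F2 = 1, 2
intensity = float((2 * F1 + 1) * (2 * F2 + 1)
                  * wigner_6j(J2, F2, I_spin, F1, J1, 1) ** 2)
print(intensity)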
def _calculate_energy_coefficients(self):
# Since I, J and F do not change, these factors can be calculated once
# and then stored.
I, J, F = self.I, self.J, self.F
C = (F*(F+1) - I*(I+1) - J*(J + 1)) * (J/J) if I > 0 else 0 * J #*(J/J) is a dirty trick to avoid checking for J=0
D = (3*C*(C+1) - 4*I*(I+1)*J*(J+1)) / (2*I*(2*I-1)*J*(2*J-1))
E = (10*(0.5*C)**3 + 20*(0.5*C)**2 + C*(-3*I*(I+1)*J*(J+1) + I*(I+1) + J*(J+1) + 3) - 5*I*(I+1)*J*(J+1)) / (I*(I-1)*(2*I-1)*J*(J-1)*(2*J-1))
C = np.where(np.isfinite(C), 0.5 * C, 0)
D = np.where(np.isfinite(D), 0.25 * D, 0)
E = np.where(np.isfinite(E), E, 0)
self.C, self.D, self.E = C, D, E
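# A small worked example of the coefficients defined above, for I = 3/2,
# J = 3/2, F = 3 (values chosen purely for illustration):
#   C_F = F(F+1) - I(I+1) - J(J+1)
#   D_F = (3*C_F*(C_F + 1) - 4*I*(I+1)*J*(J+1)) / (2*I*(2*I - 1)*J*(2*J - 1))
I_n, J_n, F_n = 1.5, 1.5, 3.0
C_F = F_n * (F_n + 1) - I_n * (I_n + 1) - J_n * (J_n + 1)   # = 4.5
D_F = (3 * C_F * (C_F + 1) - 4 * I_n * (I_n + 1) * J_n * (J_n + 1)) / (
    2 * I_n * (2 * I_n - 1) * J_n * (2 * J_n - 1))           # = 1.0
# _calculate_energy_coefficients stores 0.5*C_F and 0.25*D_F, matching the
# A*C_F/2 and B*D_F/4 terms of the energy formula.
print(C_F, D_F)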
##########################
# USER METHODS #
##########################
def fix_ratio(self, value, target='upper', parameter='A'):
"""Fixes the ratio for a given hyperfine parameter to the given value.
Parameters
----------
value: float
Value to which the ratio is set
target: {'upper', 'lower'}
Sets the target level. If 'upper', the upper parameter is
calculated as lower * ratio, 'lower' calculates the lower
parameter as upper * ratio.
parameter: {'A', 'B', 'C'}
Selects which hyperfine parameter to set the ratio for."""
if target.lower() not in ['lower', 'upper']:
raise KeyError("Target must be 'lower' or 'upper'.")
if parameter.lower() not in ['a', 'b', 'c']:
raise KeyError("Parameter must be 'A', 'B' or 'C'.")
if parameter.lower() == 'a':
self.ratioA = (value, target)
if parameter.lower() == 'b':
self.ratioB = (value, target)
if parameter.lower() == 'c':
self.ratioC = (value, target)
self.params = self._set_ratios(self.params)
###########################
# MAGIC METHODS #
###########################
def __call__(self, x):
"""Get the response for frequency *x* (in MHz) of the spectrum.
Parameters
----------
x : float or array_like
Frequency in MHz
Returns
-------
float or NumPy array
Response of the spectrum for each value of *x*."""
if self.params['N'].value > 0:
s = np.zeros(x.shape)
for i in range(self.params['N'].value + 1):
s += (self.params['Poisson'].value ** i) * (sum([prof(x - i * self.params['Offset'].value)
for prof in self.parts])) / np.math.factorial(i)
s *= self.params['Scale'].value
else:
s = self.params['Scale'].value * sum([prof(x) for prof in self.parts])
# background_params = [self.params[par_name].value for par_name in self.params if par_name.startswith('Background')]
background_params = [self.params['Background' + str(int(deg))].value for deg in reversed(list(range(self.background_degree + 1)))]
return s + np.polyval(background_params, x)
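# The sidepeak weighting used in __call__ above is Poisson-like: sidepeak i is
# shifted by i * Offset and scaled by Poisson**i / i!. The relative weights for
# the default Poisson = 0.68 and N = 3, as a quick standalone check:
import math

poisson, N = 0.68, 3
weights = [poisson ** i / math.factorial(i) for i in range(N + 1)]
print([round(w, 3) for w in weights])  # [1.0, 0.68, 0.231, 0.052]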
###############################
# PLOTTING ROUTINES #
###############################
def plot(self, x=None, y=None, yerr=None,
no_of_points=10**3, ax=None, show=True, plot_kws={}):
"""Plot the hfs, possibly on top of experimental data.
Parameters
----------
x: array
Experimental x-data. If None, a suitable region around
the peaks is chosen to plot the hfs.
y: array
Experimental y-data.
yerr: array or dict('high': array, 'low': array)
Experimental errors on y.
no_of_points: int
Number of points to use for the plot of the hfs if
experimental data is given.
ax: matplotlib axes object
If provided, plots on this axis.
show: boolean
If True, the plot will be shown at the end.
plot_kws: dictionary
A dictionary possibly containing the following entries:
legend: string, optional
If given, an entry in the legend will be made for the spectrum.
data_legend: string, optional
If given, an entry in the legend will be made for the experimental
data.
xlabel: string, optional
If given, sets the xlabel to this string. Defaults to 'Frequency (MHz)'.
ylabel: string, optional
If given, sets the ylabel to this string. Defaults to 'Counts'.
model: boolean, optional
If given, the region around the fitted line will be shaded, with
the luminosity indicating the pmf of the Poisson
distribution characterized by the value of the fit. Note that
the argument *yerr* is ignored if *model* is True.
normalized: boolean, optional
If True, the data and fit are plotted normalized such that the highest
data point is one.
background: boolean, optional
If True, the background is used, otherwise the pure spectrum is plotted.
Returns
-------
fig, ax: matplotlib figure and axis
Figure and axis used for the plotting."""
kws = copy.deepcopy(plot_kws)
legend = kws.pop('legend', None,)
data_legend = kws.pop('data_legend', None)
xlabel = kws.pop('xlabel', 'Frequency (MHz)')
ylabel = kws.pop('ylabel', 'Counts',)
indicate = kws.pop('indicate', False)
model = kws.pop('model', False)
colormap = kws.pop('colormap', 'bone_r',)
normalized = kws.pop('normalized', False)
distance = kws.pop('distance', 4)
background = kws.pop('background', True)
if ax is None:
fig, ax = plt.subplots(1, 1)
else:
fig = ax.get_figure()
toReturn = fig, ax
color_points = next(ax._get_lines.prop_cycler)['color']
color_lines = next(ax._get_lines.prop_cycler)['color']
if x is None:
ranges = []
fwhm = self.parts[0].fwhm
for pos in self.locations:
r = np.linspace(pos - distance * fwhm,
pos + distance * fwhm,
50)
ranges.append(r)
superx = np.sort(np.concatenate(ranges))
else:
superx = np.linspace(x.min(), x.max(), int(no_of_points))
if 'sigma_x' in self.params:
xerr = self.params['sigma_x'].value
else:
xerr = 0
if normalized:
norm = np.max(y)
y,yerr = y/norm,yerr/norm
else:
norm = 1
if x is not None and y is not None:
if not model:
try:
ax.errorbar(x, y, yerr=[yerr['low'], yerr['high']],
xerr=xerr, fmt='o', label=data_legend, color=color_points)
except:
ax.errorbar(x, y, yerr=yerr, fmt='o', label=data_legend, color=color_points)
else:
ax.plot(x, y, 'o', color=color_points)
if model:
superx = np.linspace(superx.min(), superx.max(), len(superx))
range = (self.locations.min(), self.locations.max())
if range[0] == range[1]:
max_counts = self(range[0])
else:
max_counts = np.ceil(-optimize.brute(lambda x: -self(x), (range,), full_output=True, Ns=1000, finish=optimize.fmin)[1])
min_counts = [self.params[par_name].value for par_name in self.params if par_name.startswith('Background')][-1]
min_counts = np.floor(max(0, min_counts - 3 * min_counts ** 0.5))
y = np.arange(min_counts, max_counts + 3 * max_counts ** 0.5 + 1)
x, y = np.meshgrid(superx, y)
from scipy import stats
z = stats.poisson(self(x)).pmf(y)
z = z / z.sum(axis=0)
ax.imshow(z, extent=(x.min(), x.max(), y.min(), y.max()), cmap=plt.get_cmap(colormap))
line, = ax.plot(superx, self(superx) / norm, label=legend, lw=0.5, color=color_lines)
else:
if background:
y = self(superx)
else:
background_params = [self.params[par_name].value for par_name in self.params if par_name.startswith('Background')]
y = self(superx) - np.polyval(background_params, superx)
line, = ax.plot(superx, y, label=legend, color=color_lines)
ax.set_xlim(superx.min(), superx.max())
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if show:
plt.show()
return toReturn
def plot_spectroscopic(self, **kwargs):
"""Plots the hfs on top of experimental data
with errorbar given by the square root of the data.
Parameters
----------
x: array
Experimental x-data. If None, a suitable region around
the peaks is chosen to plot the hfs.
y: array
Experimental y-data.
yerr: array or dict('high': array, 'low': array)
Experimental errors on y.
no_of_points: int
Number of points to use for the plot of the hfs if
experimental data is given.
ax: matplotlib axes object
If provided, plots on this axis.
show: boolean
If True, the plot will be shown at the end.
legend: string, optional
If given, an entry in the legend will be made for the spectrum.
data_legend: string, optional
If given, an entry in the legend will be made for the experimental
data.
Returns
-------
fig, ax: matplotlib figure and axis
Figure and axis used for the plotting."""
y = kwargs.get('y', None)
if y is not None:
ylow, yhigh = poisson_interval(y)
yerr = {'low': y - ylow, 'high': yhigh - y}
else:
yerr = None
kwargs['yerr'] = yerr
return self.plot(**kwargs)
def plot_scheme(self, show=True, upper_color='#D55E00', lower_color='#009E73', arrow_color='#0072B2', distance=5, spectrum=False):
"""Create a figure where both the splitting of the upper and lower state is drawn,
and the hfs associated with this.
Parameters
----------
show: boolean, optional
If True, immediately shows the figure. Defaults to True.
upper_color: matplotlib color definition
Sets the color of the upper state. Defaults to red.
lower_color: matplotlib color definition
Sets the color of the lower state. Defaults to green.
arrow_color: matplotlib color definition
Sets the color of the arrows indicating the transitions.
Defaults to blue.
Returns
-------
tuple
Tuple containing the figure and both axes, also in a tuple."""
from fractions import Fraction
from matplotlib import lines
length_plot = 0.4
fig = plt.figure(frameon=False)
ax = fig.add_axes([0.5, 0, length_plot, 0.5], axisbg=[1, 1, 1, 0])
self.plot(ax=ax, show=False, plot_kws={'distance': distance})
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
locations = self.locations
plotrange = ax.get_xlim()
distances = (locations - plotrange[0]) / (plotrange[1] - plotrange[0]) * length_plot
height = self(locations)
plotrange = ax.get_ylim()
if not spectrum:
plt.close(fig)
fig = plt.figure(frameon=False)
lower_state_height = 0.1
else:
lower_state_height = 0.525
upper_state_height = 0.95
height = (height - plotrange[0]) / (plotrange[1] - plotrange[0]) / 2
A = np.append(np.ones(self.num_lower) * self.params['Al'].value,
np.ones(self.num_upper) * self.params['Au'].value)
B = np.append(np.ones(self.num_lower) * self.params['Bl'].value,
np.ones(self.num_upper) * self.params['Bu'].value)
C = np.append(np.ones(self.num_lower) * self.params['Cl'].value,
np.ones(self.num_upper) * self.params['Cu'].value)
energies = self.C * A + self.D * B + self.E * C
energy_range = (upper_state_height - lower_state_height) / 2 - 0.025
energies_upper = energies[self.num_lower:]
energies_upper_norm = np.abs(energies_upper.max()) if not energies_upper.max()==0 else 1
energies_upper_norm = np.ptp(energies_upper)
energies_upper = energies_upper / energies_upper_norm * energy_range
energies_lower = energies[:self.num_lower]
energies_lower_norm = np.abs(energies_lower.max()) if not energies_lower.max()==0 else 1
energies_lower_norm = np.ptp(energies_lower)  # completion (api: numpy.ptp)
import numpy as np
from sklearn.covariance import LedoitWolf, OAS
import sys
def simulateLogNormal(data, covtype='Estimate', nsamples=2000, **kwargs):
"""
:param data: Input data matrix (samples x variables).
:param covtype: Type of covariance matrix estimator. Allowed types are:
- 'Estimate' (default): empirical covariance estimate
- 'Diagonal': diagonal covariance (no between-variable correlation)
- 'ShrinkageLedoitWolf': Ledoit-Wolf shrinkage estimator
- 'ShrinkageOAS': OAS shrinkage estimator
:param int nsamples: Number of simulated samples to draw
:return: simulated data and the covariance estimate
"""
try:
# Offset data to make sure there are no 0 values for log transform
offset = np.min(data) + 1
offdata = data + offset
# log on the offsetted data
logdata = np.log(offdata)
# Get the means
meanslog = np.mean(logdata, axis=0)
# Specify covariance
# Regular covariance estimator
if covtype == "Estimate":
covlog = np.cov(logdata, rowvar=0)
# Shrinkage covariance estimator, using LedoitWolf
elif covtype == "ShrinkageLedoitWolf":
scov = LedoitWolf()
scov.fit(logdata)
covlog = scov.covariance_
elif covtype == "ShrinkageOAS":
scov = OAS()
scov.fit(logdata)
covlog = scov.covariance_
# Diagonal covariance matrix (no between variable correlation)
elif covtype == "Diagonal":
covlogdata = np.var(logdata, axis=0) #get variance of log data by each column
covlog = np.diag(covlogdata) #generate a matrix with diagonal of variance of log Data
else:
raise ValueError('Unknown Covariance type')
simData = np.random.multivariate_normal(meanslog, covlog, nsamples)
simData = np.exp(simData)
simData -= offset
##Set to 0 negative values
simData[np.where(simData < 0)] = 0  # completion (api: numpy.where); negative values set to 0
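# A compact standalone sketch of the idea behind simulateLogNormal above (the
# function body is truncated at the prompt boundary): estimate the mean and
# covariance in log space, draw multivariate-normal samples, and exponentiate
# back. Offset handling and the shrinkage estimators are deliberately omitted.
import numpy as np

rng = np.random.default_rng(1)
data = rng.lognormal(mean=1.0, sigma=0.3, size=(200, 4))  # toy positive data

logdata = np.log(data)
meanslog = logdata.mean(axis=0)
covlog = np.cov(logdata, rowvar=False)

sim = np.exp(rng.multivariate_normal(meanslog, covlog, size=500))
print(sim.shape, bool(sim.min() > 0))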
import numpy as np
from numpy.testing import assert_array_almost_equal
from oasislmf.pytools.fm.common import fm_profile_dtype
from oasislmf.pytools.fm.policy_extras import calc, UnknownCalcrule
def test_calcrule_1():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 1, 15, 0, 0, 10, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 0., 5., 15., 25., 30., 30.])
deductible_expected = np.array([5., 15., 20., 20., 20., 20., 20.])
over_limit_expected = np.array([5., 5., 5., 5., 5., 10., 20.])
under_limit_expected = np.array([5., 15., 20., 15., 5., 0., 0.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
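# The expected losses above are consistent with reading calcrule 1 as "apply a
# flat deductible, then cap at the limit" (deductible 15, limit 30 in the
# policy tuple). This is an inference from the test vectors, not a statement
# of the library's documented semantics:
import numpy as np

loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
print(np.minimum(np.maximum(loss_in - 15., 0.), 30.))
# -> [ 0.  0.  5. 15. 25. 30. 30.]  (matches loss_expected)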
def test_calcrule_2():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 2, 15, 0, 0, 10, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 0., 0., 2.5, 7.5, 12.5, 15.])
deductible_expected = np.array([5., 5., 5., 5., 5., 5., 5])
over_limit_expected = np.array([5., 5., 5., 5., 5., 5., 5])
under_limit_expected = np.array([5., 5., 5., 5., 5., 5., 5])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_3():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 3, 15, 0, 0, 10, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 0., 20., 30., 30., 30., 30.])
deductible_expected = np.array([5., 15., 5., 5., 5., 5., 5.])
over_limit_expected = np.array([5., 5., 5., 5., 15., 25., 35.])
under_limit_expected = np.array([5., 15., 5., 0., 0., 0., 0.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_5():
# ded + limit > 1
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 5, 0.25, 0, 0, 10, 0.8, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 7.5, 15., 22.5, 30., 37.5, 45.])
deductible_expected = np.array([5., 7.5, 10., 12.5, 15., 17.5, 20.])
over_limit_expected = np.array([5., 5., 5., 5., 5., 5., 5.])
under_limit_expected = np.array([0., 0.5, 1., 1.5, 2., 2.5, 3.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
# ded + limit < 1
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 5, 0.25, 0, 0, 10, 0.5, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 5., 10., 15., 20., 25., 30.])
deductible_expected = np.array([5., 7.5, 10., 12.5, 15., 17.5, 20.])
over_limit_expected = np.array([5., 7.5, 10., 12.5, 15., 17.5, 20.])
under_limit_expected = np.array([0., 0., 0., 0., 0., 0., 0.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_7():
loss_in = np.array([20., 20., 20., 20., 20., 20., 1., 20, 60])
deductible = np.array([0., 0., 0., 30., 30., 30., 16., 10, 10])
over_limit = np.array([0., 3., 10., 10., 10., 0., 0., 10, 10])
under_limit = np.array([0., 10., 10., 0., 5., 15., 0., 10, 10])
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 7, 5, 10, 20, 0, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([10., 13., 20., 20., 25., 30., 0., 15., 30])
deductible_expected = np.array([10., 10., 10., 20., 20., 20., 17., 15., 15])
over_limit_expected = np.array([0., 0., 5., 20., 15., 0., 0., 10., 35])
under_limit_expected = np.array([5., 12., 10., 0., 0., 0., 1., 15., 0.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_8():
loss_in = np.array([20., 20., 20., 20., 20., 20., 1., 20, 60])
deductible = np.array([0., 0., 0., 30., 30., 30., 16., 10, 10])
over_limit = np.array([0., 3., 10., 10., 10., 0., 0., 10, 10])
under_limit = np.array([0., 10., 10., 0., 5., 15., 0., 10, 10])
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 8, 5, 10, 20, 0, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([10., 13., 20., 15., 15., 15., 0., 15., 30])
deductible_expected = np.array([10., 10., 10., 35., 35., 35., 17., 15., 15])
over_limit_expected = np.array([0., 0., 5., 10., 10., 0., 0., 10., 35])
under_limit_expected = np.array([5., 12., 10., 5., 10., 15., 1., 15., 0.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_10():
loss_in = np.array([20., 20., 20., 20., 20., 20., 1., 20, 60])
deductible = np.array([0., 0., 0., 30., 30., 30., 16., 10, 10])
over_limit = np.array([0., 3., 10., 10., 10., 0., 0., 10, 10])
under_limit = np.array([0., 10., 10., 0., 5., 15., 0., 10, 10])
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 10, 5, 10, 20, 0, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([15., 15., 15., 20., 25., 30., 0., 15., 55])
deductible_expected = np.array([5., 5., 5., 20., 20., 20., 17., 15., 15])
over_limit_expected = np.array([0., 3., 10., 20., 15., 0., 0., 10., 10])
under_limit_expected = np.array([5., 15., 15., 0., 0., 5., 1., 15., 15.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_11():
loss_in = np.array([20., 20., 20., 20., 20., 20., 1., 20, 60])
deductible = np.array([0., 0., 0., 30., 30., 30., 16., 10, 10])
over_limit = np.array([0., 3., 10., 10., 10., 0., 0., 10, 10])
under_limit = np.array([0., 10., 10., 0., 5., 15., 0., 10, 10])
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 11, 5, 10, 20, 0, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([10., 13., 20., 15., 15., 15., 0., 15., 55])
deductible_expected = np.array([10., 10., 10., 35., 35., 35., 17., 15., 15])
over_limit_expected = np.array([0., 0., 5., 10., 10., 0., 0., 10., 10])
under_limit_expected = np.array([5., 12., 10., 5., 10., 20., 1., 15., 15.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_12():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 12, 15, 0, 0, 10, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 0., 5., 15., 25., 35., 45.])
deductible_expected = np.array([5., 15., 20., 20., 20., 20., 20.])
over_limit_expected = np.array([5., 5., 5., 5., 5., 5., 5.])
under_limit_expected = np.array([5., 15., 20., 20., 20., 20., 20.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_13():
loss_in = np.array([20., 20., 20., 20., 20., 20., 1., 20, 60])
deductible = np.array([0., 0., 0., 30., 30., 30., 16., 10, 10])
over_limit = np.array([0., 3., 10., 10., 10., 0., 0., 10, 10])
under_limit = np.array([0., 10., 10., 0., 5., 15., 0., 10, 10])
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 13, 5, 10, 20, 0, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([10., 13., 20., 20., 25., 30., 0., 15., 55])
deductible_expected = np.array([10., 10., 10., 20., 20., 20., 17., 15., 15])
over_limit_expected = np.array([0., 0., 5., 20., 15., 0., 0., 10., 10])
under_limit_expected = np.array([5., 12., 10., 0., 0., 5., 1., 15., 15.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_14():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 14, 15, 0, 0, 10, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 10., 20., 30., 30., 30., 30.])
deductible_expected = np.array([5., 5., 5., 5., 5., 5., 5.])
over_limit_expected = np.array([5., 5., 5., 5., 15., 25., 35.])
under_limit_expected = np.array([5., 5., 5., 0., 0., 0., 0.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_15():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 15, 15, 0, 0, 10, 0.6, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 0., 5., 15., 24., 30., 36.])
deductible_expected = np.array([5., 15., 20., 20., 20., 20., 20.])
over_limit_expected = np.array([5., 5., 5., 5., 6., 10., 14.])
under_limit_expected = np.array([5., 15., 17.5, 7.5, 0., 0., 0.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected, decimal=4)
def test_calcrule_16():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 16, 1/4, 0, 0, 10, 0.6, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 7.5, 15., 22.5, 30., 37.5, 45.])
deductible_expected = np.array([5., 7.5, 10., 12.5, 15., 17.5, 20.])
over_limit_expected = np.array([5., 5., 5., 5., 5., 5., 5.])
under_limit_expected = np.array([5., 7.5, 10., 12.5, 15., 17.5, 20.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected, decimal=4)
def test_calcrule_17():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 17, 1/4, 0, 0, 10, 25, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 0, 2.5, 6.25, 10., 12.5, 12.5])
deductible_expected = np.array([5., 5., 5., 5., 5., 5., 5.])
over_limit_expected = np.array([5., 5., 5., 5., 5., 5., 5.])
under_limit_expected = np.array([5., 5., 5., 5., 5., 5., 5.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_19():
loss_in = np.array([20., 20., 20., 20., 20., 20., 1., 20, 60])
deductible = np.array([0., 0., 0., 30., 30., 30., 16., 10, 10])
over_limit = np.array([0., 3., 10., 10., 10., 0., 0., 10, 10])
under_limit = np.array([0., 10., 10., 0., 5., 15., 0., 10, 10])
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 19, 1/4, 10, 20, 0, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([10., 13., 20., 20., 25., 30., 0.75, 15., 50])
deductible_expected = np.array([10., 10., 10., 20., 20., 20., 16.25, 15., 20])
over_limit_expected = np.array([0., 0., 0.25, 20., 15., 0., 0., 10., 10])
under_limit_expected = np.array([9.75, 16.75, 10., 0., 0., 5., 0.25, 15., 20.])
assert_array_almost_equal(loss_out, loss_expected)
assert_array_almost_equal(deductible, deductible_expected)
assert_array_almost_equal(over_limit, over_limit_expected)
assert_array_almost_equal(under_limit, under_limit_expected)
def test_calcrule_20():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in) * 5
loss_out = np.empty_like(loss_in)
policy = np.array([(0, 20, 25, 0, 0, 10, 30, 0.5, 0, 0)], dtype=fm_profile_dtype)[0]
calc(policy, loss_out, loss_in, deductible, over_limit, under_limit, None)
loss_expected = np.array([0., 10., 20., 0., 0., 0., 0.])
assert_array_almost_equal(loss_out, loss_expected)
def test_calcrule_22():
loss_in = np.array([0., 10., 20., 30., 40., 50., 60.])
deductible = np.ones_like(loss_in) * 5
over_limit = np.ones_like(loss_in) * 5
under_limit = np.ones_like(loss_in)  # completion (api: numpy.ones_like)
import numpy as np
import scipy.misc as misc
import cPickle as pickle
import wget
import glob
import os
import tarfile
from ..utils import misc as umisc
def extract_patches(impath, step=2):
""" Get grayscale image patches. """
img = misc.imread(impath, flatten=True).astype(np.float32)
h, w = img.shape
patches = []
for i in range(0, h-7, step):
for j in range(0, w-7, step):
patch = np.reshape(img[i:i+8, j:j+8], (64,)) + np.random.rand(64)
patches.append(patch)
return np.array(patches)
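# For reference only: the same 8x8 patch grid (stride `step`) can be built
# without the Python loops using numpy.lib.stride_tricks.sliding_window_view
# (NumPy >= 1.20). This is an alternative sketch, not the module's
# implementation, and it omits the additive noise term used above.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def extract_patches_vectorised(img, step=2):
    windows = sliding_window_view(img, (8, 8))[::step, ::step]
    return windows.reshape(-1, 64)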
def process_images(imdir, extension='.jpg'):
""" Extract all patches from images in a directory. """
impaths = glob.glob(os.path.join(imdir, '*'+extension))
im_patches = [extract_patches(ip) for ip in impaths]
return np.concatenate(im_patches, 0)
def make_dataset(bsds_imdir, save_path):
# Load patches, rescale, demean, drop last pixel
print('Loading training data...')
train_patches = process_images(os.path.join(bsds_imdir, 'train'))/256.0
train_patches = train_patches - np.mean(train_patches, 1, keepdims=True)
train_patches = train_patches[:, :-1].astype(np.float32)
print('Loading testing data...')
test_patches = process_images(os.path.join(bsds_imdir, 'test'))/256.0
test_patches = test_patches - np.mean(test_patches, 1, keepdims=True)  # completion (api: numpy.mean)
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import datetime
import csv
from dateutil import parser
import os
from ChefsHatGym.KEF import DataSetManager
from ChefsHatGym.KEF.DataSetManager import actionFinish, actionPass, actionDiscard, actionDeal
from matplotlib.collections import PolyCollection
statisticsGame = {
"GameDuration": "gameDuration",
"NumberGames":"numberGames",
"NumberRounds": "numberRounds",
"PlayerScore": "averagePoints",
"AgregatedScore": "agregatedScore",
}
statisticsPreGame = {
"Personality": "personality",
"Competitiveness": "competitiveness",
"Experience": "experience"
}
statisticsAfterGame = {
"Personality": "personality"
}
statisticsIntegrated = {
"Similarity": "similarity"
}
"""Plots per game"""
def plotPlayedTime(playedTimes, saveDirectory):
fig, ax = plt.subplots()
plt.grid()
playedTimes = numpy.array(playedTimes)
ax.set_xlabel('Players')
ax.set_ylabel('Seconds')
ax.bar(range(1, len(playedTimes)+1),playedTimes)
plt.title('Game duration per participant')
averagePlayedTime = playedTimes.mean()
averageLabel = str("{:10.2f}".format(averagePlayedTime))
ax.axhline(averagePlayedTime, color='red', linewidth=2, label='Avg: ' + str(averageLabel) + " s")
plt.legend()
plt.savefig(saveDirectory + "/Game_PlayedTime.png")
plt.clf()
def plotNumberGames(numberGames, saveDirectory):
fig, ax = plt.subplots()
plt.grid()
numberGames = numpy.array(numberGames)
ax.set_xlabel('Players')
ax.set_ylabel('# Games')
plt.title('Games per participant')
ax.bar(range(1, len(numberGames)+1),numberGames)
averageGames = numberGames.mean()
averageLabel = str("{:10.2f}".format(averageGames))
ax.axhline(averageGames, color='red', linewidth=2, label='Avg: ' + str(averageLabel) + " games")
plt.legend()
plt.savefig(saveDirectory + "/Game_NumberGames.png")
plt.clf()
def plotNumberRounds(numberRounds, saveDirectory):
fig, ax = plt.subplots()
plt.grid()
numberRounds = numpy.array(numberRounds)
ax.set_xlabel('Players')
ax.set_ylabel('# Pizzas')
plt.title('Rounds per participant')
ax.bar(range(1, len(numberRounds)+1),numberRounds)
averageGames = numberRounds.mean()
averageLabel = str("{:10.2f}".format(averageGames))
ax.axhline(averageGames, color='red', linewidth=2, label='Avg: ' + str(averageLabel) + " pizzas")
plt.legend()
plt.savefig(saveDirectory + "/Game_NumberPizzas.png")
plt.clf()
def plotPlayerScore(playerScore, saveDirectory):
fig, ax = plt.subplots()
plt.grid()
playerScore = numpy.array(playerScore)
ax.set_xlabel('Players')
ax.set_ylabel('Score')
plt.title('Score per participant')
ax.bar(range(1, len(playerScore)+1),playerScore)
averageGames = playerScore.mean()
averageLabel = str("{:10.2f}".format(averageGames))
ax.axhline(averageGames, color='red', linewidth=2, label='Avg: ' + str(averageLabel) + " points")
plt.legend()
plt.savefig(saveDirectory + "/Game_Score.png")
plt.clf()
def plotAgregatedScore(thisGameAgregatedScore, saveDirectory):
fig, ax = plt.subplots()
plt.grid()
agregatedScore = numpy.array(thisGameAgregatedScore)
# print ("agregated:" + str(thisGameAgregatedScore))
ax.set_xlabel('Players')
ax.set_ylabel('Agregated Score')
plt.title('Aggregated score per participant')
ax.bar(range(1, len(agregatedScore)+1),agregatedScore)
averageGames = agregatedScore.mean()
averageLabel = str("{:10.2f}".format(averageGames))
ax.axhline(averageGames, color='red', linewidth=2, label='Avg: ' + str(averageLabel) + " points")
plt.legend()
plt.savefig(saveDirectory + "/Game_ScoreAgregated.png")
plt.clf()
"""Plots pre-game"""
def plotPersonalitiesPreGame(agencies,competences,communnions,saveDirectory):
plt.grid()
examples = range(1, len(agencies)+1)
agencies = numpy.array(agencies)
competences = numpy.array(competences)
communnions = numpy.array(communnions)
width = 0.5
# Use each trait's own standard deviation for its error bar.
p1 = plt.bar(examples, agencies, width, yerr=agencies.std())
p2 = plt.bar(examples, competences, width, bottom=agencies, yerr=competences.std())
p3 = plt.bar(examples, communnions, width, bottom=agencies+competences, yerr=communnions.std())
plt.title('Personalities per participant')
# plt.legend((p1[0], p2[0],), ('Agency', 'Competence'))
plt.legend((p1[0], p2[0], p3[0]), ('Agency', 'Competence', "Communion"))
plt.xlabel('Players')
plt.ylabel('Value')
# plt.legend()
plt.savefig(saveDirectory + "/Player_Personalities.png")
plt.clf()
def plotCompetitiveness(competitiveness, saveDirectory):
fig, ax = plt.subplots()
plt.grid()
competitiveness = numpy.array(competitiveness)
ax.set_xlabel('Players')
ax.set_ylabel('Rating')
ax.bar(range(1, len(competitiveness)+1),competitiveness)
plt.title('Competitiveness per participant')
averagePlayedTime = competitiveness.mean()
averageLabel = str("{:10.2f}".format(averagePlayedTime))
ax.axhline(averagePlayedTime, color='red', linewidth=2, label='Avg: ' + str(averageLabel))
plt.legend()
plt.savefig(saveDirectory + "/Player_Competitiveness.png")
plt.clf()
def plotExperience(experience, saveDirectory):
fig, ax = plt.subplots()
plt.grid()
experience = numpy.array(experience)
ax.set_xlabel('Players')
ax.set_ylabel('Rating')
ax.bar(range(1, len(experience)+1),experience)
plt.title('Experience per participant')
averagePlayedTime = experience.mean()
averageLabel = str("{:10.2f}".format(averagePlayedTime))
ax.axhline(averagePlayedTime, color='red', linewidth=2, label='Avg: ' + str(averageLabel))
plt.legend()
plt.savefig(saveDirectory + "/Player_Experiences.png")
plt.clf()
"""Plots after-game"""
def plotPersonalitiesAfterGame(agencies,competences,communnions,saveDirectory):
finalAgency = []
finalCompetence = []
finaoComunion = []
agencies = numpy.array(agencies)
finalAgency.append(numpy.array(agencies[0]).mean())
finalAgency.append(numpy.array(agencies[1]).mean())
finalAgency.append(numpy.array(agencies[2]).mean())
finalCompetence.append(numpy.array(competences[0]).mean())
finalCompetence.append(numpy.array(competences[1]).mean())
finalCompetence.append(numpy.array(competences[2]).mean())
finaoComunion.append(numpy.array(communnions[0]).mean())
finaoComunion.append(numpy.array(communnions[1]).mean())
finaoComunion.append(numpy.array(communnions[2]).mean())
plt.grid()
examples = range(1, len(finalAgency)+1)
agencies = numpy.array(finalAgency)
competences = numpy.array(finalCompetence)
communnions = numpy.array(finaoComunion)
width = 0.5
# print ("Agencies:" +str(agencies))
# Use each trait's own standard deviation for its error bar.
p1 = plt.bar(examples, agencies, width, yerr=agencies.std())
p2 = plt.bar(examples, competences, width, bottom=agencies, yerr=competences.std())
p3 = plt.bar(examples, communnions, width, bottom=agencies+competences, yerr=communnions.std())
plt.xticks(examples, ('PPO', 'Random', "DQL"))
plt.title('Personalities per Agent')
# plt.legend((p1[0], p2[0],), ('Agency', 'Competence'))
plt.legend((p1[0], p2[0], p3[0]), ('Agency', 'Competence', "Communion"))
plt.xlabel('Agents')
plt.ylabel('Value')
# plt.legend()
plt.savefig(saveDirectory + "/Agents_Personalities.png")
plt.clf()
"""Plots Integrated"""
def plotSimilaritiesIntegrated(agenciesAgent, competencesAgent, communnionsAgent,agenciesPlayer, competencesPlayer, communnionsPlayer, saveDirectory):
finalAgency = []
finalCompetence = []
finaoComunion = []
agencies = numpy.array(agenciesAgent)
finalAgency.append(numpy.array(agencies[0]).mean())
finalAgency.append(numpy.array(agencies[1]).mean())
finalAgency.append(numpy.array(agencies[2]).mean())
finalCompetence.append(numpy.array(competencesAgent[0]).mean())
finalCompetence.append(numpy.array(competencesAgent[1]).mean())
finalCompetence.append(numpy.array(competencesAgent[2]).mean())
finaoComunion.append(numpy.array(communnionsAgent[0]).mean())
finaoComunion.append(numpy.array(communnionsAgent[1]).mean())
finaoComunion.append(numpy.array(communnionsAgent[2]).mean())
finalDistancesAvery = []
finalDistancesBeck = []
finalDistancesCass = []
for p in range(len(agenciesPlayer)):
print ("p:"+str(p) + " - " + str(len(agenciesPlayer)))
playerPoint = numpy.array([agenciesPlayer[p], competencesPlayer[p], communnionsPlayer[p]])
averyPoint = numpy.array([finalAgency[0], finalCompetence[0], finaoComunion[0]])
beckPoint =
| numpy.array([finalAgency[1], finalCompetence[1], finaoComunion[1]]) | numpy.array |
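# A minimal sketch, assuming the truncated record goes on to compare each
# player's [agency, competence, communion] point with the agents' points via
# Euclidean distance; the names and values below are illustrative only.
import numpy as np
player_point = np.array([0.4, 0.7, 0.5])
avery_point = np.array([0.6, 0.6, 0.4])
beck_point = np.array([0.3, 0.8, 0.5])
dist_to_avery = np.linalg.norm(player_point - avery_point)  # smaller distance = more similar personality
dist_to_beck = np.linalg.norm(player_point - beck_point)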
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex, to_rgb, to_rgba
import pytest
from pytest import approx
import numpy.testing as npt
from distutils.version import LooseVersion
from numpy.testing import (
assert_array_equal,
assert_array_less,
)
from .. import categorical as cat
from .. import palettes
from .._core import categorical_order
from ..categorical import (
_CategoricalPlotterNew,
Beeswarm,
catplot,
stripplot,
swarmplot,
)
from ..palettes import color_palette
from ..utils import _normal_quantile_func, _draw_figure
from .._testing import assert_plots_equal
PLOT_FUNCS = [
catplot,
stripplot,
swarmplot,
]
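# PLOT_FUNCS collects the categorical scatter entry points exercised by the
# parametrized tests below; catplot is the figure-level interface that
# dispatches to axes-level functions such as stripplot and swarmplot.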
class TestCategoricalPlotterNew:
@pytest.mark.parametrize(
"func,kwargs",
itertools.product(
PLOT_FUNCS,
[
{"x": "x", "y": "a"},
{"x": "a", "y": "y"},
{"x": "y"},
{"y": "x"},
],
),
)
def test_axis_labels(self, long_df, func, kwargs):
func(data=long_df, **kwargs)
ax = plt.gca()
for axis in "xy":
val = kwargs.get(axis, "")
label_func = getattr(ax, f"get_{axis}label")
assert label_func() == val
@pytest.mark.parametrize("func", PLOT_FUNCS)
def test_empty(self, func):
func()
ax = plt.gca()
assert not ax.collections
assert not ax.patches
assert not ax.lines
func(x=[], y=[])
ax = plt.gca()
assert not ax.collections
assert not ax.patches
assert not ax.lines
def test_redundant_hue_backcompat(self, long_df):
p = _CategoricalPlotterNew(
data=long_df,
variables={"x": "s", "y": "y"},
)
color = None
palette = dict(zip(long_df["s"].unique(), color_palette()))
hue_order = None
palette, _ = p._hue_backcompat(color, palette, hue_order, force_hue=True)
assert p.variables["hue"] == "s"
assert_array_equal(p.plot_data["hue"], p.plot_data["x"])
assert all(isinstance(k, str) for k in palette)
class CategoricalFixture:
"""Test boxplot (also base class for things like violinplots)."""
rs = np.random.RandomState(30)
n_total = 60
x = rs.randn(int(n_total / 3), 3)
x_df = pd.DataFrame(x, columns=pd.Series(list("XYZ"), name="big"))
y = pd.Series(rs.randn(n_total), name="y_data")
y_perm = y.reindex(rs.choice(y.index, y.size, replace=False))
g = pd.Series(np.repeat(list("abc"), int(n_total / 3)), name="small")
h = pd.Series(np.tile(list("mn"), int(n_total / 2)), name="medium")
u = pd.Series(np.tile(list("jkh"), int(n_total / 3)))
df = pd.DataFrame(dict(y=y, g=g, h=h, u=u))
x_df["W"] = g
class TestCategoricalPlotter(CategoricalFixture):
def test_wide_df_data(self):
p = cat._CategoricalPlotter()
# Test basic wide DataFrame
p.establish_variables(data=self.x_df)
# Check data attribute
for x, y, in zip(p.plot_data, self.x_df[["X", "Y", "Z"]].values.T):
npt.assert_array_equal(x, y)
# Check semantic attributes
assert p.orient == "v"
assert p.plot_hues is None
assert p.group_label == "big"
assert p.value_label is None
# Test wide dataframe with forced horizontal orientation
p.establish_variables(data=self.x_df, orient="horiz")
assert p.orient == "h"
# Test exception by trying to hue-group with a wide dataframe
with pytest.raises(ValueError):
p.establish_variables(hue="d", data=self.x_df)
def test_1d_input_data(self):
p = cat._CategoricalPlotter()
# Test basic vector data
x_1d_array = self.x.ravel()
p.establish_variables(data=x_1d_array)
assert len(p.plot_data) == 1
assert len(p.plot_data[0]) == self.n_total
assert p.group_label is None
assert p.value_label is None
# Test basic vector data in list form
x_1d_list = x_1d_array.tolist()
p.establish_variables(data=x_1d_list)
assert len(p.plot_data) == 1
assert len(p.plot_data[0]) == self.n_total
assert p.group_label is None
assert p.value_label is None
# Test an object array that looks 1D but isn't
x_notreally_1d = np.array([self.x.ravel(),
self.x.ravel()[:int(self.n_total / 2)]],
dtype=object)
p.establish_variables(data=x_notreally_1d)
assert len(p.plot_data) == 2
assert len(p.plot_data[0]) == self.n_total
assert len(p.plot_data[1]) == self.n_total / 2
assert p.group_label is None
assert p.value_label is None
def test_2d_input_data(self):
p = cat._CategoricalPlotter()
x = self.x[:, 0]
# Test vector data that looks 2D but doesn't really have columns
p.establish_variables(data=x[:, np.newaxis])
assert len(p.plot_data) == 1
assert len(p.plot_data[0]) == self.x.shape[0]
assert p.group_label is None
assert p.value_label is None
# Test vector data that looks 2D but doesn't really have rows
p.establish_variables(data=x[np.newaxis, :])
assert len(p.plot_data) == 1
assert len(p.plot_data[0]) == self.x.shape[0]
assert p.group_label is None
assert p.value_label is None
def test_3d_input_data(self):
p = cat._CategoricalPlotter()
# Test that passing actually 3D data raises
x = np.zeros((5, 5, 5))
with pytest.raises(ValueError):
p.establish_variables(data=x)
def test_list_of_array_input_data(self):
p = cat._CategoricalPlotter()
# Test 2D input in list form
x_list = self.x.T.tolist()
p.establish_variables(data=x_list)
assert len(p.plot_data) == 3
lengths = [len(v_i) for v_i in p.plot_data]
assert lengths == [self.n_total / 3] * 3
assert p.group_label is None
assert p.value_label is None
def test_wide_array_input_data(self):
p = cat._CategoricalPlotter()
# Test 2D input in array form
p.establish_variables(data=self.x)
assert np.shape(p.plot_data) == (3, self.n_total / 3)
npt.assert_array_equal(p.plot_data, self.x.T)
assert p.group_label is None
assert p.value_label is None
def test_single_long_direct_inputs(self):
p = cat._CategoricalPlotter()
# Test passing a series to the x variable
p.establish_variables(x=self.y)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "h"
assert p.value_label == "y_data"
assert p.group_label is None
# Test passing a series to the y variable
p.establish_variables(y=self.y)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "v"
assert p.value_label == "y_data"
assert p.group_label is None
# Test passing an array to the y variable
p.establish_variables(y=self.y.values)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "v"
assert p.group_label is None
assert p.value_label is None
# Test array and series with non-default index
x = pd.Series([1, 1, 1, 1], index=[0, 2, 4, 6])
y = np.array([1, 2, 3, 4])
p.establish_variables(x, y)
assert len(p.plot_data[0]) == 4
def test_single_long_indirect_inputs(self):
p = cat._CategoricalPlotter()
# Test referencing a DataFrame series in the x variable
p.establish_variables(x="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "h"
assert p.value_label == "y"
assert p.group_label is None
# Test referencing a DataFrame series in the y variable
p.establish_variables(y="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "v"
assert p.value_label == "y"
assert p.group_label is None
def test_longform_groupby(self):
p = cat._CategoricalPlotter()
# Test a vertically oriented grouped and nested plot
p.establish_variables("g", "y", hue="h", data=self.df)
assert len(p.plot_data) == 3
assert len(p.plot_hues) == 3
assert p.orient == "v"
assert p.value_label == "y"
assert p.group_label == "g"
assert p.hue_title == "h"
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test a grouped and nested plot with direct array value data
p.establish_variables("g", self.y.values, "h", self.df)
assert p.value_label is None
assert p.group_label == "g"
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test a grouped and nested plot with direct array hue data
p.establish_variables("g", "y", self.h.values, self.df)
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test categorical grouping data
df = self.df.copy()
df.g = df.g.astype("category")
# Test that horizontal orientation is automatically detected
p.establish_variables("y", "g", hue="h", data=df)
assert len(p.plot_data) == 3
assert len(p.plot_hues) == 3
assert p.orient == "h"
assert p.value_label == "y"
assert p.group_label == "g"
assert p.hue_title == "h"
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test grouped data that matches on index
p1 = cat._CategoricalPlotter()
p1.establish_variables(self.g, self.y, hue=self.h)
p2 = cat._CategoricalPlotter()
p2.establish_variables(self.g, self.y[::-1], self.h)
for i, (d1, d2) in enumerate(zip(p1.plot_data, p2.plot_data)):
assert np.array_equal(d1.sort_index(), d2.sort_index())
def test_input_validation(self):
p = cat._CategoricalPlotter()
kws = dict(x="g", y="y", hue="h", units="u", data=self.df)
for var in ["x", "y", "hue", "units"]:
input_kws = kws.copy()
input_kws[var] = "bad_input"
with pytest.raises(ValueError):
p.establish_variables(**input_kws)
def test_order(self):
p = cat._CategoricalPlotter()
# Test inferred order from a wide dataframe input
p.establish_variables(data=self.x_df)
assert p.group_names == ["X", "Y", "Z"]
# Test specified order with a wide dataframe input
p.establish_variables(data=self.x_df, order=["Y", "Z", "X"])
assert p.group_names == ["Y", "Z", "X"]
for group, vals in zip(["Y", "Z", "X"], p.plot_data):
npt.assert_array_equal(vals, self.x_df[group])
with pytest.raises(ValueError):
p.establish_variables(data=self.x, order=[1, 2, 0])
# Test inferred order from a grouped longform input
p.establish_variables("g", "y", data=self.df)
assert p.group_names == ["a", "b", "c"]
# Test specified order from a grouped longform input
p.establish_variables("g", "y", data=self.df, order=["b", "a", "c"])
assert p.group_names == ["b", "a", "c"]
for group, vals in zip(["b", "a", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test inferred order from a grouped input with categorical groups
df = self.df.copy()
df.g = df.g.astype("category")
df.g = df.g.cat.reorder_categories(["c", "b", "a"])
p.establish_variables("g", "y", data=df)
assert p.group_names == ["c", "b", "a"]
for group, vals in zip(["c", "b", "a"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
df.g = (df.g.cat.add_categories("d")
.cat.reorder_categories(["c", "b", "d", "a"]))
p.establish_variables("g", "y", data=df)
assert p.group_names == ["c", "b", "d", "a"]
def test_hue_order(self):
p = cat._CategoricalPlotter()
# Test inferred hue order
p.establish_variables("g", "y", hue="h", data=self.df)
assert p.hue_names == ["m", "n"]
# Test specified hue order
p.establish_variables("g", "y", hue="h", data=self.df,
hue_order=["n", "m"])
assert p.hue_names == ["n", "m"]
# Test inferred hue order from a categorical hue input
df = self.df.copy()
df.h = df.h.astype("category")
df.h = df.h.cat.reorder_categories(["n", "m"])
p.establish_variables("g", "y", hue="h", data=df)
assert p.hue_names == ["n", "m"]
df.h = (df.h.cat.add_categories("o")
.cat.reorder_categories(["o", "m", "n"]))
p.establish_variables("g", "y", hue="h", data=df)
assert p.hue_names == ["o", "m", "n"]
def test_plot_units(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", hue="h", data=self.df)
assert p.plot_units is None
p.establish_variables("g", "y", hue="h", data=self.df, units="u")
for group, units in zip(["a", "b", "c"], p.plot_units):
npt.assert_array_equal(units, self.u[self.g == group])
def test_default_palettes(self):
p = cat._CategoricalPlotter()
# Test palette mapping the x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, None, 1)
assert p.colors == palettes.color_palette(n_colors=3)
# Test palette mapping the hue position
p.establish_variables("g", "y", hue="h", data=self.df)
p.establish_colors(None, None, 1)
assert p.colors == palettes.color_palette(n_colors=2)
def test_default_palette_with_many_levels(self):
with palettes.color_palette(["blue", "red"], 2):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, None, 1)
npt.assert_array_equal(p.colors,
palettes.husl_palette(3, l=.7)) # noqa
def test_specific_color(self):
p = cat._CategoricalPlotter()
# Test the same color for each x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors("blue", None, 1)
blue_rgb = mpl.colors.colorConverter.to_rgb("blue")
assert p.colors == [blue_rgb] * 3
# Test a color-based blend for the hue mapping
p.establish_variables("g", "y", hue="h", data=self.df)
p.establish_colors("#ff0022", None, 1)
rgba_array = np.array(palettes.light_palette("#ff0022", 2))
npt.assert_array_almost_equal(p.colors,
rgba_array[:, :3])
def test_specific_palette(self):
p = cat._CategoricalPlotter()
# Test palette mapping the x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, "dark", 1)
assert p.colors == palettes.color_palette("dark", 3)
# Test that non-None `color` and `hue` raises an error
p.establish_variables("g", "y", hue="h", data=self.df)
p.establish_colors(None, "muted", 1)
assert p.colors == palettes.color_palette("muted", 2)
# Test that specified palette overrides specified color
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", data=self.df)
p.establish_colors("blue", "deep", 1)
assert p.colors == palettes.color_palette("deep", 3)
def test_dict_as_palette(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", hue="h", data=self.df)
pal = {"m": (0, 0, 1), "n": (1, 0, 0)}
p.establish_colors(None, pal, 1)
assert p.colors == [(0, 0, 1), (1, 0, 0)]
def test_palette_desaturation(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", data=self.df)
p.establish_colors((0, 0, 1), None, .5)
assert p.colors == [(.25, .25, .75)] * 3
p.establish_colors(None, [(0, 0, 1), (1, 0, 0), "w"], .5)
assert p.colors == [(.25, .25, .75), (.75, .25, .25), (1, 1, 1)]
class TestCategoricalStatPlotter(CategoricalFixture):
def test_no_bootstrapping(self):
p = cat._CategoricalStatPlotter()
p.establish_variables("g", "y", data=self.df)
p.estimate_statistic(np.mean, None, 100, None)
npt.assert_array_equal(p.confint, np.array([]))
p.establish_variables("g", "y", hue="h", data=self.df)
p.estimate_statistic(np.mean, None, 100, None)
npt.assert_array_equal(p.confint, np.array([[], [], []]))
def test_single_layer_stats(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y)
p.estimate_statistic(np.mean, 95, 10000, None)
assert p.statistic.shape == (3,)
assert p.confint.shape == (3, 2)
npt.assert_array_almost_equal(p.statistic,
y.groupby(g).mean())
for ci, (_, grp_y) in zip(p.confint, y.groupby(g)):
sem = grp_y.std() / np.sqrt(len(grp_y))
mean = grp_y.mean()
half_ci = _normal_quantile_func(.975) * sem
ci_want = mean - half_ci, mean + half_ci
npt.assert_array_almost_equal(ci_want, ci, 2)
def test_single_layer_stats_with_units(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 90))
y = pd.Series(np.random.RandomState(0).randn(270))
u = pd.Series(np.repeat(np.tile(list("xyz"), 30), 3))
y[u == "x"] -= 3
y[u == "y"] += 3
p.establish_variables(g, y)
p.estimate_statistic(np.mean, 95, 10000, None)
stat1, ci1 = p.statistic, p.confint
p.establish_variables(g, y, units=u)
p.estimate_statistic(np.mean, 95, 10000, None)
stat2, ci2 = p.statistic, p.confint
npt.assert_array_equal(stat1, stat2)
ci1_size = ci1[:, 1] - ci1[:, 0]
ci2_size = ci2[:, 1] - ci2[:, 0]
npt.assert_array_less(ci1_size, ci2_size)
def test_single_layer_stats_with_missing_data(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y, order=list("abdc"))
p.estimate_statistic(np.mean, 95, 10000, None)
assert p.statistic.shape == (4,)
assert p.confint.shape == (4, 2)
rows = g == "b"
mean = y[rows].mean()
sem = y[rows].std() / np.sqrt(rows.sum())
half_ci = _normal_quantile_func(.975) * sem
ci = mean - half_ci, mean + half_ci
npt.assert_almost_equal(p.statistic[1], mean)
npt.assert_array_almost_equal(p.confint[1], ci, 2)
npt.assert_equal(p.statistic[2], np.nan)
npt.assert_array_equal(p.confint[2], (np.nan, np.nan))
def test_nested_stats(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
h = pd.Series(np.tile(list("xy"), 150))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y, h)
p.estimate_statistic(np.mean, 95, 50000, None)
assert p.statistic.shape == (3, 2)
assert p.confint.shape == (3, 2, 2)
npt.assert_array_almost_equal(p.statistic,
y.groupby([g, h]).mean().unstack())
for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
sem = hue_y.std() / np.sqrt(len(hue_y))
mean = hue_y.mean()
half_ci = _normal_quantile_func(.975) * sem
ci_want = mean - half_ci, mean + half_ci
npt.assert_array_almost_equal(ci_want, ci, 2)
def test_bootstrap_seed(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
h = pd.Series(np.tile(list("xy"), 150))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y, h)
p.estimate_statistic(np.mean, 95, 1000, 0)
confint_1 = p.confint
p.estimate_statistic(np.mean, 95, 1000, 0)
confint_2 = p.confint
npt.assert_array_equal(confint_1, confint_2)
def test_nested_stats_with_units(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 90))
h = pd.Series(np.tile(list("xy"), 135))
u = pd.Series(np.repeat(list("ijkijk"), 45))
y = pd.Series(np.random.RandomState(0).randn(270))
y[u == "i"] -= 3
y[u == "k"] += 3
p.establish_variables(g, y, h)
p.estimate_statistic(np.mean, 95, 10000, None)
stat1, ci1 = p.statistic, p.confint
p.establish_variables(g, y, h, units=u)
p.estimate_statistic(np.mean, 95, 10000, None)
stat2, ci2 = p.statistic, p.confint
npt.assert_array_equal(stat1, stat2)
ci1_size = ci1[:, 0, 1] - ci1[:, 0, 0]
ci2_size = ci2[:, 0, 1] - ci2[:, 0, 0]
npt.assert_array_less(ci1_size, ci2_size)
def test_nested_stats_with_missing_data(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
h = pd.Series(np.tile(list("xy"), 150))
p.establish_variables(g, y, h,
order=list("abdc"),
hue_order=list("zyx"))
p.estimate_statistic(np.mean, 95, 50000, None)
assert p.statistic.shape == (4, 3)
assert p.confint.shape == (4, 3, 2)
rows = (g == "b") & (h == "x")
mean = y[rows].mean()
sem = y[rows].std() / np.sqrt(rows.sum())
half_ci = _normal_quantile_func(.975) * sem
ci = mean - half_ci, mean + half_ci
npt.assert_almost_equal(p.statistic[1, 2], mean)
npt.assert_array_almost_equal(p.confint[1, 2], ci, 2)
npt.assert_array_equal(p.statistic[:, 0], [np.nan] * 4)
npt.assert_array_equal(p.statistic[2], [np.nan] * 3)
npt.assert_array_equal(p.confint[:, 0],
np.zeros((4, 2)) * np.nan)
npt.assert_array_equal(p.confint[2],
np.zeros((3, 2)) * np.nan)
def test_sd_error_bars(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y)
p.estimate_statistic(np.mean, "sd", None, None)
assert p.statistic.shape == (3,)
assert p.confint.shape == (3, 2)
npt.assert_array_almost_equal(p.statistic,
y.groupby(g).mean())
for ci, (_, grp_y) in zip(p.confint, y.groupby(g)):
mean = grp_y.mean()
half_ci = np.std(grp_y)
ci_want = mean - half_ci, mean + half_ci
npt.assert_array_almost_equal(ci_want, ci, 2)
def test_nested_sd_error_bars(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
h = pd.Series(np.tile(list("xy"), 150))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y, h)
p.estimate_statistic(np.mean, "sd", None, None)
assert p.statistic.shape == (3, 2)
assert p.confint.shape == (3, 2, 2)
npt.assert_array_almost_equal(p.statistic,
y.groupby([g, h]).mean().unstack())
for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
mean = hue_y.mean()
half_ci = np.std(hue_y)
ci_want = mean - half_ci, mean + half_ci
npt.assert_array_almost_equal(ci_want, ci, 2)
def test_draw_cis(self):
p = cat._CategoricalStatPlotter()
# Test vertical CIs
p.orient = "v"
f, ax = plt.subplots()
at_group = [0, 1]
confints = [(.5, 1.5), (.25, .8)]
colors = [".2", ".3"]
p.draw_confints(ax, at_group, confints, colors)
lines = ax.lines
for line, at, ci, c in zip(lines, at_group, confints, colors):
x, y = line.get_xydata().T
npt.assert_array_equal(x, [at, at])
npt.assert_array_equal(y, ci)
assert line.get_color() == c
plt.close("all")
# Test horizontal CIs
p.orient = "h"
f, ax = plt.subplots()
p.draw_confints(ax, at_group, confints, colors)
lines = ax.lines
for line, at, ci, c in zip(lines, at_group, confints, colors):
x, y = line.get_xydata().T
npt.assert_array_equal(x, ci)
npt.assert_array_equal(y, [at, at])
assert line.get_color() == c
plt.close("all")
# Test vertical CIs with endcaps
p.orient = "v"
f, ax = plt.subplots()
p.draw_confints(ax, at_group, confints, colors, capsize=0.3)
capline = ax.lines[len(ax.lines) - 1]
caplinestart = capline.get_xdata()[0]
caplineend = capline.get_xdata()[1]
caplinelength = abs(caplineend - caplinestart)
assert caplinelength == approx(0.3)
assert len(ax.lines) == 6
plt.close("all")
# Test horizontal CIs with endcaps
p.orient = "h"
f, ax = plt.subplots()
p.draw_confints(ax, at_group, confints, colors, capsize=0.3)
capline = ax.lines[len(ax.lines) - 1]
caplinestart = capline.get_ydata()[0]
caplineend = capline.get_ydata()[1]
caplinelength = abs(caplineend - caplinestart)
assert caplinelength == approx(0.3)
assert len(ax.lines) == 6
# Test extra keyword arguments
f, ax = plt.subplots()
p.draw_confints(ax, at_group, confints, colors, lw=4)
line = ax.lines[0]
assert line.get_linewidth() == 4
plt.close("all")
# Test errwidth is set appropriately
f, ax = plt.subplots()
p.draw_confints(ax, at_group, confints, colors, errwidth=2)
capline = ax.lines[len(ax.lines) - 1]
assert capline._linewidth == 2
assert len(ax.lines) == 2
plt.close("all")
class TestBoxPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
order=None, hue_order=None,
orient=None, color=None, palette=None,
saturation=.75, width=.8, dodge=True,
fliersize=5, linewidth=None)
def test_nested_width(self):
kws = self.default_kws.copy()
p = cat._BoxPlotter(**kws)
p.establish_variables("g", "y", hue="h", data=self.df)
assert p.nested_width == .4 * .98
kws = self.default_kws.copy()
kws["width"] = .6
p = cat._BoxPlotter(**kws)
p.establish_variables("g", "y", hue="h", data=self.df)
assert p.nested_width == .3 * .98
kws = self.default_kws.copy()
kws["dodge"] = False
p = cat._BoxPlotter(**kws)
p.establish_variables("g", "y", hue="h", data=self.df)
assert p.nested_width == .8
def test_hue_offsets(self):
p = cat._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", hue="h", data=self.df)
npt.assert_array_equal(p.hue_offsets, [-.2, .2])
kws = self.default_kws.copy()
kws["width"] = .6
p = cat._BoxPlotter(**kws)
p.establish_variables("g", "y", hue="h", data=self.df)
npt.assert_array_equal(p.hue_offsets, [-.15, .15])
p = cat._BoxPlotter(**kws)
p.establish_variables("h", "y", "g", data=self.df)
npt.assert_array_almost_equal(p.hue_offsets, [-.2, 0, .2])
def test_axes_data(self):
ax = cat.boxplot(x="g", y="y", data=self.df)
assert len(ax.artists) == 3
plt.close("all")
ax = cat.boxplot(x="g", y="y", hue="h", data=self.df)
assert len(ax.artists) == 6
plt.close("all")
def test_box_colors(self):
ax = cat.boxplot(x="g", y="y", data=self.df, saturation=1)
pal = palettes.color_palette(n_colors=3)
for patch, color in zip(ax.artists, pal):
assert patch.get_facecolor()[:3] == color
plt.close("all")
ax = cat.boxplot(x="g", y="y", hue="h", data=self.df, saturation=1)
pal = palettes.color_palette(n_colors=2)
for patch, color in zip(ax.artists, pal * 2):
assert patch.get_facecolor()[:3] == color
plt.close("all")
def test_draw_missing_boxes(self):
ax = cat.boxplot(x="g", y="y", data=self.df,
order=["a", "b", "c", "d"])
assert len(ax.artists) == 3
def test_missing_data(self):
x = ["a", "a", "b", "b", "c", "c", "d", "d"]
h = ["x", "y", "x", "y", "x", "y", "x", "y"]
y = self.rs.randn(8)
y[-2:] = np.nan
ax = cat.boxplot(x=x, y=y)
assert len(ax.artists) == 3
plt.close("all")
y[-1] = 0
ax = cat.boxplot(x=x, y=y, hue=h)
assert len(ax.artists) == 7
plt.close("all")
def test_unaligned_index(self):
f, (ax1, ax2) = plt.subplots(2)
cat.boxplot(x=self.g, y=self.y, ax=ax1)
cat.boxplot(x=self.g, y=self.y_perm, ax=ax2)
for l1, l2 in zip(ax1.lines, ax2.lines):
assert np.array_equal(l1.get_xydata(), l2.get_xydata())
f, (ax1, ax2) = plt.subplots(2)
hue_order = self.h.unique()
cat.boxplot(x=self.g, y=self.y, hue=self.h,
hue_order=hue_order, ax=ax1)
cat.boxplot(x=self.g, y=self.y_perm, hue=self.h,
hue_order=hue_order, ax=ax2)
for l1, l2 in zip(ax1.lines, ax2.lines):
assert np.array_equal(l1.get_xydata(), l2.get_xydata())
def test_boxplots(self):
# Smoke test the high level boxplot options
cat.boxplot(x="y", data=self.df)
plt.close("all")
cat.boxplot(y="y", data=self.df)
plt.close("all")
cat.boxplot(x="g", y="y", data=self.df)
plt.close("all")
cat.boxplot(x="y", y="g", data=self.df, orient="h")
plt.close("all")
cat.boxplot(x="g", y="y", hue="h", data=self.df)
plt.close("all")
cat.boxplot(x="g", y="y", hue="h", order=list("nabc"), data=self.df)
plt.close("all")
cat.boxplot(x="g", y="y", hue="h", hue_order=list("omn"), data=self.df)
plt.close("all")
cat.boxplot(x="y", y="g", hue="h", data=self.df, orient="h")
plt.close("all")
def test_axes_annotation(self):
ax = cat.boxplot(x="g", y="y", data=self.df)
assert ax.get_xlabel() == "g"
assert ax.get_ylabel() == "y"
assert ax.get_xlim() == (-.5, 2.5)
npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
["a", "b", "c"])
plt.close("all")
ax = cat.boxplot(x="g", y="y", hue="h", data=self.df)
assert ax.get_xlabel() == "g"
assert ax.get_ylabel() == "y"
npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
["a", "b", "c"])
npt.assert_array_equal([l.get_text() for l in ax.legend_.get_texts()],
["m", "n"])
plt.close("all")
ax = cat.boxplot(x="y", y="g", data=self.df, orient="h")
assert ax.get_xlabel() == "y"
assert ax.get_ylabel() == "g"
assert ax.get_ylim() == (2.5, -.5)
npt.assert_array_equal(ax.get_yticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_yticklabels()],
["a", "b", "c"])
plt.close("all")
class TestViolinPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
order=None, hue_order=None,
bw="scott", cut=2, scale="area", scale_hue=True,
gridsize=100, width=.8, inner="box", split=False,
dodge=True, orient=None, linewidth=None,
color=None, palette=None, saturation=.75)
def test_split_error(self):
kws = self.default_kws.copy()
kws.update(dict(x="h", y="y", hue="g", data=self.df, split=True))
with pytest.raises(ValueError):
cat._ViolinPlotter(**kws)
def test_no_observations(self):
p = cat._ViolinPlotter(**self.default_kws)
x = ["a", "a", "b"]
y = self.rs.randn(3)
y[-1] = np.nan
p.establish_variables(x, y)
p.estimate_densities("scott", 2, "area", True, 20)
assert len(p.support[0]) == 20
assert len(p.support[1]) == 0
assert len(p.density[0]) == 20
assert len(p.density[1]) == 1
assert p.density[1].item() == 1
p.estimate_densities("scott", 2, "count", True, 20)
assert p.density[1].item() == 0
x = ["a"] * 4 + ["b"] * 2
y = self.rs.randn(6)
h = ["m", "n"] * 2 + ["m"] * 2
p.establish_variables(x, y, hue=h)
p.estimate_densities("scott", 2, "area", True, 20)
assert len(p.support[1][0]) == 20
assert len(p.support[1][1]) == 0
assert len(p.density[1][0]) == 20
assert len(p.density[1][1]) == 1
assert p.density[1][1].item() == 1
p.estimate_densities("scott", 2, "count", False, 20)
assert p.density[1][1].item() == 0
def test_single_observation(self):
p = cat._ViolinPlotter(**self.default_kws)
x = ["a", "a", "b"]
y = self.rs.randn(3)
p.establish_variables(x, y)
p.estimate_densities("scott", 2, "area", True, 20)
assert len(p.support[0]) == 20
assert len(p.support[1]) == 1
assert len(p.density[0]) == 20
assert len(p.density[1]) == 1
assert p.density[1].item() == 1
p.estimate_densities("scott", 2, "count", True, 20)
assert p.density[1].item() == .5
x = ["b"] * 4 + ["a"] * 3
y = self.rs.randn(7)
h = (["m", "n"] * 4)[:-1]
p.establish_variables(x, y, hue=h)
p.estimate_densities("scott", 2, "area", True, 20)
assert len(p.support[1][0]) == 20
assert len(p.support[1][1]) == 1
assert len(p.density[1][0]) == 20
assert len(p.density[1][1]) == 1
assert p.density[1][1].item() == 1
p.estimate_densities("scott", 2, "count", False, 20)
assert p.density[1][1].item() == .5
def test_dwidth(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", data=self.df))
p = cat._ViolinPlotter(**kws)
assert p.dwidth == .4
kws.update(dict(width=.4))
p = cat._ViolinPlotter(**kws)
assert p.dwidth == .2
kws.update(dict(hue="h", width=.8))
p = cat._ViolinPlotter(**kws)
assert p.dwidth == .2
kws.update(dict(split=True))
p = cat._ViolinPlotter(**kws)
assert p.dwidth == .4
def test_scale_area(self):
kws = self.default_kws.copy()
kws["scale"] = "area"
p = cat._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
max_before = np.array([d.max() for d in density])
p.scale_area(density, max_before, False)
max_after = np.array([d.max() for d in density])
assert max_after[0] == 1
before_ratio = max_before[1] / max_before[0]
after_ratio = max_after[1] / max_after[0]
assert before_ratio == after_ratio
# Test nested grouping scaling across all densities
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
max_before = np.array([[r.max() for r in row] for row in density])
p.scale_area(density, max_before, False)
max_after = np.array([[r.max() for r in row] for row in density])
assert max_after[0, 0] == 1
before_ratio = max_before[1, 1] / max_before[0, 0]
after_ratio = max_after[1, 1] / max_after[0, 0]
assert before_ratio == after_ratio
# Test nested grouping scaling within hue
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
max_before = np.array([[r.max() for r in row] for row in density])
p.scale_area(density, max_before, True)
max_after = np.array([[r.max() for r in row] for row in density])
assert max_after[0, 0] == 1
assert max_after[1, 0] == 1
before_ratio = max_before[1, 1] / max_before[1, 0]
after_ratio = max_after[1, 1] / max_after[1, 0]
assert before_ratio == after_ratio
def test_scale_width(self):
kws = self.default_kws.copy()
kws["scale"] = "width"
p = cat._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
p.scale_width(density)
max_after = np.array([d.max() for d in density])
npt.assert_array_equal(max_after, [1, 1])
# Test nested grouping
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
p.scale_width(density)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[1, 1], [1, 1]])
def test_scale_count(self):
kws = self.default_kws.copy()
kws["scale"] = "count"
p = cat._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 20), self.rs.uniform(0, .2, 40)]
counts = np.array([20, 40])
p.scale_count(density, counts, False)
max_after = np.array([d.max() for d in density])
npt.assert_array_equal(max_after, [.5, 1])
# Test nested grouping scaling across all densities
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
[self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
counts = np.array([[5, 40], [100, 50]])
p.scale_count(density, counts, False)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[.05, .4], [1, .5]])
# Test nested grouping scaling within hue
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
[self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
counts = np.array([[5, 40], [100, 50]])
p.scale_count(density, counts, True)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[.125, 1], [1, .5]])
def test_bad_scale(self):
kws = self.default_kws.copy()
kws["scale"] = "not_a_scale_type"
with pytest.raises(ValueError):
cat._ViolinPlotter(**kws)
def test_kde_fit(self):
p = cat._ViolinPlotter(**self.default_kws)
data = self.y
data_std = data.std(ddof=1)
# Test reference rule bandwidth
kde, bw = p.fit_kde(data, "scott")
assert kde.factor == kde.scotts_factor()
assert bw == kde.scotts_factor() * data_std
# Test numeric scale factor
kde, bw = p.fit_kde(self.y, .2)
assert kde.factor == .2
assert bw == .2 * data_std
def test_draw_to_density(self):
p = cat._ViolinPlotter(**self.default_kws)
# p.dwidth will be 1 for easier testing
p.width = 2
# Test vertical plots
support = np.array([.2, .6])
density = np.array([.1, .4])
# Test full vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, False)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.99 * -.4, .99 * .4])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Test left vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, "left")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.99 * -.4, 0])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Test right vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, "right")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [0, .99 * .4])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Switch orientation to test horizontal plots
p.orient = "h"
support = np.array([.2, .5])
density = np.array([.3, .7])
# Test full horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, False)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [.99 * -.7, .99 * .7])
plt.close("all")
# Test left horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, "left")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [.99 * -.7, 0])
plt.close("all")
# Test right horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, "right")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [0, .99 * .7])
plt.close("all")
def test_draw_single_observations(self):
p = cat._ViolinPlotter(**self.default_kws)
p.width = 2
# Test vertical plot
_, ax = plt.subplots()
p.draw_single_observation(ax, 1, 1.5, 1)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [0, 2])
npt.assert_array_equal(y, [1.5, 1.5])
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_single_observation(ax, 2, 2.2, .5)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [2.2, 2.2])
npt.assert_array_equal(y, [1.5, 2.5])
plt.close("all")
def test_draw_box_lines(self):
# Test vertical plot
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
assert len(ax.lines) == 2
q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
_, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(y, [q25, q75])
_, y = ax.collections[0].get_offsets().T
assert y == q50
plt.close("all")
# Test horizontal plot
kws = self.default_kws.copy()
kws.update(dict(x="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
assert len(ax.lines) == 2
q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
x, _ = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, [q25, q75])
x, _ = ax.collections[0].get_offsets().T
assert x == q50
plt.close("all")
def test_draw_quartiles(self):
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_quartiles(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(np.percentile(self.y, [25, 50, 75]), ax.lines):
_, y = line.get_xydata().T
npt.assert_array_equal(y, [val, val])
def test_draw_points(self):
p = cat._ViolinPlotter(**self.default_kws)
# Test vertical plot
_, ax = plt.subplots()
p.draw_points(ax, self.y, 0)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, np.zeros_like(self.y))
npt.assert_array_equal(y, self.y)
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_points(ax, self.y, 0)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.y)
npt.assert_array_equal(y, np.zeros_like(self.y))
plt.close("all")
def test_draw_sticks(self):
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
# Test vertical plot
_, ax = plt.subplots()
p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(self.y, ax.lines):
_, y = line.get_xydata().T
npt.assert_array_equal(y, [val, val])
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(self.y, ax.lines):
x, _ = line.get_xydata().T
npt.assert_array_equal(x, [val, val])
plt.close("all")
def test_validate_inner(self):
kws = self.default_kws.copy()
kws.update(dict(inner="bad_inner"))
with pytest.raises(ValueError):
cat._ViolinPlotter(**kws)
def test_draw_violinplots(self):
kws = self.default_kws.copy()
# Test single vertical violin
kws.update(dict(y="y", data=self.df, inner=None,
saturation=1, color=(1, 0, 0, 1)))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 1
npt.assert_array_equal(ax.collections[0].get_facecolors(),
[(1, 0, 0, 1)])
plt.close("all")
# Test single horizontal violin
kws.update(dict(x="y", y=None, color=(0, 1, 0, 1)))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 1
npt.assert_array_equal(ax.collections[0].get_facecolors(),
[(0, 1, 0, 1)])
plt.close("all")
# Test multiple vertical violins
kws.update(dict(x="g", y="y", color=None,))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 3
for violin, color in zip(ax.collections, palettes.color_palette()):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
# Test multiple violins with hue nesting
kws.update(dict(hue="h"))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 6
for violin, color in zip(ax.collections,
palettes.color_palette(n_colors=2) * 3):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
# Test multiple split violins
kws.update(dict(split=True, palette="muted"))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 6
for violin, color in zip(ax.collections,
palettes.color_palette("muted",
n_colors=2) * 3):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
def test_draw_violinplots_no_observations(self):
kws = self.default_kws.copy()
kws["inner"] = None
# Test single layer of grouping
x = ["a", "a", "b"]
y = self.rs.randn(3)
y[-1] = np.nan
kws.update(x=x, y=y)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 1
assert len(ax.lines) == 0
plt.close("all")
# Test nested hue grouping
x = ["a"] * 4 + ["b"] * 2
y = self.rs.randn(6)
h = ["m", "n"] * 2 + ["m"] * 2
kws.update(x=x, y=y, hue=h)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 3
assert len(ax.lines) == 0
plt.close("all")
def test_draw_violinplots_single_observations(self):
kws = self.default_kws.copy()
kws["inner"] = None
# Test single layer of grouping
x = ["a", "a", "b"]
y = self.rs.randn(3)
kws.update(x=x, y=y)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 1
assert len(ax.lines) == 1
plt.close("all")
# Test nested hue grouping
x = ["b"] * 4 + ["a"] * 3
y = self.rs.randn(7)
h = (["m", "n"] * 4)[:-1]
kws.update(x=x, y=y, hue=h)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 3
assert len(ax.lines) == 1
plt.close("all")
# Test nested hue grouping with split
kws["split"] = True
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
assert len(ax.collections) == 3
assert len(ax.lines) == 1
plt.close("all")
def test_violinplots(self):
# Smoke test the high level violinplot options
cat.violinplot(x="y", data=self.df)
plt.close("all")
cat.violinplot(y="y", data=self.df)
plt.close("all")
cat.violinplot(x="g", y="y", data=self.df)
plt.close("all")
cat.violinplot(x="y", y="g", data=self.df, orient="h")
plt.close("all")
cat.violinplot(x="g", y="y", hue="h", data=self.df)
plt.close("all")
order = list("nabc")
cat.violinplot(x="g", y="y", hue="h", order=order, data=self.df)
plt.close("all")
order = list("omn")
cat.violinplot(x="g", y="y", hue="h", hue_order=order, data=self.df)
plt.close("all")
cat.violinplot(x="y", y="g", hue="h", data=self.df, orient="h")
plt.close("all")
for inner in ["box", "quart", "point", "stick", None]:
cat.violinplot(x="g", y="y", data=self.df, inner=inner)
plt.close("all")
cat.violinplot(x="g", y="y", hue="h", data=self.df, inner=inner)
plt.close("all")
cat.violinplot(x="g", y="y", hue="h", data=self.df,
inner=inner, split=True)
plt.close("all")
# ====================================================================================
# ====================================================================================
class SharedAxesLevelTests:
def test_color(self, long_df):
ax = plt.figure().subplots()
self.func(data=long_df, x="a", y="y", ax=ax)
assert self.get_last_color(ax) == to_rgba("C0")
ax = plt.figure().subplots()
self.func(data=long_df, x="a", y="y", ax=ax)
self.func(data=long_df, x="a", y="y", ax=ax)
assert self.get_last_color(ax) == to_rgba("C1")
ax = plt.figure().subplots()
self.func(data=long_df, x="a", y="y", color="C2", ax=ax)
assert self.get_last_color(ax) == to_rgba("C2")
ax = plt.figure().subplots()
self.func(data=long_df, x="a", y="y", color="C3", ax=ax)
assert self.get_last_color(ax) == to_rgba("C3")
class SharedScatterTests(SharedAxesLevelTests):
"""Tests functionality common to stripplot and swarmplot."""
def get_last_color(self, ax):
colors = ax.collections[-1].get_facecolors()
unique_colors = np.unique(colors, axis=0)
assert len(unique_colors) == 1
return to_rgba(unique_colors.squeeze())
# ------------------------------------------------------------------------------
def test_color(self, long_df):
super().test_color(long_df)
ax = plt.figure().subplots()
self.func(data=long_df, x="a", y="y", facecolor="C4", ax=ax)
assert self.get_last_color(ax) == to_rgba("C4")
if LooseVersion(mpl.__version__) >= "3.1.0":
# https://github.com/matplotlib/matplotlib/pull/12851
ax = plt.figure().subplots()
self.func(data=long_df, x="a", y="y", fc="C5", ax=ax)
assert self.get_last_color(ax) == to_rgba("C5")
def test_supplied_color_array(self, long_df):
cmap = mpl.cm.get_cmap("Blues")
norm = mpl.colors.Normalize()
colors = cmap(norm(long_df["y"].to_numpy()))
keys = ["c", "facecolor", "facecolors"]
if LooseVersion(mpl.__version__) >= "3.1.0":
# https://github.com/matplotlib/matplotlib/pull/12851
keys.append("fc")
for key in keys:
ax = plt.figure().subplots()
self.func(x=long_df["y"], **{key: colors})
_draw_figure(ax.figure)
assert_array_equal(ax.collections[0].get_facecolors(), colors)
ax = plt.figure().subplots()
self.func(x=long_df["y"], c=long_df["y"], cmap=cmap)
_draw_figure(ax.figure)
assert_array_equal(ax.collections[0].get_facecolors(), colors)
@pytest.mark.parametrize(
"orient,data_type",
itertools.product(["h", "v"], ["dataframe", "dict"]),
)
def test_wide(self, wide_df, orient, data_type):
if data_type == "dict":
wide_df = {k: v.to_numpy() for k, v in wide_df.items()}
ax = self.func(data=wide_df, orient=orient)
_draw_figure(ax.figure)
palette = color_palette()
cat_idx = 0 if orient == "v" else 1
val_idx = int(not cat_idx)
axis_objs = ax.xaxis, ax.yaxis
cat_axis = axis_objs[cat_idx]
for i, label in enumerate(cat_axis.get_majorticklabels()):
key = label.get_text()
points = ax.collections[i]
point_pos = points.get_offsets().T
val_pos = point_pos[val_idx]
cat_pos = point_pos[cat_idx]
assert_array_equal(cat_pos.round(), i)
assert_array_equal(val_pos, wide_df[key])
for point_color in points.get_facecolors():
assert tuple(point_color) == to_rgba(palette[i])
@pytest.mark.parametrize("orient", ["h", "v"])
def test_flat(self, flat_series, orient):
ax = self.func(data=flat_series, orient=orient)
_draw_figure(ax.figure)
cat_idx = 0 if orient == "v" else 1
val_idx = int(not cat_idx)
axis_objs = ax.xaxis, ax.yaxis
cat_axis = axis_objs[cat_idx]
for i, label in enumerate(cat_axis.get_majorticklabels()):
points = ax.collections[i]
point_pos = points.get_offsets().T
val_pos = point_pos[val_idx]
cat_pos = point_pos[cat_idx]
key = int(label.get_text()) # because fixture has integer index
assert_array_equal(val_pos, flat_series[key])
assert_array_equal(cat_pos, i)
@pytest.mark.parametrize(
"variables,orient",
[
# Order matters for assigning to x/y
({"cat": "a", "val": "y", "hue": None}, None),
({"val": "y", "cat": "a", "hue": None}, None),
({"cat": "a", "val": "y", "hue": "a"}, None),
({"val": "y", "cat": "a", "hue": "a"}, None),
({"cat": "a", "val": "y", "hue": "b"}, None),
({"val": "y", "cat": "a", "hue": "x"}, None),
({"cat": "s", "val": "y", "hue": None}, None),
({"val": "y", "cat": "s", "hue": None}, "h"),
({"cat": "a", "val": "b", "hue": None}, None),
({"val": "a", "cat": "b", "hue": None}, "h"),
({"cat": "a", "val": "t", "hue": None}, None),
({"val": "t", "cat": "a", "hue": None}, None),
({"cat": "d", "val": "y", "hue": None}, None),
({"val": "y", "cat": "d", "hue": None}, None),
({"cat": "a_cat", "val": "y", "hue": None}, None),
({"val": "y", "cat": "s_cat", "hue": None}, None),
],
)
def test_positions(self, long_df, variables, orient):
cat_var = variables["cat"]
val_var = variables["val"]
hue_var = variables["hue"]
var_names = list(variables.values())
x_var, y_var, *_ = var_names
ax = self.func(
data=long_df, x=x_var, y=y_var, hue=hue_var, orient=orient,
)
_draw_figure(ax.figure)
cat_idx = var_names.index(cat_var)
val_idx = var_names.index(val_var)
axis_objs = ax.xaxis, ax.yaxis
cat_axis = axis_objs[cat_idx]
val_axis = axis_objs[val_idx]
cat_data = long_df[cat_var]
cat_levels = categorical_order(cat_data)
for i, label in enumerate(cat_levels):
vals = long_df.loc[cat_data == label, val_var]
points = ax.collections[i].get_offsets().T
cat_pos = points[var_names.index(cat_var)]
val_pos = points[var_names.index(val_var)]
assert_array_equal(val_pos, val_axis.convert_units(vals))
assert_array_equal(cat_pos.round(), i)
assert 0 <= np.ptp(cat_pos) <= .8
label = pd.Index([label]).astype(str)[0]
assert cat_axis.get_majorticklabels()[i].get_text() == label
@pytest.mark.parametrize(
"variables",
[
# Order matters for assigning to x/y
{"cat": "a", "val": "y", "hue": "b"},
{"val": "y", "cat": "a", "hue": "c"},
{"cat": "a", "val": "y", "hue": "f"},
],
)
def test_positions_dodged(self, long_df, variables):
cat_var = variables["cat"]
val_var = variables["val"]
hue_var = variables["hue"]
var_names = list(variables.values())
x_var, y_var, *_ = var_names
ax = self.func(
data=long_df, x=x_var, y=y_var, hue=hue_var, dodge=True,
)
cat_vals = categorical_order(long_df[cat_var])
hue_vals = categorical_order(long_df[hue_var])
n_hue = len(hue_vals)
offsets = np.linspace(0, .8, n_hue + 1)[:-1]
offsets -= offsets.mean()
nest_width = .8 / n_hue
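# The .8-wide category slot is split into n_hue sub-slots; offsets are centred
# on the category position, so hue level j of category i is drawn around
# i + offsets[j] with width nest_width (checked by the assertions below).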
for i, cat_val in enumerate(cat_vals):
for j, hue_val in enumerate(hue_vals):
rows = (long_df[cat_var] == cat_val) & (long_df[hue_var] == hue_val)
vals = long_df.loc[rows, val_var]
points = ax.collections[n_hue * i + j].get_offsets().T
cat_pos = points[var_names.index(cat_var)]
val_pos = points[var_names.index(val_var)]
if pd.api.types.is_datetime64_any_dtype(vals):
vals = mpl.dates.date2num(vals)
assert_array_equal(val_pos, vals)
assert_array_equal(cat_pos.round(), i)
assert_array_equal((cat_pos - (i + offsets[j])).round() / nest_width, 0)
assert 0 <= np.ptp(cat_pos) <= nest_width
@pytest.mark.parametrize("cat_var", ["a", "s", "d"])
def test_positions_unfixed(self, long_df, cat_var):
long_df = long_df.sort_values(cat_var)
kws = dict(size=.001)
if "stripplot" in str(self.func): # can't use __name__ with partial
kws["jitter"] = False
ax = self.func(data=long_df, x=cat_var, y="y", fixed_scale=False, **kws)
for i, (cat_level, cat_data) in enumerate(long_df.groupby(cat_var)):
points = ax.collections[i].get_offsets().T
cat_pos = points[0]
val_pos = points[1]
assert_array_equal(val_pos, cat_data["y"])
comp_level = np.squeeze(ax.xaxis.convert_units(cat_level)).item()
assert_array_equal(cat_pos.round(), comp_level)
@pytest.mark.parametrize(
"x_type,order",
[
(str, None),
(str, ["a", "b", "c"]),
(str, ["c", "a"]),
(str, ["a", "b", "c", "d"]),
(int, None),
(int, [3, 1, 2]),
(int, [3, 1]),
(int, [1, 2, 3, 4]),
(int, ["3", "1", "2"]),
]
)
def test_order(self, x_type, order):
if x_type is str:
x = ["b", "a", "c"]
else:
x = [2, 1, 3]
y = [1, 2, 3]
ax = self.func(x=x, y=y, order=order)
_draw_figure(ax.figure)
if order is None:
order = x
if x_type is int:
order = np.sort(order)
assert len(ax.collections) == len(order)
tick_labels = ax.xaxis.get_majorticklabels()
assert ax.get_xlim()[1] == (len(order) - .5)
for i, points in enumerate(ax.collections):
cat = order[i]
assert tick_labels[i].get_text() == str(cat)
positions = points.get_offsets()
if x_type(cat) in x:
val = y[x.index(x_type(cat))]
assert positions[0, 1] == val
else:
assert not positions.size
@pytest.mark.parametrize("hue_var", ["a", "b"])
def test_hue_categorical(self, long_df, hue_var):
cat_var = "b"
hue_levels = categorical_order(long_df[hue_var])
cat_levels = categorical_order(long_df[cat_var])
pal_name = "muted"
palette = dict(zip(hue_levels, color_palette(pal_name)))
ax = self.func(data=long_df, x=cat_var, y="y", hue=hue_var, palette=pal_name)
for i, level in enumerate(cat_levels):
sub_df = long_df[long_df[cat_var] == level]
point_hues = sub_df[hue_var]
points = ax.collections[i]
point_colors = points.get_facecolors()
assert len(point_hues) == len(point_colors)
for hue, color in zip(point_hues, point_colors):
assert tuple(color) == to_rgba(palette[hue])
@pytest.mark.parametrize("hue_var", ["a", "b"])
def test_hue_dodged(self, long_df, hue_var):
ax = self.func(data=long_df, x="y", y="a", hue=hue_var, dodge=True)
colors = color_palette(n_colors=long_df[hue_var].nunique())
collections = iter(ax.collections)
# Slightly awkward logic to handle challenges of how the artists work:
# there are empty scatter collections, but facecolors for the empty
# collections will still return the default scatter color.
while colors:
points = next(collections)
if points.get_offsets().any():
face_color = tuple(points.get_facecolors()[0])
expected_color = to_rgba(colors.pop(0))
assert face_color == expected_color
@pytest.mark.parametrize(
"val_var,val_col,hue_col",
itertools.product(["x", "y"], ["b", "y", "t"], [None, "a"]),
)
def test_single(self, long_df, val_var, val_col, hue_col):
var_kws = {val_var: val_col, "hue": hue_col}
ax = self.func(data=long_df, **var_kws)
_draw_figure(ax.figure)
axis_vars = ["x", "y"]
val_idx = axis_vars.index(val_var)
cat_idx = int(not val_idx)
cat_var = axis_vars[cat_idx]
cat_axis = getattr(ax, f"{cat_var}axis")
val_axis = getattr(ax, f"{val_var}axis")
points = ax.collections[0]
point_pos = points.get_offsets().T
cat_pos = point_pos[cat_idx]
val_pos = point_pos[val_idx]
assert_array_equal(cat_pos.round(), 0)
assert cat_pos.max() <= .4
assert cat_pos.min() >= -.4
num_vals = val_axis.convert_units(long_df[val_col])
assert_array_equal(val_pos, num_vals)
if hue_col is not None:
palette = dict(zip(
categorical_order(long_df[hue_col]), color_palette()
))
facecolors = points.get_facecolors()
for i, color in enumerate(facecolors):
if hue_col is None:
assert tuple(color) == to_rgba("C0")
else:
hue_level = long_df.loc[i, hue_col]
expected_color = palette[hue_level]
assert tuple(color) == to_rgba(expected_color)
ticklabels = cat_axis.get_majorticklabels()
assert len(ticklabels) == 1
assert not ticklabels[0].get_text()
def test_attributes(self, long_df):
kwargs = dict(
size=2,
linewidth=1,
edgecolor="C2",
)
ax = self.func(x=long_df["y"], **kwargs)
points, = ax.collections
assert points.get_sizes().item() == kwargs["size"] ** 2
assert points.get_linewidths().item() == kwargs["linewidth"]
assert tuple(points.get_edgecolors().squeeze()) == to_rgba(kwargs["edgecolor"])
def test_three_points(self):
x = np.arange(3)
ax = self.func(x=x)
for point_color in ax.collections[0].get_facecolor():
assert tuple(point_color) == to_rgba("C0")
def test_palette_from_color_deprecation(self, long_df):
color = (.9, .4, .5)
hex_color = mpl.colors.to_hex(color)
hue_var = "a"
n_hue = long_df[hue_var].nunique()
palette = color_palette(f"dark:{hex_color}", n_hue)
with pytest.warns(FutureWarning, match="Setting a gradient palette"):
ax = self.func(data=long_df, x="z", hue=hue_var, color=color)
points = ax.collections[0]
for point_color in points.get_facecolors():
assert to_rgb(point_color) in palette
def test_log_scale(self):
x = [1, 10, 100, 1000]
ax = plt.figure().subplots()
ax.set_xscale("log")
self.func(x=x)
vals = ax.collections[0].get_offsets()[:, 0]
assert_array_equal(x, vals)
y = [1, 2, 3, 4]
ax = plt.figure().subplots()
ax.set_xscale("log")
self.func(x=x, y=y, fixed_scale=False)
for i, point in enumerate(ax.collections):
val = point.get_offsets()[0, 0]
assert val == pytest.approx(x[i])
x = y = np.ones(100)
# Following test fails on pinned (but not latest) matplotlib.
# (Even though visual output is ok -- so it's not an actual bug).
# I'm not exactly sure why, so this version check is approximate
# and should be revisited on a version bump.
if LooseVersion(mpl.__version__) < "3.1":
pytest.xfail()
ax = plt.figure().subplots()
ax.set_yscale("log")
self.func(x=x, y=y, orient="h", fixed_scale=False)
cat_points = ax.collections[0].get_offsets().copy()[:, 1]
assert np.ptp(np.log10(cat_points)) <= .8
@pytest.mark.parametrize(
"kwargs",
[
dict(data="wide"),
dict(data="wide", orient="h"),
dict(data="long", x="x", color="C3"),
dict(data="long", y="y", hue="a", jitter=False),
# TODO XXX full numeric hue legend crashes pinned mpl, disabling for now
# dict(data="long", x="a", y="y", hue="z", edgecolor="w", linewidth=.5),
# dict(data="long", x="a_cat", y="y", hue="z"),
dict(data="long", x="y", y="s", hue="c", orient="h", dodge=True),
dict(data="long", x="s", y="y", hue="c", fixed_scale=False),
]
)
def test_vs_catplot(self, long_df, wide_df, kwargs):
kwargs = kwargs.copy()
if kwargs["data"] == "long":
kwargs["data"] = long_df
elif kwargs["data"] == "wide":
kwargs["data"] = wide_df
try:
name = self.func.__name__[:-4]
except AttributeError:
name = self.func.func.__name__[:-4]
if name == "swarm":
kwargs.pop("jitter", None)
np.random.seed(0) # for jitter
ax = self.func(**kwargs)
np.random.seed(0)
g = catplot(**kwargs, kind=name)
assert_plots_equal(ax, g.ax)
class TestStripPlot(SharedScatterTests):
func = staticmethod(stripplot)
def test_jitter_unfixed(self, long_df):
ax1, ax2 = plt.figure().subplots(2)
kws = dict(data=long_df, x="y", orient="h", fixed_scale=False)
np.random.seed(0)
stripplot(**kws, y="s", ax=ax1)
np.random.seed(0)
stripplot(**kws, y=long_df["s"] * 2, ax=ax2)
p1 = ax1.collections[0].get_offsets()[1]
p2 = ax2.collections[0].get_offsets()[1]
assert p2.std() > p1.std()
@pytest.mark.parametrize(
"orient,jitter",
itertools.product(["v", "h"], [True, .1]),
)
def test_jitter(self, long_df, orient, jitter):
cat_var, val_var = "a", "y"
if orient == "v":
x_var, y_var = cat_var, val_var
cat_idx, val_idx = 0, 1
else:
x_var, y_var = val_var, cat_var
cat_idx, val_idx = 1, 0
cat_vals = categorical_order(long_df[cat_var])
ax = stripplot(
data=long_df, x=x_var, y=y_var, jitter=jitter,
)
if jitter is True:
jitter_range = .4
else:
jitter_range = 2 * jitter
for i, level in enumerate(cat_vals):
vals = long_df.loc[long_df[cat_var] == level, val_var]
points = ax.collections[i].get_offsets().T
cat_points = points[cat_idx]
val_points = points[val_idx]
assert_array_equal(val_points, vals)
assert np.std(cat_points) > 0
assert np.ptp(cat_points) <= jitter_range
class TestSwarmPlot(SharedScatterTests):
func = staticmethod(partial(swarmplot, warn_thresh=1))
class TestBarPlotter(CategoricalFixture):
default_kws = dict(
x=None, y=None, hue=None, data=None,
estimator=np.mean, ci=95, n_boot=100, units=None, seed=None,
order=None, hue_order=None,
orient=None, color=None, palette=None,
saturation=.75, errcolor=".26", errwidth=None,
capsize=None, dodge=True
)
def test_nested_width(self):
kws = self.default_kws.copy()
p = cat._BarPlotter(**kws)
p.establish_variables("g", "y", hue="h", data=self.df)
assert p.nested_width == .8 / 2
p = cat._BarPlotter(**kws)
p.establish_variables("h", "y", "g", data=self.df)
assert p.nested_width == .8 / 3
kws["dodge"] = False
p = cat._BarPlotter(**kws)
p.establish_variables("h", "y", "g", data=self.df)
assert p.nested_width == .8
def test_draw_vertical_bars(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
assert len(ax.patches) == len(p.plot_data)
assert len(ax.lines) == len(p.plot_data)
for bar, color in zip(ax.patches, p.colors):
assert bar.get_facecolor()[:-1] == color
positions = np.arange(len(p.plot_data)) - p.width / 2
for bar, pos, stat in zip(ax.patches, positions, p.statistic):
assert bar.get_x() == pos
assert bar.get_width() == p.width
assert bar.get_y() == 0
assert bar.get_height() == stat
def test_draw_horizontal_bars(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", orient="h", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
assert len(ax.patches) == len(p.plot_data)
assert len(ax.lines) == len(p.plot_data)
for bar, color in zip(ax.patches, p.colors):
assert bar.get_facecolor()[:-1] == color
positions = np.arange(len(p.plot_data)) - p.width / 2
for bar, pos, stat in zip(ax.patches, positions, p.statistic):
assert bar.get_y() == pos
assert bar.get_height() == p.width
assert bar.get_x() == 0
assert bar.get_width() == stat
def test_draw_nested_vertical_bars(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", hue="h", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
n_groups, n_hues = len(p.plot_data), len(p.hue_names)
assert len(ax.patches) == n_groups * n_hues
assert len(ax.lines) == n_groups * n_hues
for bar in ax.patches[:n_groups]:
assert bar.get_facecolor()[:-1] == p.colors[0]
for bar in ax.patches[n_groups:]:
assert bar.get_facecolor()[:-1] == p.colors[1]
positions = np.arange(len(p.plot_data))
for bar, pos in zip(ax.patches[:n_groups], positions):
assert bar.get_x() == approx(pos - p.width / 2)
assert bar.get_width() == approx(p.nested_width)
for bar, stat in zip(ax.patches, p.statistic.T.flat):
assert bar.get_y() == approx(0)
assert bar.get_height() == approx(stat)
def test_draw_nested_horizontal_bars(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
n_groups, n_hues = len(p.plot_data), len(p.hue_names)
assert len(ax.patches) == n_groups * n_hues
assert len(ax.lines) == n_groups * n_hues
for bar in ax.patches[:n_groups]:
assert bar.get_facecolor()[:-1] == p.colors[0]
for bar in ax.patches[n_groups:]:
assert bar.get_facecolor()[:-1] == p.colors[1]
positions = np.arange(len(p.plot_data))
for bar, pos in zip(ax.patches[:n_groups], positions):
assert bar.get_y() == approx(pos - p.width / 2)
assert bar.get_height() == approx(p.nested_width)
for bar, stat in zip(ax.patches, p.statistic.T.flat):
assert bar.get_x() == approx(0)
assert bar.get_width() == approx(stat)
def test_draw_missing_bars(self):
kws = self.default_kws.copy()
order = list("abcd")
kws.update(x="g", y="y", order=order, data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
assert len(ax.patches) == len(order)
assert len(ax.lines) == len(order)
plt.close("all")
hue_order = list("mno")
kws.update(x="g", y="y", hue="h", hue_order=hue_order, data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
assert len(ax.patches) == len(p.plot_data) * len(hue_order)
assert len(ax.lines) == len(p.plot_data) * len(hue_order)
plt.close("all")
def test_unaligned_index(self):
f, (ax1, ax2) = plt.subplots(2)
cat.barplot(x=self.g, y=self.y, ci="sd", ax=ax1)
cat.barplot(x=self.g, y=self.y_perm, ci="sd", ax=ax2)
for l1, l2 in zip(ax1.lines, ax2.lines):
assert approx(l1.get_xydata()) == l2.get_xydata()
for p1, p2 in zip(ax1.patches, ax2.patches):
assert approx(p1.get_xy()) == p2.get_xy()
assert approx(p1.get_height()) == p2.get_height()
assert approx(p1.get_width()) == p2.get_width()
f, (ax1, ax2) = plt.subplots(2)
hue_order = self.h.unique()
cat.barplot(x=self.g, y=self.y, hue=self.h,
hue_order=hue_order, ci="sd", ax=ax1)
cat.barplot(x=self.g, y=self.y_perm, hue=self.h,
hue_order=hue_order, ci="sd", ax=ax2)
for l1, l2 in zip(ax1.lines, ax2.lines):
assert approx(l1.get_xydata()) == l2.get_xydata()
for p1, p2 in zip(ax1.patches, ax2.patches):
assert approx(p1.get_xy()) == p2.get_xy()
assert approx(p1.get_height()) == p2.get_height()
assert approx(p1.get_width()) == p2.get_width()
def test_barplot_colors(self):
# Test unnested palette colors
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df,
saturation=1, palette="muted")
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
palette = palettes.color_palette("muted", len(self.g.unique()))
for patch, pal_color in zip(ax.patches, palette):
assert patch.get_facecolor()[:-1] == pal_color
plt.close("all")
# Test single color
color = (.2, .2, .3, 1)
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df,
saturation=1, color=color)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
for patch in ax.patches:
assert patch.get_facecolor() == color
plt.close("all")
# Test nested palette colors
kws = self.default_kws.copy()
kws.update(x="g", y="y", hue="h", data=self.df,
saturation=1, palette="Set2")
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
palette = palettes.color_palette("Set2", len(self.h.unique()))
for patch in ax.patches[:len(self.g.unique())]:
assert patch.get_facecolor()[:-1] == palette[0]
for patch in ax.patches[len(self.g.unique()):]:
assert patch.get_facecolor()[:-1] == palette[1]
plt.close("all")
def test_simple_barplots(self):
ax = cat.barplot(x="g", y="y", data=self.df)
assert len(ax.patches) == len(self.g.unique())
assert ax.get_xlabel() == "g"
assert ax.get_ylabel() == "y"
plt.close("all")
ax = cat.barplot(x="y", y="g", orient="h", data=self.df)
assert len(ax.patches) == len(self.g.unique())
assert ax.get_xlabel() == "y"
assert ax.get_ylabel() == "g"
plt.close("all")
ax = cat.barplot(x="g", y="y", hue="h", data=self.df)
assert len(ax.patches) == len(self.g.unique()) * len(self.h.unique())
assert ax.get_xlabel() == "g"
assert ax.get_ylabel() == "y"
plt.close("all")
ax = cat.barplot(x="y", y="g", hue="h", orient="h", data=self.df)
assert len(ax.patches) == len(self.g.unique()) * len(self.h.unique())
assert ax.get_xlabel() == "y"
assert ax.get_ylabel() == "g"
plt.close("all")
class TestPointPlotter(CategoricalFixture):
default_kws = dict(
x=None, y=None, hue=None, data=None,
estimator=np.mean, ci=95, n_boot=100, units=None, seed=None,
order=None, hue_order=None,
markers="o", linestyles="-", dodge=0,
join=True, scale=1,
orient=None, color=None, palette=None,
)
def test_different_default_colors(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", data=self.df))
p = cat._PointPlotter(**kws)
color = palettes.color_palette()[0]
npt.assert_array_equal(p.colors, [color, color, color])
def test_hue_offsets(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", hue="h", data=self.df))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [0, 0])
kws.update(dict(dodge=.5))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [-.25, .25])
kws.update(dict(x="h", hue="g", dodge=0))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [0, 0, 0])
kws.update(dict(dodge=.3))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [-.15, 0, .15])
def test_draw_vertical_points(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
assert len(ax.collections) == 1
assert len(ax.lines) == len(p.plot_data) + 1
points = ax.collections[0]
assert len(points.get_offsets()) == len(p.plot_data)
x, y = points.get_offsets().T
npt.assert_array_equal(x, np.arange(len(p.plot_data)))
npt.assert_array_equal(y, p.statistic)
for got_color, want_color in zip(points.get_facecolors(),
p.colors):
npt.assert_array_equal(got_color[:-1], want_color)
def test_draw_horizontal_points(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", orient="h", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
assert len(ax.collections) == 1
assert len(ax.lines) == len(p.plot_data) + 1
points = ax.collections[0]
assert len(points.get_offsets()) == len(p.plot_data)
x, y = points.get_offsets().T
npt.assert_array_equal(x, p.statistic)
npt.assert_array_equal(y, np.arange(len(p.plot_data)))
for got_color, want_color in zip(points.get_facecolors(),
p.colors):
npt.assert_array_equal(got_color[:-1], want_color)
def test_draw_vertical_nested_points(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", hue="h", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
assert len(ax.collections) == 2
assert len(ax.lines) == len(p.plot_data) * len(p.hue_names) + len(p.hue_names)
for points, numbers, color in zip(ax.collections,
p.statistic.T,
p.colors):
assert len(points.get_offsets()) == len(p.plot_data)
x, y = points.get_offsets().T
npt.assert_array_equal(x, np.arange(len(p.plot_data)))
npt.assert_array_equal(y, numbers)
for got_color in points.get_facecolors():
npt.assert_array_equal(got_color[:-1], color)
def test_draw_horizontal_nested_points(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
assert len(ax.collections) == 2
assert len(ax.lines) == len(p.plot_data) * len(p.hue_names) + len(p.hue_names)
for points, numbers, color in zip(ax.collections,
p.statistic.T,
p.colors):
assert len(points.get_offsets()) == len(p.plot_data)
x, y = points.get_offsets().T
npt.assert_array_equal(x, numbers)
npt.assert_array_equal(y, np.arange(len(p.plot_data)))
for got_color in points.get_facecolors():
npt.assert_array_equal(got_color[:-1], color)
def test_draw_missing_points(self):
kws = self.default_kws.copy()
df = self.df.copy()
kws.update(x="g", y="y", hue="h", hue_order=["x", "y"], data=df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
df.loc[df["h"] == "m", "y"] = np.nan
kws.update(x="g", y="y", hue="h", data=df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
def test_unaligned_index(self):
f, (ax1, ax2) = plt.subplots(2)
cat.pointplot(x=self.g, y=self.y, ci="sd", ax=ax1)
cat.pointplot(x=self.g, y=self.y_perm, ci="sd", ax=ax2)
for l1, l2 in zip(ax1.lines, ax2.lines):
assert approx(l1.get_xydata()) == l2.get_xydata()
for p1, p2 in zip(ax1.collections, ax2.collections):
assert approx(p1.get_offsets()) == p2.get_offsets()
f, (ax1, ax2) = plt.subplots(2)
hue_order = self.h.unique()
cat.pointplot(x=self.g, y=self.y, hue=self.h,
hue_order=hue_order, ci="sd", ax=ax1)
cat.pointplot(x=self.g, y=self.y_perm, hue=self.h,
hue_order=hue_order, ci="sd", ax=ax2)
for l1, l2 in zip(ax1.lines, ax2.lines):
assert approx(l1.get_xydata()) == l2.get_xydata()
for p1, p2 in zip(ax1.collections, ax2.collections):
assert approx(p1.get_offsets()) == p2.get_offsets()
def test_pointplot_colors(self):
# Test a single-color unnested plot
color = (.2, .2, .3, 1)
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df, color=color)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
for line in ax.lines:
assert line.get_color() == color[:-1]
for got_color in ax.collections[0].get_facecolors():
npt.assert_array_equal(rgb2hex(got_color), rgb2hex(color))
plt.close("all")
# Test a multi-color unnested plot
palette = palettes.color_palette("Set1", 3)
kws.update(x="g", y="y", data=self.df, palette="Set1")
p = cat._PointPlotter(**kws)
assert not p.join
f, ax = plt.subplots()
p.draw_points(ax)
for line, pal_color in zip(ax.lines, palette):
npt.assert_array_equal(line.get_color(), pal_color)
for point_color, pal_color in zip(ax.collections[0].get_facecolors(),
palette):
npt.assert_array_equal(rgb2hex(point_color), rgb2hex(pal_color))
plt.close("all")
# Test a multi-colored nested plot
palette = palettes.color_palette("dark", 2)
kws.update(x="g", y="y", hue="h", data=self.df, palette="dark")
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
for line in ax.lines[:(len(p.plot_data) + 1)]:
assert line.get_color() == palette[0]
for line in ax.lines[(len(p.plot_data) + 1):]:
assert line.get_color() == palette[1]
for i, pal_color in enumerate(palette):
for point_color in ax.collections[i].get_facecolors():
npt.assert_array_equal(point_color[:-1], pal_color)
plt.close("all")
def test_simple_pointplots(self):
ax = cat.pointplot(x="g", y="y", data=self.df)
assert len(ax.collections) == 1
assert len(ax.lines) == len(self.g.unique()) + 1
assert ax.get_xlabel() == "g"
assert ax.get_ylabel() == "y"
plt.close("all")
ax = cat.pointplot(x="y", y="g", orient="h", data=self.df)
assert len(ax.collections) == 1
assert len(ax.lines) == len(self.g.unique()) + 1
assert ax.get_xlabel() == "y"
assert ax.get_ylabel() == "g"
plt.close("all")
ax = cat.pointplot(x="g", y="y", hue="h", data=self.df)
assert len(ax.collections) == len(self.h.unique())
assert len(ax.lines) == (
len(self.g.unique()) * len(self.h.unique()) + len(self.h.unique())
)
assert ax.get_xlabel() == "g"
assert ax.get_ylabel() == "y"
plt.close("all")
ax = cat.pointplot(x="y", y="g", hue="h", orient="h", data=self.df)
assert len(ax.collections) == len(self.h.unique())
assert len(ax.lines) == (
len(self.g.unique()) * len(self.h.unique()) + len(self.h.unique())
)
assert ax.get_xlabel() == "y"
assert ax.get_ylabel() == "g"
plt.close("all")
class TestCountPlot(CategoricalFixture):
def test_plot_elements(self):
ax = cat.countplot(x="g", data=self.df)
assert len(ax.patches) == self.g.unique().size
for p in ax.patches:
assert p.get_y() == 0
assert p.get_height() == self.g.size / self.g.unique().size
plt.close("all")
ax = cat.countplot(y="g", data=self.df)
assert len(ax.patches) == self.g.unique().size
for p in ax.patches:
assert p.get_x() == 0
assert p.get_width() == self.g.size / self.g.unique().size
plt.close("all")
ax = cat.countplot(x="g", hue="h", data=self.df)
assert len(ax.patches) == self.g.unique().size * self.h.unique().size
plt.close("all")
ax = cat.countplot(y="g", hue="h", data=self.df)
assert len(ax.patches) == self.g.unique().size * self.h.unique().size
plt.close("all")
def test_input_error(self):
with pytest.raises(ValueError):
cat.countplot(x="g", y="h", data=self.df)
class TestCatPlot(CategoricalFixture):
def test_facet_organization(self):
g = cat.catplot(x="g", y="y", data=self.df)
assert g.axes.shape == (1, 1)
g = cat.catplot(x="g", y="y", col="h", data=self.df)
assert g.axes.shape == (1, 2)
g = cat.catplot(x="g", y="y", row="h", data=self.df)
assert g.axes.shape == (2, 1)
g = cat.catplot(x="g", y="y", col="u", row="h", data=self.df)
assert g.axes.shape == (2, 3)
def test_plot_elements(self):
g = cat.catplot(x="g", y="y", data=self.df, kind="point")
assert len(g.ax.collections) == 1
want_lines = self.g.unique().size + 1
assert len(g.ax.lines) == want_lines
g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="point")
want_collections = self.h.unique().size
assert len(g.ax.collections) == want_collections
want_lines = (self.g.unique().size + 1) * self.h.unique().size
assert len(g.ax.lines) == want_lines
g = cat.catplot(x="g", y="y", data=self.df, kind="bar")
want_elements = self.g.unique().size
assert len(g.ax.patches) == want_elements
assert len(g.ax.lines) == want_elements
g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="bar")
want_elements = self.g.unique().size * self.h.unique().size
assert len(g.ax.patches) == want_elements
assert len(g.ax.lines) == want_elements
g = cat.catplot(x="g", data=self.df, kind="count")
want_elements = self.g.unique().size
assert len(g.ax.patches) == want_elements
assert len(g.ax.lines) == 0
g = cat.catplot(x="g", hue="h", data=self.df, kind="count")
want_elements = self.g.unique().size * self.h.unique().size
assert len(g.ax.patches) == want_elements
assert len(g.ax.lines) == 0
g = cat.catplot(x="g", y="y", data=self.df, kind="box")
want_artists = self.g.unique().size
assert len(g.ax.artists) == want_artists
g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="box")
want_artists = self.g.unique().size * self.h.unique().size
assert len(g.ax.artists) == want_artists
g = cat.catplot(x="g", y="y", data=self.df,
kind="violin", inner=None)
want_elements = self.g.unique().size
assert len(g.ax.collections) == want_elements
g = cat.catplot(x="g", y="y", hue="h", data=self.df,
kind="violin", inner=None)
want_elements = self.g.unique().size * self.h.unique().size
assert len(g.ax.collections) == want_elements
g = cat.catplot(x="g", y="y", data=self.df, kind="strip")
want_elements = self.g.unique().size
assert len(g.ax.collections) == want_elements
g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="strip")
want_elements = self.g.unique().size + self.h.unique().size
assert len(g.ax.collections) == want_elements
def test_bad_plot_kind_error(self):
with pytest.raises(ValueError):
cat.catplot(x="g", y="y", data=self.df, kind="not_a_kind")
def test_count_x_and_y(self):
with pytest.raises(ValueError):
cat.catplot(x="g", y="y", data=self.df, kind="count")
def test_plot_colors(self):
ax = cat.barplot(x="g", y="y", data=self.df)
g = cat.catplot(x="g", y="y", data=self.df, kind="bar")
for p1, p2 in zip(ax.patches, g.ax.patches):
assert p1.get_facecolor() == p2.get_facecolor()
plt.close("all")
ax = cat.barplot(x="g", y="y", data=self.df, color="purple")
g = cat.catplot(x="g", y="y", data=self.df,
kind="bar", color="purple")
for p1, p2 in zip(ax.patches, g.ax.patches):
assert p1.get_facecolor() == p2.get_facecolor()
plt.close("all")
ax = cat.barplot(x="g", y="y", data=self.df, palette="Set2")
g = cat.catplot(x="g", y="y", data=self.df,
kind="bar", palette="Set2")
for p1, p2 in zip(ax.patches, g.ax.patches):
assert p1.get_facecolor() == p2.get_facecolor()
plt.close("all")
ax = cat.pointplot(x="g", y="y", data=self.df)
g = cat.catplot(x="g", y="y", data=self.df)
for l1, l2 in zip(ax.lines, g.ax.lines):
assert l1.get_color() == l2.get_color()
plt.close("all")
ax = cat.pointplot(x="g", y="y", data=self.df, color="purple")
g = cat.catplot(x="g", y="y", data=self.df, color="purple")
for l1, l2 in zip(ax.lines, g.ax.lines):
assert l1.get_color() == l2.get_color()
plt.close("all")
ax = cat.pointplot(x="g", y="y", data=self.df, palette="Set2")
g = cat.catplot(x="g", y="y", data=self.df, palette="Set2")
for l1, l2 in zip(ax.lines, g.ax.lines):
assert l1.get_color() == l2.get_color()
plt.close("all")
def test_ax_kwarg_removal(self):
f, ax = plt.subplots()
with pytest.warns(UserWarning, match="catplot is a figure-level"):
g = cat.catplot(x="g", y="y", data=self.df, ax=ax)
assert len(ax.collections) == 0
assert len(g.ax.collections) > 0
def test_factorplot(self):
with pytest.warns(UserWarning):
g = cat.factorplot(x="g", y="y", data=self.df)
assert len(g.ax.collections) == 1
want_lines = self.g.unique().size + 1
assert len(g.ax.lines) == want_lines
def test_share_xy(self):
# Test default behavior works
g = cat.catplot(x="g", y="y", col="g", data=self.df, sharex=True)
for ax in g.axes.flat:
assert len(ax.collections) == len(self.df.g.unique())
g = cat.catplot(x="y", y="g", col="g", data=self.df, sharey=True)
for ax in g.axes.flat:
assert len(ax.collections) == len(self.df.g.unique())
# Test unsharing works
with pytest.warns(UserWarning):
g = cat.catplot(
x="g", y="y", col="g", data=self.df, sharex=False, kind="bar",
)
for ax in g.axes.flat:
assert len(ax.patches) == 1
with pytest.warns(UserWarning):
g = cat.catplot(
x="y", y="g", col="g", data=self.df, sharey=False, kind="bar",
)
for ax in g.axes.flat:
assert len(ax.patches) == 1
# Make sure no warning is raised if color is provided on unshared plot
with pytest.warns(None) as record:
g = cat.catplot(
x="g", y="y", col="g", data=self.df, sharex=False, color="b"
)
assert not len(record)
for ax in g.axes.flat:
assert ax.get_xlim() == (-.5, .5)
with pytest.warns(None) as record:
g = cat.catplot(
x="y", y="g", col="g", data=self.df, sharey=False, color="r"
)
assert not len(record)
for ax in g.axes.flat:
assert ax.get_ylim() == (.5, -.5)
# Make sure order is used if given, regardless of sharex value
order = self.df.g.unique()
g = cat.catplot(x="g", y="y", col="g", data=self.df, sharex=False, order=order)
for ax in g.axes.flat:
assert len(ax.collections) == len(self.df.g.unique())
g = cat.catplot(x="y", y="g", col="g", data=self.df, sharey=False, order=order)
for ax in g.axes.flat:
assert len(ax.collections) == len(self.df.g.unique())
@pytest.mark.parametrize("var", ["col", "row"])
def test_array_faceter(self, long_df, var):
g1 = catplot(data=long_df, x="y", **{var: "a"})
g2 = catplot(data=long_df, x="y", **{var: long_df["a"].to_numpy()})
for ax1, ax2 in zip(g1.axes.flat, g2.axes.flat):
assert_plots_equal(ax1, ax2)
class TestBoxenPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
order=None, hue_order=None,
orient=None, color=None, palette=None,
saturation=.75, width=.8, dodge=True,
k_depth='tukey', linewidth=None,
scale='exponential', outlier_prop=0.007,
trust_alpha=0.05, showfliers=True)
def ispatch(self, c):
return isinstance(c, mpl.collections.PatchCollection)
def ispath(self, c):
return isinstance(c, mpl.collections.PathCollection)
def edge_calc(self, n, data):
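# Letter-value "box ends" at depth n are the 0.5**n and (1 - 0.5**n) quantiles of the data.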
q = np.asanyarray([0.5 ** n, 1 - 0.5 ** n]) * 100
q = list(np.unique(q))
return np.percentile(data, q)
def test_box_ends_finite(self):
p = cat._LVPlotter(**self.default_kws)
p.establish_variables("g", "y", data=self.df)
box_ends = []
k_vals = []
for s in p.plot_data:
b, k = p._lv_box_ends(s)
box_ends.append(b)
k_vals.append(k)
# Check that all the box ends are finite and are within
# the bounds of the data
b_e = map(lambda a: np.all(np.isfinite(a)), box_ends)
assert np.sum(list(b_e)) == len(box_ends)
def within(t):
a, d = t
return ((np.ravel(a) <= d.max())
& (np.ravel(a) >= d.min())).all()
b_w = map(within, zip(box_ends, p.plot_data))
assert np.sum(list(b_w)) == len(box_ends)
k_f = map(lambda k: (k > 0.) & np.isfinite(k), k_vals)
assert np.sum(list(k_f)) == len(k_vals)
def test_box_ends_correct_tukey(self):
n = 100
linear_data = np.arange(n)
expected_k = max(int(np.log2(n)) - 3, 1)
expected_edges = [self.edge_calc(i, linear_data)
for i in range(expected_k + 1, 1, -1)]
p = cat._LVPlotter(**self.default_kws)
calc_edges, calc_k = p._lv_box_ends(linear_data)
npt.assert_array_equal(expected_edges, calc_edges)
assert expected_k == calc_k
def test_box_ends_correct_proportion(self):
n = 100
linear_data = np.arange(n)
expected_k = int(np.log2(n)) - int(np.log2(n * 0.007)) + 1
expected_edges = [self.edge_calc(i, linear_data)
for i in range(expected_k + 1, 1, -1)]
kws = self.default_kws.copy()
kws["k_depth"] = "proportion"
p = cat._LVPlotter(**kws)
calc_edges, calc_k = p._lv_box_ends(linear_data)
npt.assert_array_equal(expected_edges, calc_edges)
assert expected_k == calc_k
@pytest.mark.parametrize(
"n,exp_k",
[(491, 6), (492, 7), (983, 7), (984, 8), (1966, 8), (1967, 9)],
)
def test_box_ends_correct_trustworthy(self, n, exp_k):
linear_data = np.arange(n)
kws = self.default_kws.copy()
kws["k_depth"] = "trustworthy"
p = cat._LVPlotter(**kws)
_, calc_k = p._lv_box_ends(linear_data)
assert exp_k == calc_k
def test_outliers(self):
n = 100
outlier_data = np.append(np.arange(n - 1), 2 * n)
expected_k = max(int(np.log2(n)) - 3, 1)
expected_edges = [self.edge_calc(i, outlier_data)
for i in range(expected_k + 1, 1, -1)]
p = cat._LVPlotter(**self.default_kws)
calc_edges, calc_k = p._lv_box_ends(outlier_data)
npt.assert_array_equal(calc_edges, expected_edges)
assert calc_k == expected_k
out_calc = p._lv_outliers(outlier_data, calc_k)
out_exp = p._lv_outliers(outlier_data, expected_k)
npt.assert_equal(out_calc, out_exp)
def test_showfliers(self):
ax = cat.boxenplot(x="g", y="y", data=self.df, k_depth="proportion",
showfliers=True)
ax_collections = list(filter(self.ispath, ax.collections))
for c in ax_collections:
assert len(c.get_offsets()) == 2
# Test that all data points are in the plot
assert ax.get_ylim()[0] < self.df["y"].min()
assert ax.get_ylim()[1] > self.df["y"].max()
plt.close("all")
ax = cat.boxenplot(x="g", y="y", data=self.df, showfliers=False)
assert len(list(filter(self.ispath, ax.collections))) == 0
plt.close("all")
def test_invalid_depths(self):
kws = self.default_kws.copy()
# Make sure illegal depth raises
kws["k_depth"] = "nosuchdepth"
with pytest.raises(ValueError):
cat._LVPlotter(**kws)
# Make sure illegal outlier_prop raises
kws["k_depth"] = "proportion"
for p in (-13, 37):
kws["outlier_prop"] = p
with pytest.raises(ValueError):
cat._LVPlotter(**kws)
kws["k_depth"] = "trustworthy"
for alpha in (-13, 37):
kws["trust_alpha"] = alpha
with pytest.raises(ValueError):
cat._LVPlotter(**kws)
@pytest.mark.parametrize("power", [1, 3, 7, 11, 13, 17])
def test_valid_depths(self, power):
x = np.random.standard_t(10, 2 ** power)
valid_depths = ["proportion", "tukey", "trustworthy", "full"]
kws = self.default_kws.copy()
for depth in valid_depths + [4]:
kws["k_depth"] = depth
box_ends, k = cat._LVPlotter(**kws)._lv_box_ends(x)
if depth == "full":
assert k == int(np.log2(len(x))) + 1
def test_valid_scales(self):
valid_scales = ["linear", "exponential", "area"]
kws = self.default_kws.copy()
for scale in valid_scales + ["unknown_scale"]:
kws["scale"] = scale
if scale not in valid_scales:
with pytest.raises(ValueError):
cat._LVPlotter(**kws)
else:
cat._LVPlotter(**kws)
def test_hue_offsets(self):
p = cat._LVPlotter(**self.default_kws)
p.establish_variables("g", "y", hue="h", data=self.df)
npt.assert_array_equal(p.hue_offsets, [-.2, .2])
kws = self.default_kws.copy()
kws["width"] = .6
p = cat._LVPlotter(**kws)
p.establish_variables("g", "y", hue="h", data=self.df)
|
npt.assert_array_equal(p.hue_offsets, [-.15, .15])
|
numpy.testing.assert_array_equal
|
#!/usr/bin/env python
"""
Utilities for manipulating coordinates or list of coordinates, under periodic
boundary conditions or otherwise. Many of these are heavily vectorized in
numpy for performance.
"""
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Nov 27, 2011"
import numpy as np
import math
from monty.dev import deprecated
from pymatgen.core.lattice import Lattice
def find_in_coord_list(coord_list, coord, atol=1e-8):
"""
Find the indices of matches of a particular coord in a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(coord_list) == 0:
return []
diff = np.array(coord_list) - np.array(coord)[None, :]
return np.where(np.all(np.abs(diff) < atol, axis=1))[0]
def in_coord_list(coord_list, coord, atol=1e-8):
"""
Tests if a particular coord is within a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
True if coord is in the coord list.
"""
return len(find_in_coord_list(coord_list, coord, atol=atol)) > 0
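# Illustrative sketch (not part of the original module): minimal usage of
# find_in_coord_list / in_coord_list. The helper name and values below are
# assumptions chosen for illustration only.
def _example_find_in_coord_list():
    coords = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
    # Only the second row matches within the default tolerance of 1e-8.
    assert list(find_in_coord_list(coords, [0.5, 0.5, 0.5])) == [1]
    assert in_coord_list(coords, [0.5, 0.5, 0.5])
    assert not in_coord_list(coords, [0.5, 0.5, 0.6])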
def is_coord_subset(subset, superset, atol=1e-8):
"""
Tests if all coords in subset are contained in superset.
Doesn't use periodic boundary conditions
Args:
subset, superset: List of coords
Returns:
True if all of subset is in superset.
"""
c1 = np.array(subset)
c2 = np.array(superset)
is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1)
any_close = np.any(is_close, axis=-1)
return np.all(any_close)
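# Illustrative sketch (not part of the original module): is_coord_subset checks
# containment without periodic boundary conditions. Values are illustrative.
def _example_is_coord_subset():
    superset = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
    assert is_coord_subset([[0.0, 0.0, 0.0]], superset)
    # A coordinate not present in superset (within atol) is rejected.
    assert not is_coord_subset([[2.0, 2.0, 2.0]], superset)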
def coord_list_mapping(subset, superset):
"""
Gives the index mapping from a subset to a superset.
Subset and superset cannot contain duplicate rows
Args:
subset, superset: List of coords
Returns:
list of indices such that superset[indices] = subset
"""
c1 = np.array(subset)
c2 = np.array(superset)
inds = np.where(np.all(np.isclose(c1[:, None, :], c2[None, :, :]),
axis=2))[1]
result = c2[inds]
if not np.allclose(c1, result):
if not is_coord_subset(subset, superset):
raise ValueError("subset is not a subset of superset")
if not result.shape == c1.shape:
raise ValueError("Something wrong with the inputs, likely duplicates "
"in superset")
return inds
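# Illustrative sketch (not part of the original module): the returned indices
# satisfy superset[indices] == subset, matched within numerical tolerance and
# without periodic images. Names and values are illustrative assumptions.
def _example_coord_list_mapping():
    superset = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25]]
    inds = coord_list_mapping([[0.25, 0.25, 0.25], [0.0, 0.0, 0.0]], superset)
    assert list(inds) == [2, 0]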
def coord_list_mapping_pbc(subset, superset, atol=1e-8):
"""
Gives the index mapping from a subset to a superset.
Subset and superset cannot contain duplicate rows
Args:
subset, superset: List of frac_coords
Returns:
list of indices such that superset[indices] = subset
"""
c1 = np.array(subset)
c2 = np.array(superset)
diff = c1[:, None, :] - c2[None, :, :]
diff -= np.round(diff)
inds = np.where(np.all(np.abs(diff) < atol, axis=2))[1]
#verify result (it's easier to check the validity of the result than
#the validity of the inputs)
test = c2[inds] - c1
test -= np.round(test)
if not np.allclose(test, 0):
if not is_coord_subset_pbc(subset, superset):
raise ValueError("subset is not a subset of superset")
if not test.shape == c1.shape:
raise ValueError("Something wrong with the inputs, likely duplicates "
"in superset")
return inds
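# Illustrative sketch (not part of the original module): under periodic boundary
# conditions, fractional coordinates that differ by whole lattice translations
# map to the same superset row. Names and values are illustrative assumptions.
def _example_coord_list_mapping_pbc():
    superset = [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]]
    # [1.25, 0.25, -0.75] is the second row shifted by integer lattice vectors.
    inds = coord_list_mapping_pbc([[1.25, 0.25, -0.75]], superset)
    assert list(inds) == [1]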
def get_linear_interpolated_value(x_values, y_values, x):
"""
Returns an interpolated value by linear interpolation between two values.
This method is written to avoid dependency on scipy, which causes issues on
threading servers.
Args:
x_values: Sequence of x values.
y_values: Corresponding sequence of y values
x: Get value at particular x
Returns:
Value at x.
"""
a = np.array(sorted(zip(x_values, y_values), key=lambda d: d[0]))
ind = np.where(a[:, 0] >= x)[0]
if len(ind) == 0 or ind[0] == 0:
raise ValueError("x is out of range of provided x_values")
i = ind[0]
x1, x2 = a[i - 1][0], a[i][0]
y1, y2 = a[i - 1][1], a[i][1]
return y1 + (y2 - y1) / (x2 - x1) * (x - x1)
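# Illustrative sketch (not part of the original module): a worked example of the
# linear interpolation above. With x_values [0, 1, 2] and y_values [0, 10, 20],
# x = 1.5 lies between (1, 10) and (2, 20), so y = 10 + 10 * 0.5 = 15.
def _example_linear_interpolation():
    assert get_linear_interpolated_value([0, 1, 2], [0, 10, 20], 1.5) == 15.0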
def all_distances(coords1, coords2):
"""
Returns the distances between two lists of coordinates
Args:
coords1: First set of cartesian coordinates.
coords2: Second set of cartesian coordinates.
Returns:
2d array of cartesian distances. E.g., the distance between
coords1[i] and coords2[j] is distances[i,j]
"""
c1 = np.array(coords1)
c2 = np.array(coords2)
z = (c1[:, None, :] - c2[None, :, :]) ** 2
return np.sum(z, axis=-1) ** 0.5
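# Illustrative sketch (not part of the original module): the returned matrix has
# shape (len(coords1), len(coords2)); entry [i, j] is the Euclidean distance
# between coords1[i] and coords2[j]. Values are illustrative assumptions.
def _example_all_distances():
    d = all_distances([[0, 0, 0]], [[3, 4, 0], [1, 0, 0]])
    assert d.shape == (1, 2)
    assert np.allclose(d, [[5.0, 1.0]])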
def pbc_diff(fcoords1, fcoords2):
"""
Returns the 'fractional distance' between two coordinates taking into
account periodic boundary conditions.
Args:
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,
0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
Fractional distance. Each coordinate must have the property that
abs(a) <= 0.5. Examples:
pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]) = [-0.2, -0.4, 0.2]
pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]) = [-0.4, -0.4, 0.11]
"""
fdist = np.subtract(fcoords1, fcoords2)
return fdist - np.round(fdist)
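# Illustrative sketch (not part of the original module): reproduces the examples
# from the pbc_diff docstring above; every component of the result satisfies
# abs(component) <= 0.5, i.e. it points to the nearest periodic image.
def _example_pbc_diff():
    assert np.allclose(pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]), [-0.2, -0.4, 0.2])
    assert np.allclose(pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]), [-0.4, -0.4, 0.11])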
@deprecated(Lattice.get_all_distances)
def pbc_all_distances(lattice, fcoords1, fcoords2):
"""
Returns the distances between two lists of coordinates taking into
account periodic boundary conditions and the lattice. Note that this
computes an MxN array of distances (i.e. the distance between each
point in fcoords1 and every coordinate in fcoords2). This is
different functionality from pbc_diff.
Args:
lattice: lattice to use
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,
0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
2d array of cartesian distances. E.g., the distance between
fcoords1[i] and fcoords2[j] is distances[i,j]
"""
return lattice.get_all_distances(fcoords1, fcoords2)
def pbc_shortest_vectors(lattice, fcoords1, fcoords2):
"""
Returns the shortest vectors between two lists of coordinates taking into
account periodic boundary conditions and the lattice.
Args:
lattice: lattice to use
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6, 0.7]
or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
array of displacement vectors from fcoords1 to fcoords2
first index is fcoords1 index, second is fcoords2 index
"""
#ensure correct shape
fcoords1, fcoords2 = np.atleast_2d(fcoords1, fcoords2)
#ensure that all points are in the unit cell
fcoords1 = np.mod(fcoords1, 1)
fcoords2 = np.mod(fcoords2, 1)
#create images, 2d array of all length 3 combinations of [-1,0,1]
r = np.arange(-1, 2)
arange = r[:, None] * np.array([1, 0, 0])[None, :]
brange = r[:, None] * np.array([0, 1, 0])[None, :]
crange = r[:, None] * np.array([0, 0, 1])[None, :]
images = arange[:, None, None] + brange[None, :, None] + \
crange[None, None, :]
images = images.reshape((27, 3))
#create images of f2
shifted_f2 = fcoords2[:, None, :] + images[None, :, :]
cart_f1 = lattice.get_cartesian_coords(fcoords1)
cart_f2 = lattice.get_cartesian_coords(shifted_f2)
#all vectors from f1 to f2
vectors = cart_f2[None, :, :, :] - cart_f1[:, None, None, :]
d_2 =
|
np.sum(vectors ** 2, axis=3)
|
numpy.sum
|
# Import GUI specific items
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QGraphicsView
import sys
import traceback
import os
import numpy as np
import cv2
from qimage2ndarray import array2qimage
from lib.tkmask import generate_tk_defects_layer
from lib.annotmask import get_sqround_mask # New mask generation facility (original mask needed)
# Specific UI features
from PyQt5.QtWidgets import QSplashScreen, QMessageBox, QGraphicsScene, QFileDialog, QTableWidgetItem
from PyQt5.QtGui import QPixmap, QImage, QColor, QIcon
from PyQt5.QtCore import Qt, QRectF, QSize
from ui import datmant_ui, color_specs_ui
import configparser
import time
import datetime
import subprocess
import pandas as pd
# Overall constants
PUBLISHER = "AlphaControlLab"
APP_TITLE = "DATM Annotation Tool"
APP_VERSION = "1.00.2-beta"
# Some configs
BRUSH_DIAMETER_MIN = 40
BRUSH_DIAMETER_MAX = 100
BRUSH_DIAMETER_DEFAULT = 40
# Colors
MARK_COLOR_MASK = QColor(255,0,0,99)
MARK_COLOR_DEFECT_DEFAULT = QColor(0, 0, 255, 99)
HELPER_COLOR = QColor(0,0,0,99)
# Some paths
COLOR_DEF_PATH = "defs/color_defs.csv"
# Color definitions window
class DATMantGUIColorSpec(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(DATMantGUIColorSpec, self).__init__(parent)
self.ui = color_specs_ui.Ui_ColorSpecsUI()
self.ui.setupUi(self)
# Main UI class with all methods
class DATMantGUI(QtWidgets.QMainWindow, datmant_ui.Ui_DATMantMainWindow):
# Applications states in status bar
APP_STATUS_STATES = {"ready": "Ready.",
"loading": "Loading image...",
"exporting_layers": "Exporting layers...",
"no_images": "No images or unexpected folder structure."}
# Annotation modes
ANNOTATION_MODE_MARKING_DEFECTS = 0
ANNOTATION_MODE_MARKING_MASK = 1
ANNOTATION_MODES_BUTTON_TEXT = {ANNOTATION_MODE_MARKING_DEFECTS: "Mode [Marking defects]",
ANNOTATION_MODE_MARKING_MASK: "Mode [Marking mask]"}
ANNOTATION_MODES_BUTTON_COLORS = {ANNOTATION_MODE_MARKING_DEFECTS: "blue",
ANNOTATION_MODE_MARKING_MASK: "red"}
# Mask file extension. If it changes in the future, it is easier to swap it here
MASK_FILE_EXTENSION_PATTERN = ".mask.png"
# Config file
config_path = None # Path to config file
config_data = None # The actual configuration
CONFIG_NAME = "datmant_config.ini" # Name of the config file
has_image = None
img_shape = None
# Flag which tells whether images were found in CWD
dir_has_images = False
# Drawing mode
annotation_mode = ANNOTATION_MODE_MARKING_DEFECTS
# Annotator
annotator = None
# Brush
brush = None
brush_diameter = BRUSH_DIAMETER_DEFAULT
# Color definitions
cspec = None
# For TK
tk_colors = None
current_paint = None # Paint of the brush
# Color conversion dicts
d_rgb2gray = None
d_gray2rgb = None
# Immutable items
current_image = None # Original image
current_mask = None # Original mask
current_helper = None # Helper mask
current_tk = None # Defects marked by TK
# User-updatable items
current_defects = None # Defects mask
current_updated_mask = None # Updated mask
# Image name
current_img = None
current_img_as_listed = None
# Internal vars
initializing = False
app = None
def __init__(self, parent=None):
self.initializing = True
# Setting up the base UI
super(DATMantGUI, self).__init__(parent)
self.setupUi(self)
from ui_lib.QtImageAnnotator import QtImageAnnotator
self.annotator = QtImageAnnotator()
# Need to synchronize brush sizes with the annotator
self.annotator.MIN_BRUSH_DIAMETER = BRUSH_DIAMETER_MIN
self.annotator.MAX_BRUSH_DIAMETER = BRUSH_DIAMETER_MAX
self.annotator.brush_diameter = BRUSH_DIAMETER_DEFAULT
self.figThinFigure.addWidget(self.annotator)
# Config file storage: config file stored in user directory
self.config_path = self.fix_path(os.path.expanduser("~")) + "." + PUBLISHER + os.sep
# Get color specifications and populate the corresponding combobox
self.read_defect_color_defs()
self.add_colors_to_list()
# Assign necessary dicts in the annotator component
if self.d_rgb2gray is not None and self.d_gray2rgb is not None:
self.annotator.d_rgb2gray = self.d_rgb2gray
self.annotator.d_gray2rgb = self.d_gray2rgb
else:
raise RuntimeError("Failed to load the color conversion schemes. Annotations cannot be saved.")
# Set up second window
self.color_ui = DATMantGUIColorSpec(self)
# Update button states
self.update_button_states()
# Initialize everything
self.initialize_brush_slider()
# Log this anyway
self.log("Application started")
# Style the mode button properly
self.annotation_mode_default()
# Initialization completed
self.initializing = False
# Set up the status bar
self.status_bar_message("ready")
# Set up those UI elements that depend on config
def UI_config(self):
# Check whether log should be shown or not
self.check_show_log()
# TODO: TEMP: For buttons, use .clicked.connect(self.*), for menu actions .triggered.connect(self.*),
# TODO: TEMP: for checkboxes use .stateChanged, and for spinners .valueChanged
self.actionLog.triggered.connect(self.update_show_log)
self.actionColor_definitions.triggered.connect(self.open_color_definition_help)
self.actionProcess_original_mask.triggered.connect(self.process_mask)
self.actionSave_current_annotations.triggered.connect(self.save_masks)
# Reload AI-generated mask, if present in the directory
self.actionAIMask.triggered.connect(self.load_AI_mask)
# Button assignment
self.annotator.mouseWheelRotated.connect(self.accept_brush_diameter_change)
self.btnClear.clicked.connect(self.clear_all_annotations)
self.btnBrowseImageDir.clicked.connect(self.browse_image_directory)
self.btnBrowseShp.clicked.connect(self.browse_shp_dir)
self.btnPrev.clicked.connect(self.load_prev_image)
self.btnNext.clicked.connect(self.load_next_image)
self.btnMode.clicked.connect(self.annotation_mode_switch)
# Selecting new image from list
# NB! We depend on this firing on index change, so we remove manual load_image elsewhere
self.connect_image_load_on_list_index_change(True)
# Try to load an image now that everything is initialized
self.load_image()
def open_color_definition_help(self):
if not self.cspec:
self.log("Cannot show color specifications as none are loaded")
return
# Assuming color specs were updated, set up the table
t = self.color_ui.ui.tabColorSpecs
t.setRowCount(len(self.cspec))
t.setColumnCount(3)
t.setColumnWidth(0, 150)
t.setColumnWidth(1, 150)
t.setColumnWidth(2, 150)
t.setHorizontalHeaderLabels(["Colors (TK)", "Colors (DATM)", "Grayscale mask mapping"])
# Go through the color specifications and set them appropriately
row = 0
for col in self.cspec:
tk = col["COLOR_HEXRGB_TK"]
nus = col["COLOR_NAME_EN"]
net = col["COLOR_NAME_ET"]
dt = col["COLOR_HEXRGB_DATMANT"]
gr = col["COLOR_GSCALE_MAPPING"]
# Text
t.setItem(row, 0, QTableWidgetItem(net))
t.setItem(row, 1, QTableWidgetItem(nus))
t.setItem(row, 2, QTableWidgetItem(str(gr)))
# Background and foreground
t.item(row, 0).setBackground(QColor(tk))
t.item(row, 0).setForeground(self.get_best_fg_for_bg(QColor(tk)))
t.item(row, 1).setBackground(QColor(dt))
t.item(row, 1).setForeground(self.get_best_fg_for_bg(QColor(dt)))
t.item(row, 2).setBackground(QColor(gr, gr, gr))
t.item(row, 2).setForeground(self.get_best_fg_for_bg(QColor(gr, gr, gr)))
row += 1
self.color_ui.show()
def connect_image_load_on_list_index_change(self, state):
if state:
self.lstImages.currentIndexChanged.connect(self.load_image)
else:
self.lstImages.disconnect()
def initialize_brush_slider(self):
self.sldBrushDiameter.setMinimum(BRUSH_DIAMETER_MIN)
self.sldBrushDiameter.setMaximum(BRUSH_DIAMETER_MAX)
self.sldBrushDiameter.setValue(BRUSH_DIAMETER_DEFAULT)
self.sldBrushDiameter.valueChanged.connect(self.brush_slider_update)
self.brush_slider_update()
def brush_slider_update(self):
new_diameter = self.sldBrushDiameter.value()
self.txtBrushDiameter.setText(str(new_diameter))
self.brush_diameter = new_diameter
self.update_annotator()
def accept_brush_diameter_change(self, change):
# Need to disconnect slider while changing value
self.sldBrushDiameter.valueChanged.disconnect()
new_diameter = int(self.sldBrushDiameter.value()+change)
new_diameter = BRUSH_DIAMETER_MIN if new_diameter < BRUSH_DIAMETER_MIN else new_diameter
new_diameter = BRUSH_DIAMETER_MAX if new_diameter > BRUSH_DIAMETER_MAX else new_diameter
self.sldBrushDiameter.setValue(new_diameter)
self.txtBrushDiameter.setText(str(new_diameter))
# Reconnect to slider move interrupt
self.sldBrushDiameter.valueChanged.connect(self.brush_slider_update)
# Clear currently used paint completely
def clear_all_annotations(self):
img_new = np.zeros(self.img_shape, dtype=np.uint8)
if self.annotation_mode is self.ANNOTATION_MODE_MARKING_DEFECTS:
self.current_defects = img_new
elif self.annotation_mode is self.ANNOTATION_MODE_MARKING_MASK:
self.current_updated_mask = 255-img_new
self.update_annotator_view()
self.annotator.setFocus()
def update_annotator(self):
if self.annotator is not None:
self.annotator.brush_diameter = self.brush_diameter
self.annotator.update_brush_diameter(0)
self.annotator.brush_fill_color = self.current_paint
def update_mask_from_current_mode(self):
the_mask = self.get_updated_mask()
if self.annotation_mode is self.ANNOTATION_MODE_MARKING_DEFECTS:
self.current_defects = the_mask
else:
self.current_updated_mask = the_mask
# Change annotation mode
def annotation_mode_switch(self):
# Save the mask
self.update_mask_from_current_mode()
# Update the UI
self.annotation_mode += 1
if self.annotation_mode > 1:
self.annotation_mode = 0
self.current_paint = [MARK_COLOR_DEFECT_DEFAULT, MARK_COLOR_MASK][self.annotation_mode]
if self.annotation_mode == self.ANNOTATION_MODE_MARKING_DEFECTS: # TODO: this should be optimized
self.change_brush_color()
self.lstDefectsAndColors.setEnabled(True)
else:
self.lstDefectsAndColors.setEnabled(False)
self.update_annotator()
self.btnMode.setText(self.ANNOTATION_MODES_BUTTON_TEXT[self.annotation_mode])
self.btnMode.setStyleSheet("QPushButton {font-weight: bold; color: "
+ self.ANNOTATION_MODES_BUTTON_COLORS[self.annotation_mode] + "}")
# Update the view
self.update_annotator_view()
self.annotator.setFocus()
# Set default annotation mode
def annotation_mode_default(self):
self.annotation_mode = self.ANNOTATION_MODE_MARKING_DEFECTS
self.current_paint = [MARK_COLOR_DEFECT_DEFAULT, MARK_COLOR_MASK][self.annotation_mode]
if self.annotation_mode == self.ANNOTATION_MODE_MARKING_DEFECTS:
self.change_brush_color()
self.lstDefectsAndColors.setEnabled(True)
else:
self.lstDefectsAndColors.setEnabled(False)
self.update_annotator()
self.btnMode.setText(self.ANNOTATION_MODES_BUTTON_TEXT[self.annotation_mode])
self.btnMode.setStyleSheet("QPushButton {font-weight: bold; color: "
+ self.ANNOTATION_MODES_BUTTON_COLORS[self.annotation_mode] + "}")
# Helper for QMessageBox
def show_info_box(self, title, text, box_icon=QMessageBox.Information):
msg = QMessageBox()
msg.setIcon(box_icon)
msg.setText(text)
msg.setWindowTitle(title)
msg.setModal(True)
msg.setStandardButtons(QMessageBox.Ok)
msg.exec()
# Get both masks as separate numpy arrays
def get_updated_mask(self):
if self.annotator._overlayHandle is not None:
# Depending on the mode, fill the mask appropriately
# Marking defects
if self.annotation_mode is self.ANNOTATION_MODE_MARKING_DEFECTS:
self.status_bar_message("exporting_layers")
self.log("Exporting color layers...")
the_new_mask = self.annotator.export_rgb2gray_mask() # Easy, as this is implemented in annotator
self.status_bar_message("ready")
# Or updating the road edge mask
else:
mask = self.annotator.export_ndarray_noalpha()
the_new_mask = 255 * np.ones(self.img_shape, dtype=np.uint8)
# NB! This approach beats np.where: it is 4.3 times faster!
reds, greens, blues = mask[:, :, 0], mask[:, :, 1], mask[:, :, 2]
# Set the mask according to the painted road mask
m1 = list(MARK_COLOR_MASK.getRgb())[:-1]
the_new_mask[(reds == m1[0]) & (greens == m1[1]) & (blues == m1[2])] = 0
return the_new_mask
def update_annotator_view(self):
# If there is no image, there's nothing to clear
if self.current_image is None:
return
if self.annotation_mode is self.ANNOTATION_MODE_MARKING_DEFECTS:
h, w = self.current_image.rect().height(), self.current_image.rect().width()
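# Build a translucent RGBA overlay that highlights pixels where the helper mask is zero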
helper = np.zeros((h,w,4), dtype=np.uint8)
helper[self.current_helper == 0] = list(HELPER_COLOR.getRgb())
self.annotator.clearAndSetImageAndMask(self.current_image,
self.current_defects,
array2qimage(helper),
aux_helper=(array2qimage(self.current_tk) if self.current_tk is not None else None),
process_gray2rgb=True,
direct_mask_paint=True)
else:
# Remember, the mask must be inverted here, but saved properly
h, w = self.current_image.rect().height(), self.current_image.rect().width()
mask = 255 *
|
np.zeros((h, w, 4), dtype=np.uint8)
|
numpy.zeros
|
"""
color.py
-------------
Hold and deal with visual information about meshes.
There are lots of ways to encode visual information, and the goal of this
architecture is to make it possible to define one, and then transparently
get the others. The two general categories are:
1) colors, defined for a face, vertex, or material
2) textures, defined as an image and UV coordinates for each vertex
This module only implements diffuse colors at the moment.
Goals
----------
1) If nothing is defined sane defaults should be returned
2) If a user alters or sets a value, that is considered user data
and should be saved and treated as such.
3) Only one 'mode' of visual (vertex or face) is allowed at a time
and setting or altering a value should automatically change the mode.
"""
import numpy as np
import copy
import colorsys
from .. import util
from .. import caching
from .. import grouping
class ColorVisuals(object):
"""
Store color information about a mesh.
"""
def __init__(self,
mesh=None,
face_colors=None,
vertex_colors=None):
"""
Store color information about a mesh.
Parameters
----------
mesh : Trimesh
Object that these visual properties
are associated with
face_colors : (n,3|4) or (3,) or (4,) uint8
Colors per-face
vertex_colors : (n,3|4) or (3,) or (4,) uint8
Colors per-vertex
"""
self.mesh = mesh
self._data = caching.DataStore()
self._cache = caching.Cache(id_function=self.crc)
self.defaults = {
'material_diffuse': np.array([102, 102, 102, 255],
dtype=np.uint8),
'material_ambient': np.array([64, 64, 64, 255],
dtype=np.uint8),
'material_specular': np.array([197, 197, 197, 255],
dtype=np.uint8),
'material_shine': 77.0}
if face_colors is not None:
self.face_colors = face_colors
if vertex_colors is not None:
self.vertex_colors = vertex_colors
@caching.cache_decorator
def transparency(self):
"""
Does the current object contain any transparency.
Returns
----------
transparency: bool, does the current visual contain transparency
"""
if 'vertex_colors' in self._data:
a_min = self._data['vertex_colors'][:, 3].min()
elif 'face_colors' in self._data:
a_min = self._data['face_colors'][:, 3].min()
else:
return False
return bool(a_min < 255)
@property
def defined(self):
"""
Are any colors defined for the current mesh.
Returns
---------
defined: bool, are colors defined or not.
"""
return self.kind is not None
@property
def kind(self):
"""
What color mode has been set.
Returns
----------
mode: 'face', 'vertex', or None
"""
self._verify_crc()
if 'vertex_colors' in self._data:
mode = 'vertex'
elif 'face_colors' in self._data:
mode = 'face'
else:
mode = None
return mode
def crc(self):
"""
A checksum for the current visual object and its parent mesh.
Returns
----------
crc: int, checksum of data in visual object and its parent mesh
"""
# will make sure everything has been transferred
# to datastore that needs to be before returning crc
result = self._data.fast_hash()
if hasattr(self.mesh, 'crc'):
# bitwise xor combines hashes better than a sum
result ^= self.mesh.crc()
return result
def copy(self):
"""
Return a copy of the current ColorVisuals object.
Returns
----------
copied : ColorVisuals
Contains the same information as self
"""
copied = ColorVisuals()
copied._data.data = copy.deepcopy(self._data.data)
return copied
@property
def face_colors(self):
"""
Colors defined for each face of a mesh.
If no colors are defined, defaults are returned.
Returns
----------
colors: (len(mesh.faces), 4) uint8, RGBA color for each face
"""
return self._get_colors(name='face')
@face_colors.setter
def face_colors(self, values):
"""
Set the colors for each face of a mesh.
This will apply these colors and delete any previously specified
color information.
Parameters
------------
colors: (len(mesh.faces), 3), set each face to the specified color
(len(mesh.faces), 4), set each face to the specified color
(3,) int, set the whole mesh this color
(4,) int, set the whole mesh this color
"""
if values is None:
if 'face_colors' in self._data:
self._data.data.pop('face_colors')
return
colors = to_rgba(values)
if (self.mesh is not None and
colors.shape == (4,)):
count = len(self.mesh.faces)
colors = np.tile(colors, (count, 1))
# if we set any color information, clear the others
self._data.clear()
self._data['face_colors'] = colors
self._cache.verify()
@property
def vertex_colors(self):
"""
Return the colors for each vertex of a mesh
Returns
------------
colors: (len(mesh.vertices), 4) uint8, color for each vertex
"""
return self._get_colors(name='vertex')
@vertex_colors.setter
def vertex_colors(self, values):
"""
Set the colors for each vertex of a mesh
This will apply these colors and delete any previously specified
color information.
Parameters
------------
colors: (len(mesh.vertices), 3), set each face to the color
(len(mesh.vertices), 4), set each face to the color
(3,) int, set the whole mesh this color
(4,) int, set the whole mesh this color
"""
if values is None:
if 'vertex_colors' in self._data:
self._data.data.pop('vertex_colors')
return
# make sure passed values are numpy array
values = np.asanyarray(values)
# Ensure the color shape is sane
if (self.mesh is not None and not
(values.shape == (len(self.mesh.vertices), 3) or
values.shape == (len(self.mesh.vertices), 4) or
values.shape == (3,) or
values.shape == (4,))):
return
colors = to_rgba(values)
if (self.mesh is not None and
colors.shape == (4,)):
count = len(self.mesh.vertices)
colors = np.tile(colors, (count, 1))
# if we set any color information, clear the others
self._data.clear()
self._data['vertex_colors'] = colors
self._cache.verify()
def _get_colors(self,
name):
"""
A magical function which maintains the sanity of vertex and face colors.
* If colors have been explicitly stored or changed, they are considered
user data, stored in self._data (DataStore), and are returned immediately
when requested.
* If colors have never been set, a (count,4) tiled copy of the default diffuse
color will be stored in the cache
** the CRC on creation for these cached default colors will also be stored
** if the cached color array is altered (different CRC than when it was
created) we consider that now to be user data and the array is moved from
the cache to the DataStore.
Parameters
-----------
name: str, 'face', or 'vertex'
Returns
-----------
colors: (count, 4) uint8, RGBA colors
"""
count = None
try:
if name == 'face':
count = len(self.mesh.faces)
elif name == 'vertex':
count = len(self.mesh.vertices)
except BaseException:
pass
# the face or vertex colors
key_colors = str(name) + '_colors'
# the key storing the initial crc of those colors
key_crc = key_colors + '_crc'
if key_colors in self._data:
# if a user has explicitly stored or changed the color it
# will be in data
return self._data[key_colors]
elif key_colors in self._cache:
# if the colors have been autogenerated already they
# will be in the cache
colors = self._cache[key_colors]
# if the cached colors have been changed since creation we move
# them to data
if colors.crc() != self._cache[key_crc]:
# call the setter on the matching property, which moves
# the colors from the cache into the DataStore
if name == 'face':
self.face_colors = colors
elif name == 'vertex':
self.vertex_colors = colors
else:
raise ValueError('unsupported name!!!')
self._cache.verify()
else:
# colors have never been accessed
if self.kind is None:
# no colors are defined, so create a (count, 4) tiled
# copy of the default color
colors = np.tile(self.defaults['material_diffuse'],
(count, 1))
elif (self.kind == 'vertex' and
name == 'face'):
colors = vertex_to_face_color(
vertex_colors=self.vertex_colors,
faces=self.mesh.faces)
elif (self.kind == 'face' and
name == 'vertex'):
colors = face_to_vertex_color(
mesh=self.mesh,
face_colors=self.face_colors)
else:
raise ValueError('self.kind not accepted values!!')
if (count is not None and
colors.shape != (count, 4)):
raise ValueError('{} colors incorrect shape!'.format(name))
# subclass the array to track for changes using a CRC
colors = caching.tracked_array(colors)
# put the generated colors and their initial checksum into cache
self._cache[key_colors] = colors
self._cache[key_crc] = colors.crc()
return colors
def _verify_crc(self):
"""
Verify the checksums of cached face and vertex color, to verify
that a user hasn't altered them since they were generated from
defaults.
If the colors have been altered since creation, move them into
the DataStore at self._data since the user action has made them
user data.
"""
if not hasattr(self, '_cache') or len(self._cache) == 0:
return
for name in ['face', 'vertex']:
# the face or vertex colors
key_colors = str(name) + '_colors'
# the key storing the initial crc of those colors
key_crc = key_colors + '_crc'
if key_colors not in self._cache:
continue
colors = self._cache[key_colors]
# if the cached colors have been changed since creation
# move them to data
if colors.crc() != self._cache[key_crc]:
if name == 'face':
self.face_colors = colors
elif name == 'vertex':
self.vertex_colors = colors
else:
raise ValueError('unsupported name!!!')
self._cache.verify()
def update_vertices(self, mask):
"""
Apply a mask to remove or duplicate vertex properties.
"""
self._update_key(mask, 'vertex_colors')
def update_faces(self, mask):
"""
Apply a mask to remove or duplicate face properties
"""
self._update_key(mask, 'face_colors')
def face_subset(self, face_index):
"""
Given a mask of face indices, return a sliced version.
Parameters
----------
face_index: (n,) int, mask for faces
(n,) bool, mask for faces
Returns
----------
visual: ColorVisuals object containing a subset of faces.
"""
if self.defined:
result = ColorVisuals(
face_colors=self.face_colors[face_index])
else:
result = ColorVisuals()
return result
@property
def main_color(self):
"""
What is the most commonly occurring color.
Returns
------------
color: (4,) uint8, most common color
"""
if self.kind is None:
return DEFAULT_COLOR
elif self.kind == 'face':
colors = self.face_colors
elif self.kind == 'vertex':
colors = self.vertex_colors
else:
raise ValueError('color kind incorrect!')
# find the unique colors
unique, inverse = grouping.unique_rows(colors)
# the most commonly occurring color, or mode
# this will be an index of inverse, not colors
mode_index = np.bincount(inverse).argmax()
color = colors[unique[mode_index]]
return color
def concatenate(self, other, *args):
"""
Concatenate two or more ColorVisuals objects into a single object.
Parameters
-----------
other : ColorVisuals
Object to append
*args: ColorVisuals objects
Returns
-----------
result: ColorVisuals object containing information from current
object and others in the order it was passed.
"""
# avoid a circular import
from . import objects
result = objects.concatenate(self, other, *args)
return result
def __add__(self, other):
"""
Concatenate two ColorVisuals objects into a single object.
Parameters
-----------
other: ColorVisuals object
Returns
-----------
result: ColorVisuals object containing information from current
object and other in the order (self, other)
"""
return self.concatenate(other)
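# Illustrative sketch, with hypothetical ColorVisuals instances a_vis and b_vis:
#   combined = a_vis + b_vis                      # same as a_vis.concatenate(b_vis)
#   combined = a_vis.concatenate(b_vis, c_vis)    # any number of extra visuals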
def _update_key(self, mask, key):
"""
Mask the value contained in the DataStore at a specified key.
Parameters
-----------
mask: (n,) int
(n,) bool
key: hashable object, in self._data
"""
mask = np.asanyarray(mask)
if key in self._data:
self._data[key] = self._data[key][mask]
def to_rgba(colors, dtype=np.uint8):
"""
Convert a single or multiple RGB colors to RGBA colors.
Parameters
----------
colors : (n, 3) or (n, 4) array
RGB or RGBA colors
Returns
----------
colors : (n, 4) list of RGBA colors
(4,) single RGBA color
"""
if colors is None or not util.is_sequence(colors):
return DEFAULT_COLOR
# colors as numpy array
colors = np.asanyarray(colors)
# integer value for opaque alpha given our datatype
opaque = np.iinfo(dtype).max
if (colors.dtype.kind == 'f' and colors.max() < (1.0 + 1e-8)):
colors = (colors * opaque).round().astype(dtype)
elif (colors.max() <= opaque):
colors = colors.astype(dtype)
else:
raise ValueError('colors not convertible!')
if util.is_shape(colors, (-1, 3)):
# add an opaque alpha for RGB colors
colors = np.column_stack((
colors,
opaque * np.ones(len(colors)))).astype(dtype)
elif util.is_shape(colors, (3,)):
# if passed a single RGB color add an alpha
colors = np.append(colors, opaque)
if not (util.is_shape(colors, (4,)) or
util.is_shape(colors, (-1, 4))):
raise ValueError('Colors not of appropriate shape!')
return colors
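# Rough usage sketch for to_rgba; the results follow from the branches above,
# though the exact output dtype in the (3,) branch is left to numpy promotion:
#   to_rgba([255, 0, 0])          # -> [255, 0, 0, 255], opaque red
#   to_rgba([[0.2, 0.4, 0.6]])    # floats in [0, 1] scale to 0-255:
#                                 # -> [[51, 102, 153, 255]]
#   to_rgba(None)                 # -> DEFAULT_COLOR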
def to_float(colors):
"""
Convert integer colors to 0.0 - 1.0 floating point colors
Parameters
-------------
colors : (n, d) int
Integer colors
Returns
-------------
as_float : (n, d) float
Float colors 0.0 - 1.0
"""
# colors as numpy array
colors = np.asanyarray(colors)
if colors.dtype.kind == 'f':
return colors
elif colors.dtype.kind in 'iu':
# integer value for opaque alpha given our datatype
opaque = np.iinfo(colors.dtype).max
return colors.astype(np.float64) / opaque
else:
raise ValueError('only works on int or float colors!')
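# Usage note for to_float: the divisor comes from the *input* integer dtype, so
# pass an explicitly typed array to get the usual 0-255 -> 0.0-1.0 mapping
# (a plain Python list is promoted to int64 and divided by the int64 maximum):
#   to_float(np.array([255, 128, 0, 255], dtype=np.uint8))
#   # -> approximately [1.0, 0.502, 0.0, 1.0]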
def hex_to_rgba(color):
"""
Turn a string hex color to a (4,) RGBA color.
Parameters
-----------
color: str, hex color
Returns
-----------
rgba: (4,) np.uint8, RGBA color
"""
value = str(color).lstrip('#').strip()
if len(value) == 6:
rgb = [int(value[i:i + 2], 16) for i in (0, 2, 4)]
rgba = np.append(rgb, 255).astype(np.uint8)
else:
raise ValueError('Only RGB supported')
return rgba
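# Rough usage sketch for hex_to_rgba (6-digit RGB hex only, per the check above):
#   hex_to_rgba('#ff8000')   # -> [255, 128, 0, 255]
#   hex_to_rgba('00ff00')    # leading '#' is optional -> [0, 255, 0, 255]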
def random_color(dtype=np.uint8):
"""
Return a random RGB color using datatype specified.
Parameters
----------
dtype: numpy dtype of result
Returns
----------
color: (4,) dtype, random color that looks OK
"""
hue = np.random.random() + .61803
hue %= 1.0
color = np.array(colorsys.hsv_to_rgb(hue, .99, .99))
if np.dtype(dtype).kind in 'iu':
max_value = (2**(np.dtype(dtype).itemsize * 8)) - 1
color *= max_value
color = np.append(color, max_value).astype(dtype)
return color
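# Usage note for random_color: with the default uint8 dtype the result is a
# (4,) opaque RGBA color scaled to 0-255; for a float dtype the integer branch
# above is skipped, so the return is the raw (3,) RGB triple in [0, 1] with no
# alpha appended.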
def vertex_to_face_color(vertex_colors, faces):
"""
Convert a list of vertex colors to face colors.
Parameters
----------
vertex_colors: (n,(3,4)), colors
faces: (m,3) int, face indexes
Returns
-----------
face_colors: (m,4) colors
"""
vertex_colors = to_rgba(vertex_colors)
face_colors = vertex_colors[faces].mean(axis=1)
return face_colors.astype(np.uint8)
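# Rough example for vertex_to_face_color: each face color is the mean of its
# three vertex colors after RGBA conversion, e.g. a face whose vertices are
# pure red, green and blue averages to roughly [85, 85, 85, 255].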
def face_to_vertex_color(
mesh,
face_colors,
dtype=np.uint8):
"""
Convert face colors into vertex colors.
Parameters
-----------
mesh : trimesh.Trimesh object
face_colors: (n, (3,4)) int, face colors
dtype: data type of output
Returns
-----------
vertex_colors: (m,4) dtype, colors for each vertex
"""
rgba = to_rgba(face_colors)
vertex = mesh.faces_sparse.dot(rgba.astype(np.float64))
vertex = (vertex / mesh.vertex_degree.reshape(
(-1, 1))).astype(dtype)
assert vertex.shape == (len(mesh.vertices), 4)
return vertex
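# Note on face_to_vertex_color: faces_sparse sums the colors of every face
# touching each vertex, and dividing by vertex_degree turns that sum into a
# per-vertex average; vertices with degree zero are assumed not to occur, since
# they would divide by zero here.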
def colors_to_materials(colors, count=None):
"""
Convert a list of colors into a list of unique materials
and material indexes.
Parameters
-----------
colors : (n, 3) or (n, 4) float
RGB or RGBA colors
count : int
Number of entities to apply color to
Returns
-----------
diffuse : (m, 4) int
Colors
index : (count,) int
Index of each color
"""
# convert RGB to RGBA
rgba = to_rgba(colors)
# if we were only passed a single color
if util.is_shape(rgba, (4,)) and count is not None:
diffuse = rgba.reshape((-1, 4))
index = np.zeros(count, dtype=np.int64)
elif util.is_shape(rgba, (-1, 4)):
# we were passed multiple colors
# find the unique colors in the list to save as materials
unique, index = grouping.unique_rows(rgba)
diffuse = rgba[unique]
else:
raise ValueError('Colors not convertible!')
return diffuse, index
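# Rough usage sketch for colors_to_materials, assuming grouping.unique_rows
# keeps one row per distinct color:
#   diffuse, index = colors_to_materials(
#       [[255, 0, 0], [255, 0, 0], [0, 255, 0], [255, 0, 0]])
#   # diffuse -> the two unique RGBA rows; index -> something like [0, 0, 1, 0]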
def linear_color_map(values, color_range=None):
"""
Linearly interpolate between two colors.
If colors are not specified the function will
interpolate between 0.0 values as red and 1.0 as green.
Parameters
--------------
values : (n, ) float
Values to interpolate
color_range : None, or (2, 4) uint8
What colors should extrema be set to
Returns
---------------
colors : (n, 4) uint8
RGBA colors for interpolated values
"""
if color_range is None:
color_range = np.array([[255, 0, 0, 255],
[0, 255, 0, 255]],
dtype=np.uint8)
else:
color_range = np.asanyarray(color_range,
dtype=np.uint8)
if color_range.shape != (2, 4):
raise ValueError('color_range must be RGBA (2, 4)')
# float 1D array clamped to 0.0 - 1.0
values = np.clip(np.asanyarray(
values, dtype=np.float64).ravel(),
0.0, 1.0).reshape((-1, 1))
# the stacked component colors
color = [np.ones((len(values), 4)) * c
for c in color_range.astype(np.float64)]
# interpolated colors
colors = (color[1] * values) + (color[0] * (1.0 - values))
# rounded and set to correct data type
colors = np.round(colors).astype(np.uint8)
return colors
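# Rough usage sketch for linear_color_map with the default red-to-green range:
#   linear_color_map([0.0, 0.5, 1.0])
#   # -> 0.0 maps to the first extreme [255, 0, 0, 255] (red),
#   #    0.5 to an even blend of the two (about [128, 128, 0, 255]),
#   #    1.0 to the second extreme [0, 255, 0, 255] (green)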
def interpolate(values, color_map=None, dtype=np.uint8):
"""
Given a 1D list of values, return interpolated colors
for the range.
Parameters
---------------
values : (n, ) float
Values to be interpolated over
color_map : None, or str
Key to a colormap contained in:
matplotlib.pyplot.colormaps()
e.g: 'viridis'
Returns
-------------
interpolated : (n, 4) dtype
Interpolated RGBA colors
"""
# get a color interpolation function
if color_map is None:
cmap = linear_color_map
else:
from matplotlib.pyplot import get_cmap
cmap = get_cmap(color_map)
# make input always float
values = np.asanyarray(values, dtype=np.float64).ravel()
# scale values to 0.0 - 1.0 and get colors
colors = cmap((values - values.min()) / values.ptp())
# convert to 0-255 RGBA
rgba = to_rgba(colors, dtype=dtype)
return rgba
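# Usage note for interpolate: values are min-max scaled before the colormap
# lookup, so a constant input makes ptp() zero and the scaling divides by zero;
# callers are assumed to pass values with some dynamic range, e.g.
#   interpolate(np.linspace(0, 1, 5), color_map='viridis')  # -> (5, 4) uint8 RGBA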
def uv_to_color(uv, image):
"""
Get the color in a texture image.
Parameters
-------------
uv : (n, 2) float
UV coordinates on texture image
image : PIL.Image
Texture image
Returns
----------
colors : (n, 4) uint8
RGBA color at each of the UV coordinates
"""
if image is None or uv is None:
return None
# UV coordinates should be (n, 2) float
uv = np.asanyarray(uv, dtype=np.float64)
# get texture image pixel positions of UV coordinates
x = (uv[:, 0] * (image.width - 1))
y = ((1 - uv[:, 1]) * (image.height - 1))
# convert to int and wrap to image
# size in the manner of GL_REPEAT
x = x.round().astype(np.int64) % image.width
y = y.round().astype(np.int64) % image.height
# access colors from pixel locations
# make sure image is RGBA before getting values
colors = np.asanyarray(image.convert('RGBA'))[y, x]
# conversion to RGBA should have corrected shape
assert colors.ndim == 2 and colors.shape[1] == 4
return colors
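# Usage note for uv_to_color: UV (0, 0) samples the bottom-left of the texture
# because of the (1 - v) flip above, and coordinates outside [0, 1] wrap around
# the image in the style of GL_REPEAT via the modulo by the image size.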
DEFAULT_COLOR = np.array([102, 102, 102, 255], dtype=np.uint8)
"""Calculate halo concentration from mass and redshift.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy import integrate
from astropy.cosmology import Planck13 as cosmo
# default cosmological parameters
h = cosmo.h
Om_M = cosmo.Om0
Om_L = 1. - Om_M
def _check_inputs(z, m):
"""Check inputs are arrays of same length or array and a scalar."""
try:
nz = len(z)
z = np.array(z)
except TypeError:
z = np.array([z])
nz = len(z)
try:
nm = len(m)
m = np.array(m)
# -*- coding: utf-8 -*-
import numbers
import operator
import typing
import unittest
import numpy as np
import torch
from torch import nn
from torch.nn.utils import rnn
__author__ = "<NAME>"
__copyright__ = (
"Copyright (c) 2018 <NAME>\n"
"\n"
"Permission is hereby granted, free of charge, to any person obtaining a copy\n"
"of this software and associated documentation files (the \"Software\"), to deal\n"
"in the Software without restriction, including without limitation the rights\n"
"to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n"
"copies of the Software, and to permit persons to whom the Software is\n"
"furnished to do so, subject to the following conditions:\n"
"\n"
"The above copyright notice and this permission notice shall be included in all\n"
"copies or substantial portions of the Software.\n"
"\n"
"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n"
"IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n"
"FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n"
"AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n"
"LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n"
"OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n"
"SOFTWARE."
)
__license__ = "MIT License"
__version__ = "2018.1"
__date__ = "Aug 18, 2018"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class TorchTestCase(unittest.TestCase):
"""This class extends ``unittest.TestCase`` such that some of the available assertions support instances of various
PyTorch classes.
``TorchTestCase`` provides the following PyTorch-specific functionality:
* ``assertEqual`` supports all kinds of PyTorch tensors as well as instances of ``torch.nn.Parameter`` and
``torch.nn.utils.rnn.PackedSequence``.
* ``assertGreater``, ``assertGreaterEqual``, ``assertLess``, and ``assertLessEqual`` support all kinds of PyTorch
tensors except ``CharTensor``s as well as instances of ``torch.nn.Parameter``.
Furthermore, these assertions allow for comparing tensors to numbers. Notice, however, that neither of the
mentioned assertions performs any kind of type check in the sense that it is possible to compare a
``FloatTensor`` with a ``Parameter``, for example.
"""
ORDER_ASSERTION_TYPES = [
torch.ByteTensor,
torch.cuda.ByteTensor,
torch.ShortTensor,
torch.cuda.ShortTensor,
torch.IntTensor,
torch.cuda.IntTensor,
torch.LongTensor,
torch.cuda.LongTensor,
torch.HalfTensor,
torch.cuda.HalfTensor,
torch.FloatTensor,
torch.cuda.FloatTensor,
torch.DoubleTensor,
torch.cuda.DoubleTensor,
torch.Tensor # this is an alias for the default tensor type torch.FloatTensor
]
"""list[type]: A list of all types of PyTorch tensors that are supported by order assertions, like lower-than."""
TENSOR_TYPES = [
torch.ByteTensor,
torch.cuda.ByteTensor,
torch.CharTensor,
torch.cuda.CharTensor,
torch.ShortTensor,
torch.cuda.ShortTensor,
torch.IntTensor,
torch.cuda.IntTensor,
torch.LongTensor,
torch.cuda.LongTensor,
torch.HalfTensor,
torch.cuda.HalfTensor,
torch.FloatTensor,
torch.cuda.FloatTensor,
torch.DoubleTensor,
torch.cuda.DoubleTensor,
torch.Tensor # this is an alias for the default tensor type torch.FloatTensor
]
"""list[type]: A list of all different types of PyTorch tensors."""
# CONSTRUCTOR ####################################################################################################
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._eps = 0.0 # the element-wise absolute tolerance that is enforced in equality assertions
# add equality functions for tensors
for t in self.TENSOR_TYPES:
self.addTypeEqualityFunc(t, self.assert_tensor_equal)
# add equality function for parameters
self.addTypeEqualityFunc(nn.Parameter, self.assert_parameter_equal)
# add equality function for packed sequences
self.addTypeEqualityFunc(torch.nn.utils.rnn.PackedSequence, self.assert_packed_sequence_equal)
# PROPERTIES #####################################################################################################
@property
def eps(self) -> float:
"""float: The element-wise absolute tolerance that is enforced in equality assertions.
The tolerance value for equality assertions between two tensors is interpreted as the maximum element-wise
absolute difference that the compared tensors may exhibit. Notice that a specified tolerance is enforced for
comparisons of **two tensors** only, and only for **equality assertions**.
"""
return self._eps
@eps.setter
def eps(self, eps: numbers.Real) -> None:
# sanitize the provided arg
if not isinstance(eps, numbers.Real):
raise TypeError("<eps> has to be a real number, but is of type {}!".format(type(eps)))
eps = float(eps)
if eps < 0:
raise ValueError("<eps> has to be non-negative, but was specified as {}!".format(eps))
# update eps value
self._eps = eps
# METHODS ########################################################################################################
def _fail_with_message(self, msg: typing.Union[str, None], standard_msg: str) -> None:
"""A convenience method that first formats the message to display with ``_formatMessage``, and then invokes
``fail``.
Args:
msg (str or None): The explicit user-defined message.
standard_msg (str): The standard message created by some assertion method.
"""
self.fail(self._formatMessage(msg, standard_msg))
@classmethod
def _prepare_tensor_order_comparison(cls, first, second) -> typing.Tuple[typing.Any, typing.Any]:
"""This method prepares tensors for subsequent order comparisons.
The preparation includes the following steps:
1. check that both args are either a tensor or a number,
2. check that at least one of them is a tensor,
3. if both args are tensors, then check whether they have the same shape, and
4. turn any provided number into an according tensor of appropriate shape.
Notice that order comparison support all kinds of PyTorch tensors except ``CharTensor``s, which is why this
method raises a ``TypeError`` if a ``CharTensor`` is provided.
Args:
first: The first tensor or number to prepare.
second: The second tensor or number to prepare.
Returns:
tuple: The method simply returns the prepared args in the same order that they were provided.
Raises:
TypeError: If any of ``first`` or ``second`` is neither a supported kind of tensor nor a number, or if
both args are numbers.
ValueErrors: If ``first`` and ``second`` are tensors of different shape.
"""
# ensure that both args are either a tensor or a number
if type(first) not in cls.ORDER_ASSERTION_TYPES and not isinstance(first, numbers.Real):
raise TypeError("The first argument is neither a supported type of tensor nor a number!")
if type(second) not in cls.ORDER_ASSERTION_TYPES and not isinstance(second, numbers.Real):
raise TypeError("The second argument is neither a supported type of tensor nor a number!")
# if both args are tensors, then check whether they have the same shape
if (
type(first) in cls.ORDER_ASSERTION_TYPES and
type(second) in cls.ORDER_ASSERTION_TYPES and
first.shape != second.shape
):
raise ValueError("The arguments must not be tensors of different shapes!")
# turn first argument into tensor if it is a number
if isinstance(first, numbers.Real):
first = float(first)
if isinstance(second, numbers.Real):
raise TypeError("At least one the arguments has to be a tensor!")
else:
first = torch.ones(second.shape) * first
# turn second argument into tensor if it is a number
if isinstance(second, numbers.Real):
second = float(second)
if isinstance(first, numbers.Real):
raise TypeError("At least one the arguments has to be a tensor!")
else:
second = torch.ones(first.shape) * second
return first, second
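# Rough usage sketch: comparing a tensor with a plain number broadcasts the
# number to a matching tensor, e.g.
#   first, second = TorchTestCase._prepare_tensor_order_comparison(
#       torch.zeros(2, 3), 1.5)
#   # second -> a (2, 3) tensor filled with 1.5; two numbers, or two tensors of
#   # different shape, raise TypeError / ValueError instead.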
def _tensor_aware_assertion(
self,
tensor_assertion: typing.Callable,
default_assertion: typing.Callable,
first,
second,
msg: typing.Optional[str]
) -> None:
"""Invokes either a tensor-specific version of an assertion or the original implementation provided by
``unittest.TestCase``.
This method assumes that a function that implements some assertion has to be invoked as
some-assertion(first, second, msg=msg)
If either ``first`` or ``second`` is a PyTorch Tensor, then we invoke ``tensor_assertion``, and otherwise
we use ``default_assertion``.
Args:
tensor_assertion (callable): The tensor-specific implementation of an assertion.
default_assertion (callable): The default implementation of the same assertion.
first: The first arg to pass to the assertion method.
second: The second arg to pass to the assertion method.
msg (str): Passed to the assertion method as keyword arg ``msg``.
"""
# check whether any of the args is a tensor/variable/parameter
# if yes -> call tensor-specific assertion check
all_tensor_types = self.TENSOR_TYPES + [nn.Parameter]
if type(first) in all_tensor_types or type(second) in all_tensor_types:
# turn parameters into tensors
if isinstance(first, nn.Parameter):
first = first.data
if isinstance(second, nn.Parameter):
second = second.data
# invoke assertion check for tensors
tensor_assertion(first, second, msg=msg)
# call original method for checking the assertion
else:
default_assertion(first, second, msg=msg)
@staticmethod
def _tensor_comparison(
first,
second,
comp_op: typing.Callable
) -> typing.Optional[typing.List[typing.Tuple[int, ...]]]:
"""Compares two PyTorch tensors element-wisely by means of the provided comparison operator.
The provided tensors may be of any, possibly different types of PyTorch tensors except ``CharTensor``. They do
have to be of equal shape, though. Notice further that this method expects actual tensors as opposed to PyTorch
``Parameter``s.
Args:
first: The first PyTorch tensor to compare.
second: The second PyTorch tensor to compare.
comp_op: The comparison operator to use.
Returns:
``None``, if the comparison evaluates to ``True`` for all coordinates, and a list of positions, i.e., tuples
of ``int`` values, where it does not, otherwise.
"""
# turn both tensors into numpy arrays
first = first.cpu().numpy()
second = second.cpu().numpy()
# compare both args
comp = comp_op(first, second)
# if comparison yields true for each entry -> nothing else to do
if comp.all():
return None
# retrieve all coordinates where the comparison evaluated to False
index_lists = [list(l) for l in np.where(np.invert(comp))]
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import PIL.Image as Image
import random
import numpy as np
def get_dir_list(path, _except=None):
return [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x)) and x != _except]
def get_file_list(path, _except=None, sort=False):
if sort:
return sorted(
[os.path.join(path, x) for x in os.listdir(path) if os.path.isfile(os.path.join(path, x)) and x != _except])
else:
return [os.path.join(path, x) for x in os.listdir(path) if
os.path.isfile(os.path.join(path, x)) and x != _except]
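# Rough usage sketch, assuming a hypothetical layout of one directory per clip:
#   clips = get_dir_list('/data/clips')           # full paths of clip directories
#   frames = get_file_list(clips[0], sort=True)   # that clip's frames in order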
class dataReader():
def __init__(self, filename, batch_size=16, num_frames_per_clip=16):
self.start_pos = 0
self.batch_size = batch_size
self.num_frames_per_clip = num_frames_per_clip
self.clips = []
self.labels = []
with open(filename, 'r') as fr:
for line in fr:
line = line.strip().split('\t')
clip = line[0]
label = int(line[1])
if (len(get_file_list(clip)) < num_frames_per_clip): continue
self.clips.append(clip)
self.labels.append(label)
self.size = len(self.clips)
self.idx = list(range(self.size))
random.shuffle(self.idx)
self.finished = False
def get_frames_data(self, clip):
ret = []
candidates = get_file_list(clip, sort=True)
length = len(candidates)
s_index = random.randint(0, length - self.num_frames_per_clip)
for idx in range(s_index, s_index + self.num_frames_per_clip):
img = Image.open(candidates[idx])
ret.append(np.array(img))
return ret
def get_next_batch(self):
data = []
labels = []
for i in range(self.batch_size):
if(self.start_pos>=self.size):
self.start_pos = 0
random.shuffle(self.idx)
self.finished = True
clip = self.clips[self.idx[self.start_pos ]]
label = self.labels[self.idx[self.start_pos ]]
data.append(self.get_frames_data(clip))
labels.append(label)
self.start_pos+=1
data = np.array(data)
"""
Plots:
plot_MAP_rv
plot_quicklooklc
plot_fitted_zoom
plot_raw_zoom
plot_fitindiv
plot_phasefold
plot_scene
plot_hr
plot_lithium
plot_rotation
plot_fpscenarios
plot_grounddepth
plot_full_kinematics
(plot_positions)
(plot_velocities)
groundphot:
plot_groundscene
shift_img_plot
plot_pixel_lc
vis_photutils_lcs
stackviz_blend_check
convenience:
hist2d
plot_contour_2d_samples
"""
import os, corner, pickle
from datetime import datetime
from glob import glob
import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm
from numpy import array as nparr
from scipy.interpolate import interp1d
from itertools import product
from collections import deque
from aesthetic.plot import savefig, format_ax
from aesthetic.plot import set_style
import billy.plotting as bp
from timmy.paths import DATADIR, RESULTSDIR
from timmy.convenience import (
get_tessphot, get_clean_tessphot, detrend_tessphot, get_model_transit,
get_model_transit_quad, _get_fitted_data_dict,
_get_fitted_data_dict_alltransit, _get_fitted_data_dict_allindivtransit
)
from astrobase.lcmath import (
phase_magseries, phase_magseries_with_errs, phase_bin_magseries,
phase_bin_magseries_with_errs, sigclip_magseries, find_lc_timegroups,
phase_magseries_with_errs, time_bin_magseries
)
from astrobase import periodbase
from astrobase.plotbase import skyview_stamp
from astropy.stats import LombScargle
from astropy import units as u, constants as const
from astropy.io import fits
from astropy.time import Time
from astropy.wcs import WCS
from astroquery.mast import Catalogs
import astropy.visualization as vis
import matplotlib as mpl
from matplotlib import patches
from scipy.ndimage import gaussian_filter
import logging
from matplotlib.ticker import MaxNLocator, NullLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
import matplotlib.ticker as ticker
import matplotlib.transforms as transforms
import matplotlib.patheffects as path_effects
##################################################
# wrappers to generic plots implemented in billy #
##################################################
def plot_test_data(x_obs, y_obs, y_mod, modelid, outdir):
bp.plot_test_data(x_obs, y_obs, y_mod, modelid, outdir)
def plot_MAP_data(x_obs, y_obs, y_MAP, outpath):
bp.plot_MAP_data(x_obs, y_obs, y_MAP, outpath, ms=1)
def plot_sampleplot(m, outpath, N_samples=100):
bp.plot_sampleplot(m, outpath, N_samples=N_samples, ms=1, malpha=0.1)
def plot_traceplot(m, outpath):
bp.plot_traceplot(m, outpath)
def plot_cornerplot(true_d, m, outpath):
bp.plot_cornerplot(true_d, m, outpath)
def plot_MAP_rv(x_obs, y_obs, y_MAP, y_err, telcolors, x_pred, y_pred,
map_estimate, outpath):
#
# rv vs time
#
plt.close('all')
plt.figure(figsize=(14, 4))
plt.plot(x_pred, y_pred, "k", lw=0.5)
plt.errorbar(x_obs, y_MAP, yerr=y_err, fmt=",k")
plt.scatter(x_obs, y_MAP, c=telcolors, s=8, zorder=100)
plt.xlim(x_pred.min(), x_pred.max())
plt.xlabel("BJD")
plt.ylabel("radial velocity [m/s]")
_ = plt.title("MAP model")
fig = plt.gcf()
savefig(fig, outpath, writepdf=0, dpi=300)
outpath = outpath.replace('.png', '_phasefold.png')
#
# rv vs phase
#
plt.close('all')
obs_d = phase_magseries_with_errs(
x_obs, y_MAP, y_err, map_estimate['period'], map_estimate['t0'],
wrap=False, sort=False
)
pred_d = phase_magseries(
x_pred, y_pred, map_estimate['period'], map_estimate['t0'],
wrap=False, sort=True
)
plt.plot(
pred_d['phase'], pred_d['mags'], "k", lw=0.5
)
plt.errorbar(
obs_d['phase'], obs_d['mags'], yerr=obs_d['errs'], fmt=",k"
)
plt.scatter(
obs_d['phase'], obs_d['mags'], c=telcolors, s=8, zorder=100
)
plt.xlabel("phase")
plt.ylabel("radial velocity [m/s]")
_ = plt.title("MAP model. P={:.5f}, t0={:.5f}".
format(map_estimate['period'], map_estimate['t0']),
fontsize='small')
fig = plt.gcf()
savefig(fig, outpath, writepdf=0, dpi=300)
###############
# convenience #
###############
def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
ax=None, color=None, quiet=False,
plot_datapoints=False, plot_density=False,
plot_contours=True, no_fill_contours=False, fill_contours=True,
contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
pcolor_kwargs=None, **kwargs):
"""
Plot a 2-D histogram of samples.
(Function stolen from corner.py -- Foreman-Mackey, (2016), corner.py:
Scatterplot matrices in Python, Journal of Open Source Software, 1(2), 24,
doi:10.21105/joss.00024.)
Parameters
----------
x : array_like[nsamples,]
The samples.
y : array_like[nsamples,]
The samples.
quiet : bool
If true, suppress warnings for small datasets.
levels : array_like
The contour levels to draw.
ax : matplotlib.Axes
A axes instance on which to add the 2-D histogram.
plot_datapoints : bool
Draw the individual data points.
plot_density : bool
Draw the density colormap.
plot_contours : bool
Draw the contours.
no_fill_contours : bool
Add no filling at all to the contours (unlike setting
``fill_contours=False``, which still adds a white fill at the densest
points).
fill_contours : bool
Fill the contours.
contour_kwargs : dict
Any additional keyword arguments to pass to the `contour` method.
contourf_kwargs : dict
Any additional keyword arguments to pass to the `contourf` method.
data_kwargs : dict
Any additional keyword arguments to pass to the `plot` method when
adding the individual data points.
pcolor_kwargs : dict
Any additional keyword arguments to pass to the `pcolor` method when
adding the density colormap.
"""
if ax is None:
ax = plt.gca()
# Set the default range based on the data range if not provided.
if range is None:
if "extent" in kwargs:
logging.warn("Deprecated keyword argument 'extent'. "
"Use 'range' instead.")
range = kwargs["extent"]
else:
range = [[x.min(), x.max()], [y.min(), y.max()]]
# Set up the default plotting arguments.
if color is None:
color = "k"
# Choose the default "sigma" contour levels.
if levels is None:
levels = 1.0 - np.exp(-0.5 * np.arange(1.0, 3.1, 1.0) ** 2)
# levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
# This is the color map for the density plot, over-plotted to indicate the
# density of the points near the center.
density_cmap = LinearSegmentedColormap.from_list(
"density_cmap", [color, (1, 1, 1, 0)])
# This color map is used to hide the points at the high density areas.
white_cmap = LinearSegmentedColormap.from_list(
"white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)
# This "color map" is the list of colors for the contour levels if the
# contours are filled.
# rgba_color = colorConverter.to_rgba(color)
rgba_color = [0.0, 0.0, 0.0, 0.7]
contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
for i, l in enumerate(levels):
contour_cmap[i][-1] *= float(i) / (len(levels)+1)
# We'll make the 2D histogram to directly estimate the density.
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
range=list(map(np.sort, range)),
weights=weights)
except ValueError:
raise ValueError("It looks like at least one of your sample columns "
"have no dynamic range. You could try using the "
"'range' argument.")
if smooth is not None:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
H = gaussian_filter(H, smooth)
if plot_contours or plot_density:
# Compute the density levels.
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
V = np.empty(len(levels))
for i, v0 in enumerate(levels):
try:
V[i] = Hflat[sm <= v0][-1]
except IndexError:
V[i] = Hflat[0]
V.sort()
m = np.diff(V) == 0
if np.any(m) and not quiet:
logging.warning("Too few points to create valid contours")
while np.any(m):
V[np.where(m)[0][0]] *= 1.0 - 1e-4
m = np.diff(V) == 0
V.sort()
# Compute the bin centers.
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
# Extend the array for the sake of the contours at the plot edges.
H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
H2[2:-2, 2:-2] = H
H2[2:-2, 1] = H[:, 0]
H2[2:-2, -2] = H[:, -1]
H2[1, 2:-2] = H[0]
H2[-2, 2:-2] = H[-1]
H2[1, 1] = H[0, 0]
H2[1, -2] = H[0, -1]
H2[-2, 1] = H[-1, 0]
H2[-2, -2] = H[-1, -1]
X2 = np.concatenate([
X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
X1,
X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
])
Y2 = np.concatenate([
Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
Y1,
Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
])
if plot_datapoints:
if data_kwargs is None:
data_kwargs = dict()
data_kwargs["color"] = data_kwargs.get("color", color)
data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
data_kwargs["mec"] = data_kwargs.get("mec", "none")
data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
# Plot the base fill to hide the densest data points.
if (plot_contours or plot_density) and not no_fill_contours:
ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
cmap=white_cmap, antialiased=False)
if plot_contours and fill_contours:
if contourf_kwargs is None:
contourf_kwargs = dict()
contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
False)
ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
**contourf_kwargs)
# Plot the density map. This can't be plotted at the same time as the
# contour fills.
elif plot_density:
if pcolor_kwargs is None:
pcolor_kwargs = dict()
ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap, **pcolor_kwargs)
# Plot the contour edge colors.
if plot_contours:
cs = ax.contour(X2, Y2, H2.T, V, colors='k', linewidths=1, zorder=3)
if kwargs.get('return_3sigma', False):
# index of 3 sigma line is, in this case, 0! and it's the vertices
# along that contour that we often care about
p = cs.collections[0].get_paths()[0]
v = p.vertices
x_3sig = v[:,0]
y_3sig = v[:,1]
ax.set_xlim(range[0])
ax.set_ylim(range[1])
if not kwargs.get('return_3sigma', False):
return ax
else:
return ax, x_3sig, y_3sig
def plot_contour_2d_samples(xsample, ysample, xgrid, ygrid, outpath,
xlabel='logper', ylabel='logk',
return_3sigma=True, smooth=None):
fig, ax = plt.subplots(figsize=(4,3))
# smooth of 1.0 was ok
bins = (xgrid, ygrid)
ax, x_3sig, y_3sig = hist2d(
xsample, ysample, bins=bins, range=None,
weights=None, levels=None, smooth=smooth, ax=ax, color=None, quiet=False,
plot_datapoints=False, plot_density=False, plot_contours=True,
no_fill_contours=False, fill_contours=True, contour_kwargs=None,
contourf_kwargs=None, data_kwargs=None, pcolor_kwargs=None,
return_3sigma=return_3sigma
)
ax.plot(x_3sig, y_3sig, color='C0', zorder=5, lw=1)
ax.set_xscale('linear')
ax.set_yscale('linear')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.tight_layout(h_pad=0, w_pad=0)
format_ax(ax)
savefig(fig, outpath)
if return_3sigma:
return x_3sig, y_3sig
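# Rough usage sketch for plot_contour_2d_samples, with hypothetical sample
# arrays and output path; return_3sigma=True (the default) also hands back the
# vertices of the 3-sigma contour:
#   xgrid = np.linspace(0, 3, 50)
#   ygrid = np.linspace(-2, 2, 50)
#   x_3sig, y_3sig = plot_contour_2d_samples(
#       logper_samples, logk_samples, xgrid, ygrid,
#       '/path/to/contour_logper_logk.png', smooth=1.0)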
##################
# timmy-specific #
##################
def plot_splitsignal_map(m, outpath):
"""
y_obs + y_MAP + y_rot + y_orb
things at rotation frequency
things at orbital frequency
"""
plt.close('all')
# 8.5x11 is letter paper. x10 allows space for caption.
fig, axs = plt.subplots(nrows=4, figsize=(8.5, 10), sharex=True)
axs[0].set_ylabel('Raw flux', fontsize='x-large')
axs[0].plot(m.x_obs, m.y_obs, ".k", ms=4, label="data", zorder=2,
rasterized=True)
axs[0].plot(m.x_obs, m.map_estimate['mu_model'], lw=0.5, label='MAP',
color='C0', alpha=1, zorder=1)
y_tra = m.map_estimate['mu_transit']
y_gprot = m.map_estimate['mu_gprot']
axs[1].set_ylabel('Transit', fontsize='x-large')
axs[1].plot(m.x_obs, m.y_obs-y_gprot, ".k", ms=4, label="data-rot",
zorder=2, rasterized=True)
axs[1].plot(m.x_obs, m.map_estimate['mu_model']-y_gprot, lw=0.5,
label='model-rot', color='C0', alpha=1, zorder=1)
axs[2].set_ylabel('Rotation', fontsize='x-large')
axs[2].plot(m.x_obs, m.y_obs-y_tra, ".k", ms=4, label="data-transit",
zorder=2, rasterized=True)
axs[2].plot(m.x_obs, m.map_estimate['mu_model']-y_tra, lw=0.5,
label='model-transit', color='C0', alpha=1, zorder=1)
axs[3].set_ylabel('Residual', fontsize='x-large')
axs[3].plot(m.x_obs, m.y_obs-m.map_estimate['mu_model'], ".k", ms=4,
label="data", zorder=2, rasterized=True)
axs[3].plot(m.x_obs, m.map_estimate['mu_model']-m.map_estimate['mu_model'],
lw=0.5, label='model', color='C0', alpha=1, zorder=1)
axs[-1].set_xlabel("Time [days]", fontsize='x-large')
for a in axs:
format_ax(a)
# a.set_ylim((-.075, .075))
# if part == 'i':
# a.set_xlim((0, 9))
# else:
# a.set_xlim((10, 20.3))
# props = dict(boxstyle='square', facecolor='white', alpha=0.9, pad=0.15,
# linewidth=0)
# if part == 'i':
# axs[3].text(0.97, 0.03, 'Orbit 19', ha='right', va='bottom',
# transform=axs[3].transAxes, bbox=props, zorder=3,
# fontsize='x-large')
# else:
# axs[3].text(0.97, 0.03, 'Orbit 20', ha='right', va='bottom',
# transform=axs[3].transAxes, bbox=props, zorder=3,
# fontsize='x-large')
fig.tight_layout(h_pad=0., w_pad=0.)
if not os.path.exists(outpath) or m.OVERWRITE:
savefig(fig, outpath, writepdf=1, dpi=300)
ydict = {
'x_obs': m.x_obs,
'y_obs': m.y_obs,
'y_resid': m.y_obs-m.map_estimate['mu_model'],
'y_mod_tra': y_tra,
'y_mod_gprot': y_gprot,
'y_mod': m.map_estimate['mu_model'],
'y_err': m.y_err
}
return ydict
def plot_quicklooklc(outdir, yval='PDCSAP_FLUX', provenance='spoc',
overwrite=0):
outpath = os.path.join(
outdir, 'quicklooklc_{}_{}.png'.format(provenance, yval)
)
if os.path.exists(outpath) and not overwrite:
print('found {} and no overwrite'.format(outpath))
return
# NOTE: I checked that pre-quality cuts etc, there weren't extra transits
# sitting in the SPOC data.
time, flux, flux_err = get_clean_tessphot(provenance, yval, binsize=None,
maskflares=0)
from wotan import flatten
# flat_flux, trend_flux = flatten(time, flux, method='pspline',
# break_tolerance=0.4, return_trend=True)
flat_flux, trend_flux = flatten(time, flux, method='hspline',
window_length=0.3,
break_tolerance=0.4, return_trend=True)
# flat_flux, trend_flux = flatten(time, flux, method='biweight',
# window_length=0.3, edge_cutoff=0.5,
# break_tolerance=0.4, return_trend=True,
# cval=2.0)
_plot_quicklooklc(outpath, time, flux, flux_err, flat_flux, trend_flux,
showvlines=0, provenance=provenance)
def _plot_quicklooklc(outpath, time, flux, flux_err, flat_flux, trend_flux,
showvlines=0, figsize=(25,8), provenance=None, timepad=1,
titlestr=None, ylim=None):
t0 = 1574.2738
per = 8.32467
epochs = np.arange(-100,100,1)
tra_times = t0 + per*epochs
plt.close('all')
f,axs = plt.subplots(figsize=figsize, nrows=2, sharex=True)
xmin, xmax = np.nanmin(time)-timepad, np.nanmax(time)+timepad
s = 1.5 if provenance == 'spoc' else 2.5*10
axs[0].scatter(time, flux, c='k', zorder=3, s=s, rasterized=True,
linewidths=0)
axs[0].plot(time, trend_flux, c='C0', zorder=4, rasterized=True, lw=1)
axs[1].scatter(time, flat_flux, c='k', zorder=3, s=s, rasterized=True,
linewidths=0)
axs[1].plot(time, trend_flux-trend_flux, c='C0', zorder=4,
rasterized=True, lw=1)
ymin, ymax = np.nanmin(flux), np.nanmax(flux)
axs[0].set_ylim((ymin, ymax))
axs[1].set_ylim(( np.nanmean(flat_flux) - 6*np.nanstd(flat_flux),
np.nanmean(flat_flux) + 4*np.nanstd(flat_flux) ))
axs[0].set_ylabel('raw')
axs[1].set_ylabel('detrended')
axs[1].set_xlabel('BJDTDB')
if isinstance(titlestr, str):
axs[0].set_title(titlestr)
for ax in axs:
if showvlines and provenance == 'spoc':
ymin, ymax = ax.get_ylim()
ax.vlines(
tra_times, ymin, ymax, colors='C1', alpha=0.5,
linestyles='--', zorder=-2, linewidths=0.5
)
ax.set_ylim((ymin, ymax))
if provenance is not None and 'Evans' in provenance:
period = 8.3248972
t0 = 2457000 + 1574.2738304
tdur = 1.91/24
if showvlines and provenance == 'Evans_2020-04-01':
tra_ix = 44
if showvlines and provenance == 'Evans_2020-04-26':
tra_ix = 47
if showvlines and provenance == 'Evans_2020-05-21':
tra_ix = 50
if provenance is not None and 'Evans' in provenance:
ymin, ymax = ax.get_ylim()
ax.vlines(
[t0 + period*tra_ix - tdur/2, t0 + period*tra_ix + tdur/2],
ymin, ymax, colors='C1', alpha=0.5, linestyles='--', zorder=-2,
linewidths=0.5
)
ax.set_ylim((ymin, ymax))
ax.set_xlim((xmin, xmax))
format_ax(ax)
if isinstance(ylim, tuple):
axs[1].set_ylim(ylim)
f.tight_layout(h_pad=0., w_pad=0.)
savefig(f, outpath, writepdf=0, dpi=300)
def plot_raw_zoom(outdir, yval='PDCSAP_FLUX', provenance='spoc',
overwrite=0, detrend=0):
outpath = os.path.join(
outdir, 'raw_zoom_{}_{}.png'.format(provenance, yval)
)
if detrend:
outpath = os.path.join(
outdir, 'raw_zoom_{}_{}_detrended.png'.format(provenance, yval)
)
if os.path.exists(outpath) and not overwrite:
print('found {} and no overwrite'.format(outpath))
return
time, flux, flux_err = get_clean_tessphot(provenance, yval, binsize=None,
maskflares=0)
flat_flux, trend_flux = detrend_tessphot(time, flux, flux_err)
if detrend:
flux = flat_flux
t_offset = np.nanmin(time)
time -= t_offset
FLARETIMES = [
(4.60, 4.63),
(37.533, 37.62)
]
flaresel = np.zeros_like(time).astype(bool)
for ft in FLARETIMES:
flaresel |= ( (time > min(ft)) & (time < max(ft)) )
t0 = 1574.2738 - t_offset
per = 8.32467
epochs = np.arange(-100,100,1)
tra_times = t0 + per*epochs
plt.close('all')
##########################################
# figsize=(8.5, 10) full page... 10 leaves space.
fig = plt.figure(figsize=(8.5, 5))
ax0 = plt.subplot2grid(shape=(2,5), loc=(0,0), colspan=5)
ax1 = plt.subplot2grid((2,5), (1,0), colspan=1)
ax2 = plt.subplot2grid((2,5), (1,1), colspan=1)
ax3 = plt.subplot2grid((2,5), (1,2), colspan=1)
ax4 = plt.subplot2grid((2,5), (1,3), colspan=1)
ax5 = plt.subplot2grid((2,5), (1,4), colspan=1)
all_axs = [ax0,ax1,ax2,ax3,ax4,ax5]
tra_axs = [ax1,ax2,ax3,ax4,ax5]
tra_ixs = [0,2,3,4,5]
# main lightcurve
yval = (flux - np.nanmedian(flux))*1e3
ax0.scatter(time, yval, c='k', zorder=3, s=0.75, rasterized=True,
linewidths=0)
ax0.scatter(time[flaresel], yval[flaresel], c='r', zorder=3, s=1,
rasterized=True, linewidths=0)
ax0.set_ylim((-20, 20)) # omitting like 1 upper point from the big flare at time 38
ymin, ymax = ax0.get_ylim()
ax0.vlines(
tra_times, ymin, ymax, colors='C1', alpha=0.5,
linestyles='--', zorder=-2, linewidths=0.5
)
ax0.set_ylim((ymin, ymax))
ax0.set_xlim((np.nanmin(time)-1, np.nanmax(time)+1))
# zoom-in of raw transits
for ax, tra_ix in zip(tra_axs, tra_ixs):
mid_time = t0 + per*tra_ix
tdur = 2/24. # roughly, in units of days
n = 3.5
start_time = mid_time - n*tdur
end_time = mid_time + n*tdur
s = (time > start_time) & (time < end_time)
ax.scatter(time[s], (flux[s] - np.nanmedian(flux[s]))*1e3, c='k',
zorder=3, s=7, rasterized=False, linewidths=0)
_flaresel = np.zeros_like(time[s]).astype(bool)
for ft in FLARETIMES:
_flaresel |= ( (time[s] > min(ft)) & (time[s] < max(ft)) )
if np.any(_flaresel):
ax.scatter(time[s][_flaresel],
(flux[s] - np.nanmedian(flux[s]))[_flaresel]*1e3,
c='r', zorder=3, s=8, rasterized=False, linewidths=0)
ax.set_xlim((start_time, end_time))
ax.set_ylim((-8, 8))
ymin, ymax = ax.get_ylim()
ax.vlines(
mid_time, ymin, ymax, colors='C1', alpha=0.5,
linestyles='--', zorder=-2, linewidths=0.5
)
ax.set_ylim((ymin, ymax))
if tra_ix > 0:
# hide the ytick labels
labels = [item.get_text() for item in
ax.get_yticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_yticklabels(empty_string_labels)
for ax in all_axs:
format_ax(ax)
fig.text(0.5,-0.01, 'Time [days]', ha='center', fontsize='x-large')
fig.text(-0.01,0.5, 'Relative flux [ppt]', va='center',
rotation=90, fontsize='x-large')
fig.tight_layout(h_pad=0.2, w_pad=-0.5)
savefig(fig, outpath, writepdf=1, dpi=300)
def plot_phasefold(m, summdf, outpath, overwrite=0, show_samples=0,
modelid=None, inppt=0, showerror=1):
set_style()
if modelid is None:
d, params, paramd = _get_fitted_data_dict(m, summdf)
_d = d
elif 'alltransit' in modelid:
d = _get_fitted_data_dict_alltransit(m, summdf)
_d = d['tess']
elif modelid in ['allindivtransit', 'tessindivtransit']:
d = _get_fitted_data_dict_allindivtransit(
m, summdf, bestfitmeans='median'
)
if modelid == 'tessindivtransit':
_d = d['tess']
elif modelid == 'allindivtransit':
_d = d['all']
P_orb = summdf.loc['period', 'median']
t0_orb = summdf.loc['t0', 'median']
# phase and bin them.
binsize = 5e-4
if modelid == 'allindivtransit':
orb_d = phase_magseries_with_errs(
_d['x_obs'], _d['y_obs'], _d['y_err'], P_orb, t0_orb, wrap=True,
sort=True
)
orb_bd = phase_bin_magseries_with_errs(
orb_d['phase'], orb_d['mags'], orb_d['errs'], binsize=binsize,
minbinelems=3, weights=1/(orb_d['errs']**2)
)
elif modelid == 'tessindivtransit':
orb_d = phase_magseries(
_d['x_obs'], _d['y_obs'], P_orb, t0_orb, wrap=True, sort=True
)
orb_bd = phase_bin_magseries(
orb_d['phase'], orb_d['mags'], binsize=binsize, minbinelems=3
)
mod_d = phase_magseries(
_d['x_obs'], _d['y_mod'], P_orb, t0_orb, wrap=True, sort=True
)
resid_bd = phase_bin_magseries(
mod_d['phase'], orb_d['mags'] - mod_d['mags'], binsize=binsize,
minbinelems=3
)
# get the samples. shape: N_samples x N_time
if show_samples:
np.random.seed(42)
N_samples = 20
sample_df = pm.trace_to_dataframe(m.trace, var_names=params)
sample_params = sample_df.sample(n=N_samples, replace=False)
y_mod_samples = []
for ix, p in sample_params.iterrows():
print(ix)
paramd = dict(p)
y_mod_samples.append(get_model_transit(paramd, d['x_obs']))
y_mod_samples = np.vstack(y_mod_samples)
mod_ds = {}
for i in range(N_samples):
mod_ds[i] = phase_magseries(
d['x_obs'], y_mod_samples[i, :], P_orb, t0_orb, wrap=True,
sort=True
)
# make the plot
plt.close('all')
fig, (a0, a1) = plt.subplots(nrows=2, ncols=1, sharex=True,
figsize=(4, 3), gridspec_kw=
{'height_ratios':[3, 2]})
if not inppt:
a0.scatter(orb_d['phase']*P_orb*24, orb_d['mags'], color='gray', s=2,
alpha=0.8, zorder=4, linewidths=0, rasterized=True)
a0.scatter(orb_bd['binnedphases']*P_orb*24, orb_bd['binnedmags'],
color='black', s=8, alpha=1, zorder=5, linewidths=0)
a0.plot(mod_d['phase']*P_orb*24, mod_d['mags'], color='darkgray',
alpha=0.8, rasterized=False, lw=1, zorder=1)
a1.scatter(orb_d['phase']*P_orb*24, orb_d['mags']-mod_d['mags'],
color='gray', s=2, alpha=0.8, zorder=4, linewidths=0,
rasterized=True)
a1.scatter(resid_bd['binnedphases']*P_orb*24, resid_bd['binnedmags'],
color='black', s=8, alpha=1, zorder=5, linewidths=0)
a1.plot(mod_d['phase']*P_orb*24, mod_d['mags']-mod_d['mags'],
color='darkgray', alpha=0.8, rasterized=False, lw=1, zorder=1)
else:
ydiff = 0 if modelid in ['allindivtransit', 'tessindivtransit'] else 1
a0.scatter(orb_d['phase']*P_orb*24, 1e3*(orb_d['mags']-ydiff),
color='darkgray', s=7, alpha=0.5, zorder=4, linewidths=0,
rasterized=True)
a0.scatter(orb_bd['binnedphases']*P_orb*24,
1e3*(orb_bd['binnedmags']-ydiff), color='black', s=18, alpha=1,
zorder=5, linewidths=0)
a0.plot(mod_d['phase']*P_orb*24, 1e3*(mod_d['mags']-ydiff),
color='gray', alpha=0.8, rasterized=False, lw=1, zorder=1)
a1.scatter(orb_d['phase']*P_orb*24, 1e3*(orb_d['mags']-mod_d['mags']),
color='darkgray', s=7, alpha=0.5, zorder=4, linewidths=0,
rasterized=True)
a1.scatter(resid_bd['binnedphases']*P_orb*24,
1e3*resid_bd['binnedmags'], color='black', s=18, alpha=1,
zorder=5, linewidths=0)
a1.plot(mod_d['phase']*P_orb*24, 1e3*(mod_d['mags']-mod_d['mags']),
color='gray', alpha=0.8, rasterized=False, lw=1, zorder=1)
if show_samples:
# NOTE: this comes out looking "bad" because if you phase up a model
# with a different period to the data, it will produce odd
# aliases/spikes.
xvals, yvals = [], []
for i in range(N_samples):
xvals.append(mod_ds[i]['phase']*P_orb*24)
yvals.append(mod_ds[i]['mags'])
a0.plot(mod_ds[i]['phase']*P_orb*24, mod_ds[i]['mags'], color='C1',
alpha=0.2, rasterized=True, lw=0.2, zorder=-2)
a1.plot(mod_ds[i]['phase']*P_orb*24,
mod_ds[i]['mags']-mod_d['mags'], color='C1', alpha=0.2,
rasterized=True, lw=0.2, zorder=-2)
# # N_samples x N_times
# from scipy.ndimage import gaussian_filter1d
# xvals, yvals = nparr(xvals), nparr(yvals)
# model_phase = xvals.mean(axis=0)
# g_std = 100
# n_std = 2
# mean = gaussian_filter1d(yvals.mean(axis=0), g_std)
# diff = gaussian_filter1d(n_std*yvals.std(axis=0), g_std)
# model_flux_lower = mean - diff
# model_flux_upper = mean + diff
# ax.plot(model_phase, model_flux_lower, color='C1',
# alpha=0.8, lw=0.5, zorder=3)
# ax.plot(model_phase, model_flux_upper, color='C1', alpha=0.8,
# lw=0.5, zorder=3)
# ax.fill_between(model_phase, model_flux_lower, model_flux_upper,
# color='C1', alpha=0.5, zorder=3, linewidth=0)
if not inppt:
a0.set_ylabel('Relative flux', fontsize='small')
else:
a0.set_ylabel('Relative flux [ppt]', fontsize='small')
a1.set_ylabel('Residual [ppt]', fontsize='small')
a1.set_xlabel('Hours from mid-transit', fontsize='small')
if not inppt:
a0.set_ylim((0.9925, 1.005))
yv = orb_d['mags']-mod_d['mags']
if inppt:
yv = 1e3*(orb_d['mags']-mod_d['mags'])
a1.set_ylim((np.nanmedian(yv)-3.2*np.nanstd(yv),
np.nanmedian(yv)+3.2*np.nanstd(yv) ))
for a in (a0, a1):
a.set_xlim((-0.011*P_orb*24, 0.011*P_orb*24))
# a.set_xlim((-0.02*P_orb*24, 0.02*P_orb*24))
format_ax(a)
if inppt:
a0.set_ylim((-6.9, 4.1))
for a in (a0, a1):
xval = np.arange(-2,3,1)
a.set_xticks(xval)
yval = np.arange(-5,5,2.5)
a0.set_yticks(yval)
if showerror:
trans = transforms.blended_transform_factory(
a0.transAxes, a0.transData)
if inppt:
method1 = False
if method1:
_e = 1e3*np.median(_d['y_err'])
# bin to roughly 5e-4 * 8.3 * 24 * 60 ~= 6 minute intervals
bintime = binsize*P_orb*24*60
sampletime = 2 # minutes
errorfactor = (sampletime/bintime)**(1/2)
yerr = errorfactor*_e
print(f'{_e:.2f}, {errorfactor*_e:.2f}')
else:
yerr = np.nanstd(1e3*resid_bd['binnedmags'])
a0.errorbar(
0.85, -5, yerr=yerr,
fmt='none', ecolor='black', alpha=1, elinewidth=1, capsize=2,
transform=trans, zorder=12
)
else:
raise NotImplementedError
fig.tight_layout()
savefig(fig, outpath, writepdf=1, dpi=300)
def plot_scene(c_obj, img_wcs, img, outpath, Tmag_cutoff=17, showcolorbar=0,
ap_mask=0, bkgd_mask=0, ticid=None, showdss=1):
plt.close('all')
#
# wcs information parsing
# follow Clara Brasseur's https://github.com/ceb8/tessworkshop_wcs_hack
# (this is from the CDIPS vetting reports...)
#
radius = 6.0*u.arcminute
nbhr_stars = Catalogs.query_region(
"{} {}".format(float(c_obj.ra.value), float(c_obj.dec.value)),
catalog="TIC",
radius=radius
)
try:
px,py = img_wcs.all_world2pix(
nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['ra'],
nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['dec'],
0
)
except Exception as e:
print('ERR! wcs all_world2pix got {}'.format(repr(e)))
raise(e)
ticids = nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['ID']
tmags = nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['Tmag']
sel = (px > 0) & (px < 10) & (py > 0) & (py < 10)
if isinstance(ticid, str):
sel &= (ticids != ticid)
px,py = px[sel], py[sel]
ticids, tmags = ticids[sel], tmags[sel]
ra, dec = float(c_obj.ra.value), float(c_obj.dec.value)
target_x, target_y = img_wcs.all_world2pix(ra,dec,0)
# geometry: there are TWO coordinate systems, (x,y) and (ra,dec). To get their
# relative orientations, using the WCS and ignoring curvature will usually work.
shiftra_x, shiftra_y = img_wcs.all_world2pix(ra+1e-4,dec,0)
shiftdec_x, shiftdec_y = img_wcs.all_world2pix(ra,dec+1e-4,0)
###########
# get DSS #
###########
ra = c_obj.ra.value
dec = c_obj.dec.value
sizepix = 220
try:
dss, dss_hdr = skyview_stamp(ra, dec, survey='DSS2 Red',
scaling='Linear', convolvewith=None,
sizepix=sizepix, flip=False,
cachedir='~/.astrobase/stamp-cache',
verbose=True, savewcsheader=True)
except (OSError, IndexError, TypeError) as e:
print('downloaded FITS appears to be corrupt, retrying...')
try:
dss, dss_hdr = skyview_stamp(ra, dec, survey='DSS2 Red',
scaling='Linear', convolvewith=None,
sizepix=sizepix, flip=False,
cachedir='~/.astrobase/stamp-cache',
verbose=True, savewcsheader=True,
forcefetch=True)
except Exception as e:
print('failed to get DSS stamp ra {} dec {}, error was {}'.
format(ra, dec, repr(e)))
return None, None
##########################################
plt.close('all')
if showdss:
fig = plt.figure(figsize=(4,9))
# ax0: TESS
# ax1: DSS
ax0 = plt.subplot2grid((2, 1), (0, 0), projection=img_wcs)
ax1 = plt.subplot2grid((2, 1), (1, 0), projection=WCS(dss_hdr))
else:
fig = plt.figure(figsize=(4,4))
ax0 = plt.subplot2grid((1, 1), (0, 0), projection=img_wcs)
##########################################
#
# ax0: img
#
#interval = vis.PercentileInterval(99.99)
interval = vis.AsymmetricPercentileInterval(5,99)
vmin,vmax = interval.get_limits(img)
norm = vis.ImageNormalize(
vmin=vmin, vmax=vmax, stretch=vis.LogStretch(1000))
cset0 = ax0.imshow(img, cmap=plt.cm.gray_r, origin='lower', zorder=1,
norm=norm)
if isinstance(ap_mask, np.ndarray):
for x,y in product(range(10),range(10)):
if ap_mask[y,x]:
ax0.add_patch(
patches.Rectangle(
(x-.5, y-.5), 1, 1, hatch='//', fill=False, snap=False,
linewidth=0., zorder=2, alpha=1, rasterized=True,
color='white'
)
)
if isinstance(bkgd_mask, np.ndarray):
for x,y in product(range(10),range(10)):
if bkgd_mask[y,x]:
ax0.add_patch(
patches.Rectangle(
(x-.5, y-.5), 1, 1, hatch='x', fill=False, snap=False,
linewidth=0., zorder=2, alpha=0.7, rasterized=True
)
)
ax0.scatter(px, py, marker='o', c='white', s=1.5*5e4/(tmags**3),
rasterized=False, zorder=6, linewidths=0.5, edgecolors='k')
ax0.plot(target_x, target_y, mew=0.5, zorder=5,
markerfacecolor='yellow', markersize=18, marker='*',
color='k', lw=0)
t = ax0.text(4.0, 5.2, 'A', fontsize=20, color='k', zorder=6)
t.set_path_effects([path_effects.Stroke(linewidth=2.5, foreground='white'),
path_effects.Normal()])
t = ax0.text(4.6, 3.8, 'B', fontsize=20, color='k', zorder=6)
t.set_path_effects([path_effects.Stroke(linewidth=2.5, foreground='white'),
path_effects.Normal()])
if showdss:
ax0.set_title('TESS', fontsize='xx-large')
if showcolorbar:
cb0 = fig.colorbar(cset0, ax=ax0, extend='neither', fraction=0.046, pad=0.04)
if showdss:
#
# ax1: DSS
#
cset1 = ax1.imshow(dss, origin='lower', cmap=plt.cm.gray_r)
ax1.grid(ls='--', alpha=0.5)
ax1.set_title('DSS2 Red', fontsize='xx-large')
if showcolorbar:
cb1 = fig.colorbar(cset1, ax=ax1, extend='neither', fraction=0.046,
pad=0.04)
# # DSS is ~1 arcsecond per pixel. overplot apertures on axes 6,7
# for ix, radius_px in enumerate([21,21*1.5,21*2.25]):
# circle = plt.Circle((sizepix/2, sizepix/2), radius_px,
# color='C{}'.format(ix), fill=False, zorder=5+ix)
# ax1.add_artist(circle)
#
# INTERMEDIATE STEP, SINCE TESS IMAGES ARE NOW PLOTTED
#
for ax in [ax0]:
lon = ax.coords[0]
lat = ax.coords[1]
lat.set_major_formatter('dd:mm:ss')
lon.set_major_formatter('hh:mm:ss')
lat.set_ticks(spacing=60*u.arcsec)
lon.set_ticks(spacing=120*u.arcsec)
lon.set_ticks_visible(False)
lat.set_ticks_visible(False)
lon.set_ticklabel_visible(False)
lat.set_ticklabel_visible(False)
invert_x, invert_y = False, False
if shiftra_x - target_x > 0:
# want RA to increase to the left (almost E)
ax.invert_xaxis()
invert_x = True
if shiftdec_y - target_y < 0:
# want DEC to increase up (almost N)
ax.invert_yaxis()
invert_y = True
compass(ax, 0.84, 0.03, 0.12, invert_x=invert_x, invert_y=invert_y)
ax.grid(ls='--', alpha=0.5)
if showdss:
axlist = [ax0,ax1]
else:
axlist = [ax0]
for ax in axlist:
ax.set_xlabel(r'$\alpha_{2000}$', fontsize='large')
ax.set_ylabel(r'$\delta_{2000}$', fontsize='large')
if showcolorbar:
fig.tight_layout(h_pad=-8, w_pad=-8)
else:
fig.tight_layout(h_pad=1, w_pad=1)
savefig(fig, outpath, dpi=300)
def compass(ax, x, y, size, invert_x=False, invert_y=False):
"""Add a compass to indicate the north and east directions.
Parameters
----------
x, y : float
Position of compass vertex in axes coordinates.
size : float
Size of compass in axes coordinates.
"""
xy = x, y
scale = ax.wcs.pixel_scale_matrix
scale /= np.sqrt(np.abs(np.linalg.det(scale)))
for n, label, ha, va in zip(
scale, 'EN', ['right', 'center'], ['center', 'bottom']
):
if invert_x:
n[0] *= -1
if invert_y:
n[1] *= -1
ax.annotate(label, xy, xy + size * n, ax.transAxes, ax.transAxes,
ha='center', va='center',
arrowprops=dict(arrowstyle='<-', shrinkA=0.0, shrinkB=0.0))
def plot_hr(outdir, isochrone=None, do_cmd=0, color0='phot_bp_mean_mag'):
set_style()
# from cdips.tests.test_nbhd_plot
pklpath = '/Users/luke/Dropbox/proj/timmy/results/cluster_membership/nbhd_info_5251470948229949568.pkl'
info = pickle.load(open(pklpath, 'rb'))
(targetname, groupname, group_df_dr2, target_df, nbhd_df,
cutoff_probability, pmdec_min, pmdec_max, pmra_min, pmra_max,
group_in_k13, group_in_cg18, group_in_kc19, group_in_k18
) = info
if isochrone in ['mist', 'parsec']:
if isochrone == 'mist':
from timmy.read_mist_model import ISOCMD
isocmdpath = os.path.join(DATADIR, 'cluster',
'MIST_isochrones_age7pt60206_Av0pt217_FeH0',
'MIST_iso_5f04eb2b54f51.iso.cmd')
# relevant params: star_mass log_g log_L log_Teff Gaia_RP_DR2Rev
# Gaia_BP_DR2Rev Gaia_G_DR2Rev
isocmd = ISOCMD(isocmdpath)
# 10, 20, 30, 40 Myr.
assert len(isocmd.isocmds) == 4
elif isochrone == 'parsec':
isopath = os.path.join(DATADIR, 'cluster', 'PARSEC_isochrones',
'output799447099984.dat')
iso_df = pd.read_csv(isopath, delim_whitespace=True)
##########
plt.close('all')
f, ax = plt.subplots(figsize=(4,3))
if not do_cmd:
nbhd_yval = np.array(nbhd_df['phot_g_mean_mag'] +
5*np.log10(nbhd_df['parallax']/1e3) + 5)
else:
nbhd_yval = np.array(nbhd_df['phot_g_mean_mag'])
ax.scatter(
nbhd_df[color0]-nbhd_df['phot_rp_mean_mag'], nbhd_yval,
c='gray', alpha=1., zorder=2, s=5, rasterized=True, linewidths=0,
label='Neighborhood', marker='.'
)
if not do_cmd:
yval = group_df_dr2['phot_g_mean_mag'] + 5*np.log10(group_df_dr2['parallax']/1e3) + 5
if isochrone == 'mist':
mediancorr = (
np.nanmedian(5*np.log10(group_df_dr2['parallax']/1e3))
+ 5 + 5.9
)
elif isochrone == 'parsec':
mediancorr = (
np.nanmedian(5*np.log10(group_df_dr2['parallax']/1e3))
+ 5 + 5.6
)
else:
yval = group_df_dr2['phot_g_mean_mag']
ax.scatter(
group_df_dr2[color0]-group_df_dr2['phot_rp_mean_mag'],
yval,
c='k', alpha=1., zorder=3, s=5, rasterized=True, linewidths=0,
label='Members'# 'CG18 P>0.1'
)
if not do_cmd:
target_yval = np.array(target_df['phot_g_mean_mag'] +
5*np.log10(target_df['parallax']/1e3) + 5)
else:
target_yval = np.array(target_df['phot_g_mean_mag'])
if do_cmd:
mfc = 'k'
m = 'X'
ms = 6
lw = 0
mec = 'white'
else:
mfc = 'yellow'
m = '*'
ms = 14
lw = 0
mec = 'k'
ax.plot(
target_df[color0]-target_df['phot_rp_mean_mag'],
target_yval,
alpha=1, mew=0.5, zorder=8, label='TOI 837', markerfacecolor=mfc,
markersize=ms, marker=m, color='black', lw=lw, mec=mec
)
if isochrone:
if isochrone == 'mist':
if not do_cmd:
print(f'{mediancorr:.2f}')
ages = [10, 20, 30, 40]
N_ages = len(ages)
colors = plt.cm.cool(np.linspace(0,1,N_ages))[::-1]
for i, (a, c) in enumerate(zip(ages, colors)):
mstar = isocmd.isocmds[i]['star_mass']
sel = (mstar < 7)
if not do_cmd:
_yval = isocmd.isocmds[i]['Gaia_G_DR2Rev'][sel] + mediancorr
else:
corr = 5.75
_yval = isocmd.isocmds[i]['Gaia_G_DR2Rev'][sel] + corr
if color0 == 'phot_bp_mean_mag':
_c0 = 'Gaia_BP_DR2Rev'
elif color0 == 'phot_g_mean_mag':
_c0 = 'Gaia_G_DR2Rev'
else:
raise NotImplementedError
ax.plot(
isocmd.isocmds[i][_c0][sel]-isocmd.isocmds[i]['Gaia_RP_DR2Rev'][sel],
_yval,
c=c, alpha=1., zorder=4, label=f'{a} Myr', lw=0.5
)
if i == 3 and do_cmd:
sel = (mstar < 1.15) & (mstar > 1.08)
print(mstar[sel])
teff = 10**isocmd.isocmds[i]['log_Teff']
print(teff[sel])
logg = isocmd.isocmds[i]['log_g']
print(logg[sel])
rstar = ((( (10**logg)*u.cm/(u.s*u.s)) /
(const.G*mstar*u.Msun))**(-1/2)).to(u.Rsun)
print(rstar[sel])
rho = (mstar*u.Msun/(4/3*np.pi*rstar**3)).cgs
print(rho[sel])
_yval = isocmd.isocmds[i]['Gaia_G_DR2Rev'][sel] + corr
ax.scatter(
isocmd.isocmds[i][_c0][sel]-isocmd.isocmds[i]['Gaia_RP_DR2Rev'][sel],
_yval,
c=c, alpha=1., zorder=10, s=0.5, marker=".", linewidths=0
)
elif isochrone == 'parsec':
if not do_cmd:
print(f'{mediancorr:.2f}')
ages = [10, 20, 30, 40]
logages = [7, 7.30103, 7.47712, 7.60206]
N_ages = len(ages)
colors = plt.cm.cool(np.linspace(0,1,N_ages))[::-1]
for i, (a, la, c) in enumerate(zip(ages, logages, colors)):
sel = (np.abs(iso_df.logAge - la) < 0.01) & (iso_df.Mass < 7)
if not do_cmd:
_yval = iso_df[sel]['Gmag'] + mediancorr
else:
corr = 5.65
_yval = iso_df[sel]['Gmag'] + corr
if color0 == 'phot_bp_mean_mag':
_c0 = 'G_BPmag'
elif color0 == 'phot_g_mean_mag':
_c0 = 'Gmag'
else:
raise NotImplementedError
ax.plot(
iso_df[sel][_c0]-iso_df[sel]['G_RPmag'],
_yval,
c=c, alpha=1., zorder=4, label=f'{a} Myr', lw=0.5
)
if i == 3 and do_cmd:
sel = (
(np.abs(iso_df.logAge - la) < 0.01) &
(iso_df.Mass < 1.1) &
(iso_df.Mass > 0.95)
)
mstar = np.array(iso_df.Mass)
print(42*'#')
print(f'{_c0} - Rp')
print(mstar[sel])
teff = np.array(10**iso_df['logTe'])
print(teff[sel])
logg = np.array(iso_df['logg'])
print(logg[sel])
rstar = ((( (10**logg)*u.cm/(u.s*u.s)) /
(const.G*mstar*u.Msun))**(-1/2)).to(u.Rsun)
print(rstar[sel])
rho = (mstar*u.Msun/(4/3*np.pi*rstar**3)).cgs
print(rho[sel])
_yval = iso_df[sel]['Gmag'] + corr
ax.scatter(
iso_df[sel][_c0]-iso_df[sel]['G_RPmag'],
_yval,
c='red', alpha=1., zorder=10, s=2, marker=".", linewidths=0
)
ax.legend(loc='upper right', handletextpad=0.1, fontsize='x-small', framealpha=0.7)
#if not do_cmd:
# ax.legend(loc='best', handletextpad=0.1, fontsize='x-small', framealpha=0.7)
#else:
# ax.legend(loc='upper right', handletextpad=0.1, fontsize='x-small', framealpha=0.7)
if not do_cmd:
ax.set_ylabel('Absolute G [mag]', fontsize='large')
else:
ax.set_ylabel('G [mag]', fontsize='large')
if color0 == 'phot_bp_mean_mag':
ax.set_xlabel('Bp - Rp [mag]', fontsize='large')
elif color0 == 'phot_g_mean_mag':
ax.set_xlabel('G - Rp [mag]', fontsize='large')
else:
raise NotImplementedError
ylim = ax.get_ylim()
ax.set_ylim((max(ylim),min(ylim)))
format_ax(ax)
if not isochrone:
s = ''
else:
s = '_'+isochrone
c0s = '_Bp_m_Rp' if color0 == 'phot_bp_mean_mag' else '_G_m_Rp'
if not do_cmd:
outpath = os.path.join(outdir, f'hr{s}{c0s}.png')
else:
outpath = os.path.join(outdir, f'cmd{s}{c0s}.png')
savefig(f, outpath, dpi=400)
def plot_positions(outdir):
# from cdips.tests.test_nbhd_plot
pklpath = '/Users/luke/Dropbox/proj/timmy/results/cluster_membership/nbhd_info_5251470948229949568.pkl'
info = pickle.load(open(pklpath, 'rb'))
(targetname, groupname, group_df_dr2, target_df, nbhd_df,
cutoff_probability, pmdec_min, pmdec_max, pmra_min, pmra_max,
group_in_k13, group_in_cg18, group_in_kc19, group_in_k18
) = info
##########
plt.close('all')
# ra vs ra
# dec vs ra --- dec vs dec
# parallax vs ra --- parallax vs dec --- parallax vs parallax
f, axs = plt.subplots(figsize=(4,4), nrows=2, ncols=2)
ax_ixs = [(0,0),(1,0),(1,1)]
xy_tups = [('ra', 'dec'), ('ra', 'parallax'), ('dec', 'parallax')]
ldict = {
'ra': r'$\alpha$ [deg]',
'dec': r'$\delta$ [deg]',
'parallax': r'$\pi$ [mas]'
}
for ax_ix, xy_tup in zip(ax_ixs, xy_tups):
i, j = ax_ix[0], ax_ix[1]
xv, yv = xy_tup[0], xy_tup[1]
axs[i,j].scatter(
nbhd_df[xv], nbhd_df[yv], c='gray', alpha=0.9, zorder=2, s=5,
rasterized=True, linewidths=0, label='Neighborhood', marker='.'
)
axs[i,j].scatter(
group_df_dr2[xv], group_df_dr2[yv], c='k', alpha=0.9,
zorder=3, s=5, rasterized=True, linewidths=0, label='Members'
)
axs[i,j].plot(
target_df[xv], target_df[yv], alpha=1, mew=0.5, zorder=8,
label='TOI 837', markerfacecolor='yellow', markersize=9, marker='*',
color='black', lw=0
)
axs[0,0].set_ylabel(ldict['dec'])
axs[1,0].set_xlabel(ldict['ra'])
axs[1,0].set_ylabel(ldict['parallax'])
axs[1,1].set_xlabel(ldict['dec'])
axs[0,1].set_axis_off()
# ax.legend(loc='best', handletextpad=0.1, fontsize='x-small', framealpha=0.7)
for ax in axs.flatten():
format_ax(ax)
outpath = os.path.join(outdir, 'positions.png')
savefig(f, outpath)
def plot_velocities(outdir):
# from cdips.tests.test_nbhd_plot
pklpath = '/Users/luke/Dropbox/proj/timmy/results/cluster_membership/nbhd_info_5251470948229949568.pkl'
info = pickle.load(open(pklpath, 'rb'))
(targetname, groupname, group_df_dr2, target_df, nbhd_df,
cutoff_probability, pmdec_min, pmdec_max, pmra_min, pmra_max,
group_in_k13, group_in_cg18, group_in_kc19, group_in_k18
) = info
##########
plt.close('all')
f, axs = plt.subplots(figsize=(4,4), nrows=2, ncols=2)
ax_ixs = [(0,0),(1,0),(1,1)]
xy_tups = [('pmra', 'pmdec'),
('pmra', 'radial_velocity'),
('pmdec', 'radial_velocity')]
ldict = {
'pmra': r'$\mu_{{\alpha}} \cos\delta$ [mas/yr]',
'pmdec': r'$\mu_{{\delta}}$ [mas/yr]',
'radial_velocity': 'RV [km/s]'
}
for ax_ix, xy_tup in zip(ax_ixs, xy_tups):
i, j = ax_ix[0], ax_ix[1]
xv, yv = xy_tup[0], xy_tup[1]
axs[i,j].scatter(
nbhd_df[xv], nbhd_df[yv], c='gray', alpha=0.9, zorder=2, s=5,
rasterized=True, linewidths=0, label='Neighborhood', marker='.'
)
axs[i,j].scatter(
group_df_dr2[xv], group_df_dr2[yv], c='k', alpha=0.9,
zorder=3, s=5, rasterized=True, linewidths=0, label='Members'
)
axs[i,j].plot(
target_df[xv], target_df[yv], alpha=1, mew=0.5, zorder=8,
label='TOI 837', markerfacecolor='yellow', markersize=9, marker='*',
color='black', lw=0
)
axs[0,0].set_ylabel(ldict['pmdec'])
axs[1,0].set_xlabel(ldict['pmra'])
axs[1,0].set_ylabel(ldict['radial_velocity'])
axs[1,1].set_xlabel(ldict['pmdec'])
axs[0,1].set_axis_off()
# ax.legend(loc='best', handletextpad=0.1, fontsize='x-small', framealpha=0.7)
for ax in axs.flatten():
format_ax(ax)
outpath = os.path.join(outdir, 'velocities.png')
savefig(f, outpath)
def plot_full_kinematics(outdir):
set_style()
# from cdips.tests.test_nbhd_plot
pklpath = '/Users/luke/Dropbox/proj/timmy/results/cluster_membership/nbhd_info_5251470948229949568.pkl'
info = pickle.load(open(pklpath, 'rb'))
(targetname, groupname, group_df_dr2, target_df, nbhd_df,
cutoff_probability, pmdec_min, pmdec_max, pmra_min, pmra_max,
group_in_k13, group_in_cg18, group_in_kc19, group_in_k18
) = info
##########
plt.close('all')
params = ['ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity']
nparams = len(params)
qlimd = {
'ra': 0,
'dec': 0,
'parallax': 0,
'pmra': 1,
'pmdec': 1,
'radial_velocity': 1
} # whether to limit axis by quantile
ldict = {
'ra': r'$\alpha$ [deg]',
'dec': r'$\delta$ [deg]',
'parallax': r'$\pi$ [mas]',
'pmra': r'$\mu_{{\alpha}} \cos\delta$ [mas/yr]',
'pmdec': r'$\mu_{{\delta}}$ [mas/yr]',
'radial_velocity': 'RV [km/s]'
}
f, axs = plt.subplots(figsize=(6,6), nrows=nparams-1, ncols=nparams-1)
for i in range(nparams):
for j in range(nparams):
print(i,j)
if j == nparams-1 or i == nparams-1:
continue
if j>i:
axs[i,j].set_axis_off()
continue
xv = params[j]
yv = params[i+1]
print(i,j,xv,yv)
axs[i,j].scatter(
nbhd_df[xv], nbhd_df[yv], c='gray', alpha=0.9, zorder=2, s=5,
rasterized=True, linewidths=0, label='Neighborhood', marker='.'
)
axs[i,j].scatter(
group_df_dr2[xv], group_df_dr2[yv], c='k', alpha=0.9,
zorder=3, s=5, rasterized=True, linewidths=0, label='Members'
)
axs[i,j].plot(
target_df[xv], target_df[yv], alpha=1, mew=0.5,
zorder=8, label='TOI 837', markerfacecolor='yellow',
markersize=14, marker='*', color='black', lw=0
)
# set the axis limits as needed
if qlimd[xv]:
xlim = (np.nanpercentile(nbhd_df[xv], 25),
np.nanpercentile(nbhd_df[xv], 75))
axs[i,j].set_xlim(xlim)
if qlimd[yv]:
ylim = (np.nanpercentile(nbhd_df[yv], 25),
np.nanpercentile(nbhd_df[yv], 75))
axs[i,j].set_ylim(ylim)
# fix labels
if j == 0 :
axs[i,j].set_ylabel(ldict[yv])
if not i == nparams - 2:
# hide xtick labels
labels = [item.get_text() for item in axs[i,j].get_xticklabels()]
empty_string_labels = ['']*len(labels)
axs[i,j].set_xticklabels(empty_string_labels)
if i == nparams - 2:
axs[i,j].set_xlabel(ldict[xv])
if not j == 0:
# hide ytick labels
labels = [item.get_text() for item in axs[i,j].get_yticklabels()]
empty_string_labels = ['']*len(labels)
axs[i,j].set_yticklabels(empty_string_labels)
if (not (j == 0)) and (not (i == nparams - 2)):
# hide ytick labels
labels = [item.get_text() for item in axs[i,j].get_yticklabels()]
empty_string_labels = ['']*len(labels)
axs[i,j].set_yticklabels(empty_string_labels)
# hide xtick labels
labels = [item.get_text() for item in axs[i,j].get_xticklabels()]
empty_string_labels = ['']*len(labels)
axs[i,j].set_xticklabels(empty_string_labels)
# axs[2,2].legend(loc='best', handletextpad=0.1, fontsize='medium', framealpha=0.7)
# leg = axs[2,2].legend(bbox_to_anchor=(0.8,0.8), loc="upper right",
# handletextpad=0.1, fontsize='medium',
# bbox_transform=f.transFigure)
for ax in axs.flatten():
format_ax(ax)
f.tight_layout(h_pad=0.1, w_pad=0.1)
outpath = os.path.join(outdir, 'full_kinematics.png')
savefig(f, outpath)
def plot_groundscene(c_obj, img_wcs, img, outpath, Tmag_cutoff=17,
showcolorbar=0, ticid=None, xlim=None, ylim=None,
ap_mask=0, customap=0):
plt.close('all')
# standard tick formatting fails for these images.
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
#
# wcs information parsing
# follow Clara Brasseur's https://github.com/ceb8/tessworkshop_wcs_hack
# (this is from the CDIPS vetting reports...)
#
radius = 6.0*u.arcminute
nbhr_stars = Catalogs.query_region(
"{} {}".format(float(c_obj.ra.value), float(c_obj.dec.value)),
catalog="TIC",
radius=radius
)
try:
px,py = img_wcs.all_world2pix(
nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['ra'],
nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['dec'],
0
)
except Exception as e:
print('ERR! wcs all_world2pix got {}'.format(repr(e)))
raise e
ticids = nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['ID']
tmags = nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['Tmag']
sel = (px > 0) & (px < img.shape[1]) & (py > 0) & (py < img.shape[0])
if isinstance(ticid, str):
sel &= (ticids != ticid)
px,py = px[sel], py[sel]
ticids, tmags = ticids[sel], tmags[sel]
ra, dec = float(c_obj.ra.value), float(c_obj.dec.value)
target_x, target_y = img_wcs.all_world2pix(ra,dec,0)
# geometry: there are TWO coordinate axes. (x,y) and (ra,dec). To get their
# relative orientations, using the WCS and ignoring curvature will usually work.
shiftra_x, shiftra_y = img_wcs.all_world2pix(ra+1e-4,dec,0)
shiftdec_x, shiftdec_y = img_wcs.all_world2pix(ra,dec+1e-4,0)
##########################################
plt.close('all')
fig = plt.figure(figsize=(5,5))
# ax: whatever the groundbased image was
ax = plt.subplot2grid((1, 1), (0, 0), projection=img_wcs)
##########################################
#
# ax0: img
#
#interval = vis.PercentileInterval(99.99)
#interval = vis.AsymmetricPercentileInterval(1,99.9)
vmin,vmax = 10, int(1e4)
norm = vis.ImageNormalize(
vmin=vmin, vmax=vmax, stretch=vis.LogStretch(1000))
cset0 = ax.imshow(img, cmap=plt.cm.gray, origin='lower', zorder=1,
norm=norm)
if isinstance(ap_mask, np.ndarray):
for x,y in product(range(10),range(10)):
if ap_mask[y,x]:
ax.add_patch(
patches.Rectangle(
(x-.5, y-.5), 1, 1, hatch='//', fill=False, snap=False,
linewidth=0., zorder=2, alpha=0.7, rasterized=True
)
)
ax.scatter(px, py, marker='o', c='C1', s=2e4/(tmags**3), rasterized=True,
zorder=6, linewidths=0.8)
# ax0.scatter(px, py, marker='x', c='C1', s=20, rasterized=True,
# zorder=6, linewidths=0.8)
ax.plot(target_x, target_y, mew=0.5, zorder=5, markerfacecolor='yellow',
markersize=10, marker='*', color='k', lw=0)
if customap:
datestr = [e for e in outpath.split('/') if '2020' in e][0]
tdir = (
'/Users/luke/Dropbox/proj/timmy/results/groundphot/{}/photutils_apphot/'.
format(datestr)
)
inpath = os.path.join(
tdir,
os.path.basename(outpath).replace(
'groundscene.png', 'customtable.fits')
)
chdul = fits.open(inpath)
d = chdul[1].data
chdul.close()
xc, yc = d['xcenter'], d['ycenter']
colors = ['C{}'.format(ix) for ix in range(len(xc))]
for _x, _y, _c in zip(xc, yc, colors):
ax.scatter(_x, _y, marker='x', c=_c, s=20, rasterized=True,
zorder=6, linewidths=0.8)
ax.set_title('El Sauce 36cm', fontsize='xx-large')
if showcolorbar:
cb0 = fig.colorbar(cset0, ax=ax, extend='neither', fraction=0.046, pad=0.04)
#
# fix the axes
#
ax.grid(ls='--', alpha=0.5)
if shiftra_x - target_x > 0:
# want RA to increase to the left (almost E)
ax.invert_xaxis()
if shiftdec_y - target_y < 0:
# want DEC to increase up (almost N)
ax.invert_yaxis()
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
format_ax(ax)
ax.set_xlabel(r'$\alpha_{2000}$')
ax.set_ylabel(r'$\delta_{2000}$')
if showcolorbar:
fig.tight_layout(h_pad=-8, w_pad=-8)
else:
fig.tight_layout(h_pad=1, w_pad=1)
savefig(fig, outpath, writepdf=0, dpi=300)
def shift_img_plot(img, shift_img, xlim, ylim, outpath, x0, y0, target_x,
target_y, titlestr0, titlestr1, showcolorbar=1):
vmin,vmax = 10, int(1e4)
norm = vis.ImageNormalize(
vmin=vmin, vmax=vmax, stretch=vis.LogStretch(1000))
fig, axs = plt.subplots(nrows=2,ncols=1)
axs[0].imshow(img, cmap=plt.cm.gray, origin='lower', norm=norm)
axs[0].set_title(titlestr0)
axs[0].plot(target_x, target_y, mew=0.5, zorder=5,
markerfacecolor='yellow', markersize=3, marker='*', color='k',
lw=0)
cset = axs[1].imshow(shift_img, cmap=plt.cm.gray, origin='lower', norm=norm)
axs[1].set_title(titlestr1)
axs[1].plot(x0, y0, mew=0.5, zorder=5, markerfacecolor='yellow',
markersize=3, marker='*', color='k', lw=0)
for ax in axs:
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
format_ax(ax)
if showcolorbar:
raise NotImplementedError
cb0 = fig.colorbar(cset, ax=axs[1], extend='neither', fraction=0.046, pad=0.04)
fig.tight_layout()
savefig(fig, outpath, writepdf=0, dpi=300)
def plot_pixel_lc(times, img_cube, outpath, showvlines=0):
# 20x20 around target pixel
nrows, ncols = 20, 20
fig, axs = plt.subplots(figsize=(20,20), nrows=nrows, ncols=ncols,
sharex=True)
x0, y0 = 768, 512 # in array coordinates
xmin, xmax = x0-10, x0+10 # note: xmin/xmax in mpl coordinates (not ndarray coordinates)
ymin, ymax = y0-10, y0+10 # note: ymin/ymax in mpl coordinates (not ndarray coordinates)
props = dict(boxstyle='square', facecolor='white', alpha=0.9, pad=0.15,
linewidth=0)
time_offset = np.nanmin(times)
times -= time_offset
N_trim = 47 # per <NAME> reduction notes
if '2020-04-01' not in outpath:
raise NotImplementedError(
'pixel LCs are deprecated. only 2020-04-01 was implemented'
)
for ax_i, data_i in enumerate(range(xmin, xmax)):
for ax_j, data_j in enumerate(list(range(ymin, ymax))[::-1]):
# note: y reverse is the same as "origin = lower"
print(ax_i, ax_j, data_i, data_j)
axs[ax_j,ax_i].scatter(
times[N_trim:], img_cube[N_trim:, data_j, data_i], c='k', zorder=3, s=2,
rasterized=True, linewidths=0
)
tstr = (
'{:.1f}\n{} {}'.format(
np.nanpercentile( img_cube[N_trim:, data_j, data_i], 99),
data_i, data_j)
)
axs[ax_j,ax_i].text(0.97, 0.03, tstr, ha='right', va='bottom',
transform=axs[ax_j,ax_i].transAxes, bbox=props,
zorder=-1, fontsize='xx-small')
if showvlines:
ylim = axs[ax_j,ax_i].get_ylim()
axs[ax_j,ax_i].vlines(
[2458940.537 - time_offset, 2458940.617 - time_offset],
min(ylim), max(ylim), colors='C1', alpha=0.5, linestyles='--',
zorder=-2, linewidths=1
)
axs[ax_j,ax_i].set_ylim(ylim)
# hide ytick labels
labels = [item.get_text() for item in axs[ax_j,ax_i].get_yticklabels()]
empty_string_labels = ['']*len(labels)
axs[ax_j,ax_i].set_yticklabels(empty_string_labels)
format_ax(axs[ax_j,ax_i])
fig.tight_layout(h_pad=0, w_pad=0)
savefig(fig, outpath, writepdf=0, dpi=300)
def vis_photutils_lcs(datestr, ap, overwrite=1):
outpath = os.path.join(
RESULTSDIR, 'groundphot', datestr, 'vis_photutils_lcs',
'vis_photutils_lcs_{}.png'.format(ap)
)
if os.path.exists(outpath) and not overwrite:
print('found {} and no overwrite'.format(outpath))
return
lcdir = '/Users/luke/Dropbox/proj/timmy/results/groundphot/{}/vis_photutils_lcs'.format(datestr)
lcpaths = glob(os.path.join(lcdir, 'TIC*csv'))
lcs = [pd.read_csv(l) for l in lcpaths]
target_ticid = '460205581' # TOI 837
target_lc = pd.read_csv(glob(os.path.join(
lcdir, 'TIC*{}*csv'.format(target_ticid)))[0]
)
if datestr == '2020-04-01':
N_trim = 47 # drop the first 47 points due to clouds
elif datestr == '2020-04-26':
N_trim = 0
elif datestr == '2020-05-21':
N_trim = 0
else:
raise NotImplementedError('pls manually set N_trim')
time = target_lc['BJD_TDB'][N_trim:]
flux = target_lc[ap][N_trim:]
mean_flux = np.nanmean(flux)
flux /= mean_flux
print(42*'-')
print(target_ticid, target_lc.id.iloc[0], mean_flux)
comp_mean_fluxs = nparr([np.nanmean(lc[ap][N_trim:]) for lc in lcs])
comp_inds = np.argsort(np.abs(mean_flux - comp_mean_fluxs))
# cap the number of comparison stars at 10.
N_comp_max = 10
#
# finally, make the plot
#
plt.close('all')
fig, ax = plt.subplots(figsize=(8,8))
ax.scatter(time, flux, c='k', zorder=3, s=3, rasterized=True, linewidths=0)
tstr = 'TIC{}'.format(target_ticid)
props = dict(boxstyle='square', facecolor='white', alpha=0.5, pad=0.15,
linewidth=0)
ax.text(np.nanpercentile(time, 97), np.nanpercentile(flux, 3), tstr,
ha='right', va='top', bbox=props, zorder=-1, fontsize='small')
offset = 0.3
outdf = pd.DataFrame({})
for ix, comp_ind in enumerate(comp_inds[1:N_comp_max+1]):
lc = lcs[comp_ind]
time = lc['BJD_TDB'][N_trim:]
flux = lc[ap][N_trim:]
mean_flux = np.nanmean(flux)
flux /= mean_flux
print(lc['ticid'].iloc[0], lc.id.iloc[0], mean_flux)
color = 'C{}'.format(ix)
ax.scatter(time, flux+offset, s=3, rasterized=True, linewidths=0,
c=color)
tstr = 'TIC{}'.format(lc['ticid'].iloc[0])
ax.text(np.nanpercentile(time, 97), np.nanpercentile(flux+offset, 50),
tstr, ha='right', va='top', bbox=props, zorder=-1,
fontsize='small', color=color)
offset += 0.3
t = lc['ticid'].iloc[0]
outdf['time_{}'.format(t)] = time
outdf['flux_{}'.format(t)] = flux
outdf['absflux_{}'.format(t)] = flux*mean_flux
outcsvpath = os.path.join(
RESULTSDIR, 'groundphot', datestr, 'vis_photutils_lcs',
'vis_photutils_lcs_compstars_{}.csv'.format(ap)
)
outdf.to_csv(outcsvpath, index=False)
print('made {}'.format(outcsvpath))
format_ax(ax)
fig.tight_layout()
savefig(fig, outpath, writepdf=0, dpi=300)
def stackviz_blend_check(datestr, apn, soln=0, overwrite=1, adaptiveoffset=1,
N_comp=5):
if soln == 1:
raise NotImplementedError('gotta implement image+aperture inset axes')
if adaptiveoffset:
outdir = os.path.join(
RESULTSDIR, 'groundphot', datestr,
f'stackviz_blend_check_adaptiveoffset_Ncomp{N_comp}'
)
else:
outdir = os.path.join(
RESULTSDIR, 'groundphot', datestr,
f'stackviz_blend_check_noadaptiveoffset_Ncomp{N_comp}'
)
if not os.path.exists(outdir):
os.mkdir(outdir)
outpath = os.path.join(outdir, 'stackviz_blend_check_{}.png'.format(apn))
if os.path.exists(outpath) and not overwrite:
print('found {} and no overwrite'.format(outpath))
return
lcdir = f'/Users/luke/Dropbox/proj/timmy/results/groundphot/{datestr}/compstar_detrend_Ncomp{N_comp}'
lcpaths = np.sort(glob(os.path.join(
lcdir, 'toi837_detrended*_sum_{}_*.csv'.format(apn))))
assert len(lcpaths) == 13
origlcdir = f'/Users/luke/Dropbox/proj/timmy/results/groundphot/{datestr}/vis_photutils_lcs'
lcs = [pd.read_csv(l) for l in lcpaths]
N_lcs = len(lcpaths)
#
# make the plot
#
plt.close('all')
fig, ax = plt.subplots(figsize=(8,N_lcs))
offset = 0
props = dict(boxstyle='square', facecolor='white', alpha=0.5, pad=0.15,
linewidth=0)
for ix, lc in enumerate(lcs):
time = nparr(lc['time'])
flux = nparr(lc['flat_flux'])
_id = str(ix+1).zfill(4)
origpath = os.path.join(
origlcdir, 'CUSTOM{}_photutils_groundlc.csv'.format(_id)
)
odf = pd.read_csv(origpath)
ra, dec = np.mean(odf['sky_center.ra']), np.mean(odf['sky_center.dec'])
print(_id, ra, dec)
color = 'C{}'.format(ix)
ax.scatter(time, flux+offset, s=3, rasterized=True, linewidths=0,
c=color)
tstr = '{}: {:.4f} {:.4f}'.format(_id, ra, dec)
txt_x, txt_y = (
np.nanpercentile(time, 97), np.nanpercentile(flux+offset, 1)
)
if adaptiveoffset:
ax.text(txt_x, txt_y,
tstr, ha='right', va='bottom', bbox=props, zorder=-1,
fontsize='small', color=color)
else:
ax.text(txt_x, max(txt_y, 0.9),
tstr, ha='right', va='bottom', bbox=props, zorder=-1,
fontsize='small', color=color)
if adaptiveoffset:
if int(apn) <= 1:
offset += 0.30
elif int(apn) == 2:
offset += 0.10
elif int(apn) == 3:
offset += 0.05
elif int(apn) == 4:
offset += 0.035
else:
offset += 0.02
else:
offset += 0.10
if not adaptiveoffset:
ax.set_ylim((0.9, 2.3))
format_ax(ax)
fig.tight_layout()
savefig(fig, outpath, writepdf=0, dpi=300)
def plot_fitted_zoom(m, summdf, outpath, overwrite=1, modelid=None):
yval = "PDCSAP_FLUX"
provenance = 'spoc'
detrend = 1
if os.path.exists(outpath) and not overwrite:
print('found {} and no overwrite'.format(outpath))
return
if modelid is None:
d, params, _ = _get_fitted_data_dict(m, summdf)
_d = d
elif 'alltransit' in modelid:
d = _get_fitted_data_dict_alltransit(m, summdf)
_d = d['tess']
time, flux, flux_err = _d['x_obs'], _d['y_obs'], _d['y_err']
t_offset = np.nanmin(time)
time -= t_offset
t0 = summdf.loc['t0', 'median'] - t_offset
per = summdf.loc['period', 'median']
epochs = np.arange(-100,100,1)
tra_times = t0 + per*epochs
plt.close('all')
##########################################
# figsize=(8.5, 10) full page... 10 leaves space.
fig = plt.figure(figsize=(8.5*1.5, 8))
ax0 = plt.subplot2grid(shape=(3,5), loc=(0,0), colspan=5)
ax1 = plt.subplot2grid((3,5), (1,0), colspan=1)
ax2 = plt.subplot2grid((3,5), (1,1), colspan=1)
ax3 = plt.subplot2grid((3,5), (1,2), colspan=1)
ax4 = plt.subplot2grid((3,5), (1,3), colspan=1)
ax5 = plt.subplot2grid((3,5), (1,4), colspan=1)
ax6 = plt.subplot2grid((3,5), (2,0), colspan=1)
ax7 = plt.subplot2grid((3,5), (2,1), colspan=1)
ax8 = plt.subplot2grid((3,5), (2,2), colspan=1)
ax9 = plt.subplot2grid((3,5), (2,3), colspan=1)
ax10 = plt.subplot2grid((3,5), (2,4), colspan=1)
all_axs = [ax0,ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8,ax9,ax10]
tra_axs = [ax1,ax2,ax3,ax4,ax5]
res_axs = [ax6,ax7,ax8,ax9,ax10]
tra_ixs = [0,2,3,4,5]
# main lightcurve
yval = (flux - np.nanmean(flux))*1e3
ax0.scatter(time, yval, c='k', zorder=3, s=0.75, rasterized=True,
linewidths=0)
ax0.plot(time, (_d['y_mod'] - np.nanmean(flux))*1e3, color='C0', alpha=0.8,
rasterized=False, lw=1, zorder=4)
ax0.set_ylim((-20, 20)) # omits roughly one high point from the big flare near time 38
ymin, ymax = ax0.get_ylim()
ax0.vlines(
tra_times, ymin, ymax, colors='C1', alpha=0.5,
linestyles='--', zorder=-2, linewidths=0.5
)
ax0.set_ylim((ymin, ymax))
ax0.set_xlim((np.nanmin(time)-1, np.nanmax(time)+1))
# zoom-in of raw transits
for ax, tra_ix, rax in zip(tra_axs, tra_ixs, res_axs):
mid_time = t0 + per*tra_ix
tdur = 2/24. # roughly, in units of days
# n = 2.5 # good
n = 2.0 # good
start_time = mid_time - n*tdur
end_time = mid_time + n*tdur
s = (time > start_time) & (time < end_time)
ax.scatter(time[s], (flux[s] - np.nanmean(flux[s]))*1e3, c='k',
zorder=3, s=7, rasterized=False, linewidths=0)
ax.plot(time[s], (_d['y_mod'][s] - np.nanmean(flux[s]))*1e3 ,
color='C0', alpha=0.8, rasterized=False, lw=1, zorder=1)
rax.scatter(time[s], (flux[s] - _d['y_mod'][s])*1e3, c='k',
zorder=3, s=7, rasterized=False, linewidths=0)
rax.plot(time[s], (_d['y_mod'][s] - _d['y_mod'][s])*1e3, color='C0',
alpha=0.8, rasterized=False, lw=1, zorder=1)
ax.set_ylim((-8, 8))
rax.set_ylim((-8, 8))
for a in [ax,rax]:
a.set_xlim((start_time, end_time))
ymin, ymax = a.get_ylim()
a.vlines(
mid_time, ymin, ymax, colors='C1', alpha=0.5,
linestyles='--', zorder=-2, linewidths=0.5
)
a.set_ylim((ymin, ymax))
if tra_ix > 0:
# hide the ytick labels
labels = [item.get_text() for item in
a.get_yticklabels()]
empty_string_labels = ['']*len(labels)
a.set_yticklabels(empty_string_labels)
labels = [item.get_text() for item in
ax.get_yticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_xticklabels(empty_string_labels)
for ax in all_axs:
format_ax(ax)
fig.text(0.5,-0.01, 'Time [days]', ha='center', fontsize='x-large')
fig.text(-0.01,0.5, 'Relative flux [parts per thousand]', va='center',
rotation=90, fontsize='x-large')
fig.tight_layout(h_pad=0.2, w_pad=-1.0)
savefig(fig, outpath, writepdf=1, dpi=300)
def plot_lithium(outdir):
set_style()
from timmy.lithium import get_Randich18_lithium, get_Berger18_lithium
rdf = get_Randich18_lithium()
bdf = get_Berger18_lithium()
selclusters = [
# 'IC4665', # LDB 23.2 Myr
'NGC2547', # LDB 37.7 Myr
'IC2602', # LDB 43.7 Myr
# 'IC2391', # LDB 51.3 Myr
]
selrdf = np.zeros(len(rdf)).astype(bool)
for c in selclusters:
selrdf |= rdf.Cluster.str.contains(c)
srdf = rdf[selrdf]
srdf_lim = srdf[srdf.f_EWLi==3]
srdf_val = srdf[srdf.f_EWLi==0]
# young dictionary
yd = {
'val_teff_young': nparr(srdf_val.Teff),
'val_teff_err_young': nparr(srdf_val.e_Teff),
'val_li_ew_young': nparr(srdf_val.EWLi),
'val_li_ew_err_young': nparr(srdf_val.e_EWLi),
'lim_teff_young': nparr(srdf_lim.Teff),
'lim_teff_err_young': nparr(srdf_lim.e_Teff),
'lim_li_ew_young': nparr(srdf_lim.EWLi),
'lim_li_ew_err_young': nparr(srdf_lim.e_EWLi),
}
# field dictionary
# SNR > 3
field_det = ( (bdf.EW_Li_ / bdf.e_EW_Li_) > 3 )
bdf_val = bdf[field_det]
bdf_lim = bdf[~field_det]
fd = {
'val_teff_field': nparr(bdf_val.Teff),
'val_li_ew_field': nparr(bdf_val.EW_Li_),
'val_li_ew_err_field': nparr(bdf_val.e_EW_Li_),
'lim_teff_field': nparr(bdf_lim.Teff),
'lim_li_ew_field': nparr(bdf_lim.EW_Li_),
'lim_li_ew_err_field': nparr(bdf_lim.e_EW_Li_),
}
d = {**yd, **fd}
##########
# make the plot
##########
plt.close('all')
f, ax = plt.subplots(figsize=(4,3))
classes = ['young', 'field']
colors = ['k', 'gray']
zorders = [2, 1]
markers = ['o', '.']
ss = [13, 5]
labels = [r'NGC$\,$2547 & IC$\,$2602', 'Kepler Field']
# plot vals
for _cls, _col, z, m, l, s in zip(classes, colors, zorders, markers,
labels, ss):
ax.scatter(
d[f'val_teff_{_cls}'], d[f'val_li_ew_{_cls}'], c=_col, alpha=1,
zorder=z, s=s, rasterized=False, linewidths=0, label=l, marker=m
)
from timmy.priors import TEFF, LI_EW
ax.plot(
TEFF,
LI_EW,
alpha=1, mew=0.5, zorder=8, label='TOI 837', markerfacecolor='yellow',
markersize=18, marker='*', color='black', lw=0
)
ax.legend(loc='best', handletextpad=0.1, fontsize='x-small', framealpha=0.7)
ax.set_ylabel(r'Li$_{6708}$ EW [m$\mathrm{\AA}$]', fontsize='large')
ax.set_xlabel('Effective Temperature [K]', fontsize='large')
ax.set_xlim((4900, 6600))
format_ax(ax)
outpath = os.path.join(outdir, 'lithium.png')
savefig(f, outpath)
def plot_rotation(outdir):
set_style()
from timmy.paths import DATADIR
rotdir = os.path.join(DATADIR, 'rotation')
# make plot
plt.close('all')
f, ax = plt.subplots(figsize=(4,3))
classes = ['pleiades', 'praesepe', 'ngc6811']
colors = ['k', 'gray', 'darkgray']
zorders = [3, 2, 1]
markers = ['o', 'x', 's']
lws = [0, 0., 0]
mews= [0.5, 0.5, 0.5]
ss = [3.0, 6, 3.0]
labels = ['Pleiades', 'Praesepe', r'NGC$\,$6811']
# plot vals
for _cls, _col, z, m, l, lw, s, mew in zip(
classes, colors, zorders, markers, labels, lws, ss, mews
):
df = pd.read_csv(os.path.join(rotdir, f'curtis19_{_cls}.csv'))
ax.plot(
df['teff'], df['prot'], c=_col, alpha=1, zorder=z, markersize=s,
rasterized=False, lw=lw, label=l, marker=m, mew=mew,
mfc=_col
)
# ax.plot(
# target_df['phot_bp_mean_mag']-target_df['phot_rp_mean_mag'],
# target_yval,
# alpha=1, mew=0.5, zorder=8, label='TOI 837', markerfacecolor=mfc,
# markersize=ms, marker=m, color='black', lw=lw, mec=mec
# )
from timmy.priors import TEFF, P_ROT
ax.plot(
TEFF,
P_ROT,
alpha=1, mew=0.5, zorder=8, label='TOI 837', markerfacecolor='yellow',
markersize=18, marker='*', color='black', lw=0
)
ax.legend(loc='best', handletextpad=0.1, fontsize='x-small', framealpha=0.7)
ax.set_ylabel('Rotation Period [days]', fontsize='large')
ax.set_xlabel('Effective Temperature [K]', fontsize='large')
ax.set_xlim((4900, 6600))
ax.set_ylim((0,14))
format_ax(ax)
outpath = os.path.join(outdir, 'rotation.png')
savefig(f, outpath)
def _get_color_df(tdepth_ap, sep_arcsec, fn_mass_to_dmag, band='Rc'):
color_path = os.path.join(RESULTSDIR, 'fpscenarios', f'multicolor_{band}.csv')
color_data_df = pd.read_csv(color_path)
m2_color = max(color_data_df[color_data_df['frac_viable'] == 0].m2)
color_ap = tdepth_ap-0.20 # arcsec, same as tdepth
color_sep = sep_arcsec[sep_arcsec < color_ap]
color_dmag = np.ones_like(color_sep)*fn_mass_to_dmag(m2_color)
color_df = pd.DataFrame({'sep_arcsec': color_sep, 'dmag': color_dmag})
_append_df = pd.DataFrame({
'sep_arcsec':[2.00001],
'dmag':[10]
})
color_df = color_df.append(_append_df)
return color_df
def _get_rv_secondary_df(dist_pc, method=1):
if method == 1:
rv_path = os.path.join(RESULTSDIR, 'fpscenarios',
'rvoutersensitivity_3sigma.csv')
elif method == 2:
rv_path = os.path.join(RESULTSDIR, 'fpscenarios',
'rvoutersensitivity_method2_3sigma.csv')
else:
raise NotImplementedError
rv_df = pd.read_csv(rv_path)
rv_df['sma_au'] = 10**(rv_df.log10sma)
rv_df['mp_msun'] = 10**(rv_df.log10mpsini)
rv_df['sep_arcsec'] = rv_df['sma_au'] / dist_pc
# conversion to contrast, from drivers.contrast_to_masslimit
smooth_path = os.path.join(DATADIR, 'speckle', 'smooth_dmag_to_mass.csv')
smooth_df = pd.read_csv(smooth_path)
sel = ~pd.isnull(smooth_df['m_comp/m_sun'])
smooth_df = smooth_df[sel]
fn_mass_to_dmag = interp1d(
nparr(smooth_df['m_comp/m_sun']), nparr(smooth_df['dmag_smooth']),
kind='quadratic', bounds_error=False, fill_value=np.nan
)
rv_df['dmag'] = fn_mass_to_dmag(nparr(rv_df.mp_msun))
sel = (
(rv_df.sep_arcsec > 1e-3)
&
(rv_df.sep_arcsec < 1e1)
)
if method == 2:
sel &= (rv_df.mp_msun > 0.01)
sel &= ~(pd.isnull(rv_df.dmag))
srv_df = rv_df[sel]
if method == 1:
# set the point one above the last finite dmag value to zero.
srv_df.loc[np.nanargmin(nparr(srv_df.dmag))+1, 'dmag'] = 0
elif method == 2:
eps = 1e-2
row_df = pd.DataFrame({
'log10sma':np.nan,
'log10mpsini':np.nan,
'sma_au':srv_df.sma_au.max()+eps,
'mp_msun':1.1,
'sep_arcsec':srv_df.sep_arcsec.max()+eps,
'dmag':0
}, index=[0])
srv_df = srv_df.append(row_df, ignore_index=True)
return srv_df, fn_mass_to_dmag, smooth_df
def plot_fpscenarios(outdir):
set_style()
#
# get data
#
#
# speckle AO from SOAR HRcam
#
speckle_path = os.path.join(DATADIR, 'speckle', 'sep_vs_dmag.csv')
speckle_df = pd.read_csv(speckle_path, names=['sep_arcsec','dmag'])
dist_pc = 142.488 # pc, TIC8
sep_arcsec = np.logspace(-2, 1.5, num=1000, endpoint=True)
sep_au = sep_arcsec*dist_pc
#
# transit depth constraint: within 2 arcseconds from ground-based seeing
# limited resolution.
# within dmag~5.2 from transit depth (and assumption of a totally eclipsing
# M+M dwarf type scenario. Totally eclipsing dark companion+Mdwarf is a bit
# ridiculous).
#
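# Quick sanity check of the "dmag~5.2" figure quoted above (illustrative):
# dmag = 2.5*log10(N/depth_obs) = 2.5*log10(0.5/4374e-6) ≈ 5.1 mag.
#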
N = 0.5 # N=1 for the dark companion scenario.
Tmag = 9.9322
depth_obs = (4374e-6) # QLP depth
tdepth_ap = 2 # arcsec
tdepth_sep = sep_arcsec[sep_arcsec < tdepth_ap]
tdepth_dmag = 5/2*
|
np.log10(N/depth_obs)
|
numpy.log10
|
#!/usr/bin/env python
# coding: utf-8
# # Chapter 1: Basic Image and Video Processing
# ## Problems
#
# ## 1. Display RGB image color channels in 3D
# In[6]:
from IPython.core.display import display,HTML
display(HTML('<style>.prompt{width: 0px; min-width: 0px; visibility: collapse}</style>'))
# In[3]:
# comment the next line only if you are not running this code from jupyter notebook
get_ipython().run_line_magic('matplotlib', 'inline')
from skimage.io import imread
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
# In[20]:
def plot_3d(X, Y, Z, cmap='Reds', title=''):
"""
This function plots 3D visualization of a channel
It displays (x, y, f(x,y)) for all x,y values
"""
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(projection='3d')  # passing projection to fig.gca() was removed in newer Matplotlib
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cmap, linewidth=0, antialiased=False, rstride=2, cstride=2, alpha=0.5)
ax.xaxis.set_major_locator(LinearLocator(10))
ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(elev=10., azim=5)
ax.set_title(title, size=20)
plt.show()
# In[ ]:
im = imread('images/Img_01_01.jpg')
# In[ ]:
Y = np.arange(im.shape[0])
X = np.arange(im.shape[1])
X, Y = np.meshgrid(X, Y)
# In[21]:
Z1 = im[...,0]
Z2 = im[...,1]
Z3 = im[...,2]
# In[22]:
# plot 3D visualizations of the R, G, B channels of the image respectively
plot_3d(Z1, X, im.shape[1]-Y, cmap='Reds', title='3D plot for the Red Channel')
# In[23]:
plot_3d(Z2, X, im.shape[1]-Y, cmap='Greens', title='3D plot for the Green Channel')
# In[24]:
plot_3d(Z3, X, im.shape[1]-Y, cmap='Blues', title='3D plot for the Blue Channel')
# ## 2. Video I/O
# ### 2.1 Read/Write Video Files with scikit-video
# In[25]:
import skvideo.io
import numpy as np
import matplotlib.pylab as plt
# In[34]:
# set keys and values for parameters in ffmpeg
inputparameters = {}
outputparameters = {}
reader = skvideo.io.FFmpegReader('images/Vid_01_01.mp4',
inputdict=inputparameters,
outputdict=outputparameters)
# In[35]:
## Read video file
num_frames, height, width, num_channels = reader.getShape()
print(num_frames, height, width, num_channels)
# 600 916 1920 3
# In[36]:
plt.figure(figsize=(20,10))
# iterate through the frames and display a few frames
frame_list =
|
np.random.choice(num_frames, 4)
|
numpy.random.choice
|
import math
import os
import numpy as np
from PIL import ImageFilter, Image as Im
from skimage.filters import gaussian
from stl import Mesh
from .config import decay_constant, Material
from .perimeter import lines_to_voxels
from .slice import to_intersecting_lines
def get_material(s):
mat = Material()
for const in mat.material_constant.keys():
if const in s:
return const
return ''
def find_top_bottom_surfaces(voxels):
# Find the heights of the top and bottom surface for each pixel
bottom_surface =
|
np.zeros(voxels.shape[1:], dtype=np.int32)
|
numpy.zeros
|
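#
# Hedged sketch (an assumption about the intent, not the original code): for a
# boolean voxel grid, assumed here to have shape (z, y, x), the per-pixel
# bottom/top surface heights can be read off with argmax along the z axis.
#
import numpy as np

def find_top_bottom_surfaces_sketch(voxels):
    filled = np.asarray(voxels).astype(bool)
    has_any = filled.any(axis=0)                                  # columns containing any material
    bottom = np.argmax(filled, axis=0)                            # lowest filled z per (y, x)
    top = filled.shape[0] - 1 - np.argmax(filled[::-1], axis=0)   # highest filled z per (y, x)
    bottom_surface = np.where(has_any, bottom, 0).astype(np.int32)
    top_surface = np.where(has_any, top, 0).astype(np.int32)
    return top_surface, bottom_surface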
# -*- coding: utf-8 -*-
"""pyahp.methods.approximate
This module contains the class implementing the geometric priority estimation method.
"""
import numpy as np
import numpy.linalg as LA
from pyahp.methods import Method
def eig(A, initvec=None, tol=0.0001, iteration=16):
'''Calculate the dominant eigenvalue of A with the power method.
The power method is a classical numerical-analysis technique for computing the single
eigenvalue of maximal absolute value, i.e. |lambda_1| > |lambda_2| >= |lambda_i|,
where the lambda_i are the eigenvalues of A.
The speed of convergence is governed by the ratio |lambda_2|/|lambda_1|.
Perron's theorem guarantees that lambda_1 > |lambda_i| for any positive matrix.
See https://en.wikipedia.org/wiki/Power_iteration for more details.
Arguments:
A {2D-array} -- square matrix
Keyword Arguments:
initvec {1D-array} -- [initial vector] (default: {None})
tol {number} -- [tolerance] (default: {0.0001})
iteration {number} -- [iteration] (default: {16})
Returns:
dominant eigenvalue, eigenvector {1D-array}
Example:
>>> A = np.array([[1,2,6],[1/2,1,4],[1/6,1/4,1]])
>>> lmd, v = eig(A)
>>> lmd
3.0090068360243256
>>> v
[1. 0.5503254 0.15142866]
'''
m, n = A.shape
u0 = initvec or
|
np.random.random(n)
|
numpy.random.random
|
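#
# A minimal, self-contained sketch of the power iteration that the eig()
# docstring above describes (illustrative only: the helper name, the max-norm
# normalization, and the convergence test are assumptions, not necessarily
# what pyahp implements).
#
import numpy as np

def power_iteration_sketch(A, tol=1e-4, iteration=16):
    """Estimate the dominant eigenvalue and eigenvector of a square matrix A."""
    n = A.shape[0]
    v = np.random.random(n)
    lam = 0.0
    for _ in range(iteration):
        w = A @ v
        lam_new = np.max(np.abs(w))      # current estimate of |lambda_1|
        w = w / lam_new                  # renormalize so the iterate stays bounded
        if np.max(np.abs(w - v)) < tol:  # stop when successive iterates agree
            return lam_new, w
        v, lam = w, lam_new
    return lam, v

# Example, using the pairwise-comparison matrix from the docstring:
# A = np.array([[1, 2, 6], [1/2, 1, 4], [1/6, 1/4, 1]])
# lam, v = power_iteration_sketch(A)   # lam ≈ 3.01, v ≈ [1.0, 0.55, 0.15]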
from threading import Thread, Lock
import numpy as np
import quaternion
def generate_mesh_slices(width, height, depth, center_w, center_x, center_y, center_z, zoom, rotation_theta, rotation_phi, rotation_gamma, rotation_beta, offset_x, offset_y, depth_dither=1):
ct, st = np.cos(rotation_theta), np.sin(rotation_theta)
cp, sp = np.cos(rotation_phi), np.sin(rotation_phi)
cg, sg = np.cos(rotation_gamma), np.sin(rotation_gamma)
cb, sb = np.cos(rotation_beta), np.sin(rotation_beta)
zoom = 2**-zoom
x = np.arange(width, dtype='float64') + offset_x
y = np.arange(height, dtype='float64') + offset_y
z = np.arange(depth, dtype='float64')
x, y = np.meshgrid(x, y)
x = (2 * x - width) * zoom / height
y = (2 * y - height) * zoom / height
for z_ in z:
z_ += np.random.rand(*x.shape) * depth_dither - 0.5*depth_dither
z_ = (2 * z_ - depth) * zoom / depth
# The screen coordinates have x as the real axis, so 'w' is a misnomer here.
y_ = cb*y
w_ = -sb*y
x_, z_ = x*ct + z_*st, z_*ct - x*st
x_, y_ = x_*cp + y*sp, y*cp - x_*sp
y_, z_ = y_*cg + z_*sg, z_*cg - y_*sg
# See above for the misnaming compared to the quaternion library.
x_ += center_w
y_ += center_x
z_ += center_y
w_ += center_z
yield quaternion.from_float_array(np.stack((x_, y_, z_, w_), axis=-1))
def generate_imaginary_mesh_slices(width, height, depth, center_w, center_x, center_y, center_z, zoom, rotation_theta, rotation_phi, rotation_gamma, offset_x, offset_y, depth_dither=1):
ct, st = np.cos(rotation_theta), np.sin(rotation_theta)
cp, sp = np.cos(rotation_phi), np.sin(rotation_phi)
cg, sg = np.cos(rotation_gamma), np.sin(rotation_gamma)
zoom = 2**-zoom
x = np.arange(width, dtype='float64') + offset_x
y = np.arange(height, dtype='float64') + offset_y
z = np.arange(depth, dtype='float64')
x, y = np.meshgrid(x, y)
x = (2 * x - width) * zoom / height
y = (2 * y - height) * zoom / height
w = 0*x + center_w
for z_ in z:
z_ += np.random.rand(*x.shape) * depth_dither - 0.5*depth_dither
z_ = (2 * z_ - depth) * zoom / depth
x_, z_ = x*ct + z_*st, z_*ct - x*st
x_, y_ = x_*cp + y*sp, y*cp - x_*sp
y_, z_ = y_*cg + z_*sg, z_*cg - y_*sg
x_ += center_x
y_ += center_y
z_ += center_z
yield quaternion.from_float_array(
|
np.stack((w, x_, y_, z_), axis=-1)
|
numpy.stack
|
import os, os.path as osp
import sys
import re
import click
import time
import copy
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import pickle
import json
import PIL.Image
from torch_utils import misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import grid_sample_gradfix
from utils.visualization import get_palette
import dnnlib
from utils import metrics
#----------------------------------------------------------------------------
def report_metrics(result_dict, run_dir=None, snapshot_pkl=None, desc='test'):
if run_dir is not None and snapshot_pkl is not None:
snapshot_pkl = os.path.relpath(snapshot_pkl, run_dir)
jsonl_line = json.dumps(dict(result_dict, snapshot_pth=snapshot_pkl, timestamp=time.time()))
print(jsonl_line)
if run_dir is not None and os.path.isdir(run_dir):
with open(os.path.join(run_dir, f'metric-segmentation-{desc}.jsonl'), 'at') as f:
f.write(jsonl_line + '\n')
#----------------------------------------------------------------------------
def setup_snapshot_image_label_grid(training_set, random_seed=0):
rnd = np.random.RandomState(random_seed)
gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
gh = np.clip(4320 // training_set.image_shape[1], 4, 32) // 2 # Half the number for label
all_indices = list(range(len(training_set)))
rnd.shuffle(all_indices)
grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]
# Load data
data = list(zip(*[training_set[i] for i in grid_indices]))
images = data[0]
labels = data[1]
return (gw, gh), np.stack([x.numpy() for x in images]), np.stack([x.numpy() for x in labels])
#----------------------------------------------------------------------------
def save_image_label_grid(img, label, fname, drange, grid_size, conf_threshold=0.9):
lo, hi = drange
img = np.asarray(img, dtype=np.float32)
img = (img - lo) * (255 / (hi - lo))
img = np.rint(img).clip(0, 255).astype(np.uint8)
# Colorize label
if label.ndim == 4:
label_ = label.argmax(axis=1)
conf_ = label.max(axis=1)
label = label_
hc_label = label.copy()
hc_label[conf_ < conf_threshold] = 255
else:
hc_label = None
cmap = get_palette()
label = cmap[label]
if hc_label is not None:
hc_label = cmap[hc_label]
gw, gh = grid_size
_N, C, H, W = img.shape
assert C == 3, f'{C}'
img = img.reshape(gh, gw, C, H, W)
img = img.transpose(0, 3, 1, 4, 2)
label = label.reshape(gh, gw, H, W, C)
label = label.transpose(0, 2, 1, 3, 4)
if hc_label is not None:
hc_label = hc_label.reshape(gh, gw, H, W, C)
hc_label = hc_label.transpose(0, 2, 1, 3, 4)
# Save image
img = np.concatenate([img, label], axis=1) if hc_label is None else \
np.concatenate([img, label, hc_label], axis=1)
img = img.reshape(gh * H * (2 + (hc_label is not None)), gw * W, C)
assert C == 3
PIL.Image.fromarray(img, 'RGB').save(fname)
#----------------------------------------------------------------------------
def train_annotator(
run_dir = '.', # Output directory.
resume_path = '', # Resume path of model
training_set_kwargs = {}, # Options for training set.
validation_set_kwargs = {}, # Options for validation set.
# data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.
G_kwargs = {}, #
A_kwargs = {}, # Options for annotator network.
S_kwargs = {}, # Options for student network.
T_kwargs = {},
matcher_kwargs = {},
gmparam_regex = '.*',
A_opt_kwargs = {}, # Options for annotator optimizer.
S_opt_kwargs = {}, # Options for student optimizer.
loss_kwargs = {}, # Options for loss function.
random_seed = 0, # Global random seed.
image_per_batch = 2, #
A_interval = 1,
S_interval = 1,
total_iters = 150000, # Total number of training iterations.
niter_per_tick = 20, # Progress snapshot interval.
label_snapshot_ticks = 200, # How often to save (image, label) snapshots? None = disable.
network_snapshot_ticks = 200, # How often to save annotator network snapshots? None = disable.
cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?
allow_tf32 = False, # Enable torch.backends.cuda.matmul.allow_tf32 and torch.backends.cudnn.allow_tf32?
abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.
progress_fn = None, # Callback function for updating training progress. Called for all ranks.
):
# Initialize.
start_time = time.time()
device = torch.device('cuda')
|
np.random.seed(random_seed)
|
numpy.random.seed
|