import numpy as np
import pandas as pd
import rasterio
import statsmodels.formula.api as smf
from scipy.sparse import coo_matrix
import scipy.spatial
import patsy
from statsmodels.api import add_constant, OLS
from .utils import transform_coord
def test_linearity(x, y, n_knots=5, verbose=True):
"""Test linearity between two variables.
Run a linear regression of y on x, and take the residuals.
Fit the residuals with a natural spline with `n_knots` knots.
Conduct a joint F-test for all columns in the natural spline basis matrix.
Example:
>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> x = np.linspace(0., 1., 101)
>>> y = 5 * x + 3 + rng.random(size=101) / 5
>>> test_linearity(x, y, n_knots=5, verbose=False)
0.194032
"""
residuals = OLS(y, add_constant(x)).fit().resid
basis_matrix = patsy.dmatrix(
f"cr(x, df={n_knots - 1}, constraints='center') - 1", {'x': x},
return_type='dataframe')
results = OLS(residuals, basis_matrix).fit()
results.summary()
nobs = results.nobs
f_value = results.fvalue
p_value = np.round(results.f_pvalue, 6)
if verbose:
    print('Test for Linearity: '
          f'N = {nobs:.0f}; df={nobs - n_knots - 1:.0f}; '
          f'F = {f_value:.3f}; p = {p_value:.6f}.')
return p_value
def winsorize(s, lower, upper, verbose=False):
"""Winsorizes a pandas series.
Args:
s (pandas.Series): the series to be winsorized
lower, upper (int): percentile cutoffs, numbers between 0 and 100
"""
lower_value = np.nanpercentile(s.values, lower)
upper_value = np.nanpercentile(s.values, upper)
if verbose:
print(f'Winsorizing to {lower_value} - {upper_value}')
return s.clip(lower_value, upper_value)
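# Usage sketch (illustrative values): clip the tails of a noisy series at the
# 1st and 99th percentiles.
#
#     s = pd.Series([-50., 1., 2., 3., 4., 5., 500.])
#     s_w = winsorize(s, 1, 99, verbose=True)
#
# Values below the 1st or above the 99th percentile are replaced by those
# percentile values; everything in between is left unchanged.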
def demean(df, column, by):
"""Demean a column in a pandas DataFrame.
Args:
df (pandas.DataFrame): data
column (str): the column to be demeaned
by (list of str): the columns to group by when computing group means
"""
return (
df[column].values -
(df.loc[:, by + [column]]
.groupby(by).transform(np.nanmean).values.squeeze()))
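# Example (column names are illustrative): subtract village-level means from a
# household outcome before regression.
#
#     df['consumption_dm'] = demean(df, 'consumption', by=['village_code'])
#
# Each value has the NaN-aware mean of its group subtracted from it.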
def load_gd_census(GPS_FILE, MASTER_FILE):
# read GPS coords + treatment status
df = pd.read_csv(
GPS_FILE,
usecols=['village_code', 'ge', 'hi_sat', 'treat',
'latitude', 'longitude', 'elevation', 'accuracy', 'eligible',
'GPS_imputed'],
dtype={
'village_code': 'Int64',
'ge': 'Int32',
'hi_sat': 'Int32',
'treat': 'Int32',
'eligible': 'Int32',
'GPS_imputed': 'Int32'})
# drop non GE households
df = df.loc[df['ge'] == 1, :].copy()
# treat x eligible = cash inflow
df.loc[:, 'treat_eligible'] = (
df.loc[:, 'treat'].values * df.loc[:, 'eligible'].values)
# read sat level identifiers
df_master = pd.read_stata(
MASTER_FILE,
columns=['village_code', 'satlevel_name']
).astype({'village_code': 'Int64'})
df_master = df_master.drop_duplicates()
# merge treatment
df = pd.merge(
df, df_master,
on='village_code', how='left')
assert df['satlevel_name'].notna().all(), (
'Missing saturation level identifier')
return df.drop(columns=['ge'])
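# Usage sketch (file paths are hypothetical): load the household census with
# treatment status and merge in the saturation-level identifiers.
#
#     df_census = load_gd_census(GPS_FILE='data/census_gps.csv',
#                                MASTER_FILE='data/master.dta')
#
# The result has one row per GE household; 'treat_eligible' is the product of
# the treatment and eligibility indicators (the cash-inflow marker).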
def snap_to_grid(df, lon_col, lat_col,
min_lon, max_lon, min_lat, max_lat, step,
**kwargs):
"""Collapses variables in a data frame onto a grid.
Args:
df (pandas.DataFrame)
lon_col, lat_col (str): name of lon, lat columns
min_lon, max_lon, min_lat, max_lat, step (float)
**kwargs: passed to pandas agg() function after grouping by lat, lon
Returns:
(numpy.ndarray, numpy.ndarray): lon and lat grids
pandas.DataFrame: output data frame
"""
df_copy = df.copy()
# snap to grid
df_copy.loc[:, 'grid_lon'] = np.round(
(df[lon_col].values - min_lon - step / 2) / step
).astype(np.int32)
df_copy.loc[:, 'grid_lat'] = np.round(
(df[lat_col].values - min_lat - step / 2) / step
).astype(np.int32)
# construct the grid
grid_lon, grid_lat = np.meshgrid(
np.arange(0, np.round((max_lon - min_lon) / step).astype(np.int32)),
np.arange(0, np.round((max_lat - min_lat) / step).astype(np.int32)))
df_grid = pd.DataFrame({'grid_lon': grid_lon.flatten(),
'grid_lat': grid_lat.flatten()})
# collapse
df_output = pd.merge(
df_grid.assign(is_in_grid=True),
df_copy.groupby(['grid_lon', 'grid_lat']).agg(**kwargs),
how='outer', on=['grid_lon', 'grid_lat'])
print(f"Dropping {df_output['is_in_grid'].isna().sum()} observations;\n"
f"Keeping {df_output['is_in_grid'].notna().sum()} observations")
df_output = df_output.loc[df_output['is_in_grid'].notna(), :].copy()
return (grid_lon, grid_lat), df_output.drop(columns=['is_in_grid'])
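# Usage sketch (column names are illustrative): average a household-level
# outcome onto a 0.01-degree grid; keyword arguments are forwarded to
# DataFrame.agg() as named aggregations, so each keyword defines one output
# column.
#
#     (grid_lon, grid_lat), df_grid = snap_to_grid(
#         df, lon_col='longitude', lat_col='latitude',
#         min_lon=34.0, max_lon=35.0, min_lat=-1.0, max_lat=0.0, step=0.01,
#         mean_consumption=('consumption', 'mean'))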
def control_for_spline(x, y, z, cr_df=3):
# handle nan's
is_na = np.any((np.isnan(x), np.isnan(y), np.isnan(z)), axis=0)
df = pd.DataFrame({'x': x[~is_na], 'y': y[~is_na], 'z': z[~is_na]})
mod = smf.ols(formula=f"z ~ 1 + cr(x, df={cr_df}) + cr(y, df={cr_df})",
data=df)
res = mod.fit()
# return nan's for cases where any one of x, y, z is nan
z_out = np.full_like(z, np.nan)
#!/usr/bin/env python
# ECLAIR/src/ECLAIR/Build_instance/ECLAIR_core.py
# Author: <NAME> for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: <EMAIL>, <EMAIL>
"""ECLAIR is a package for the robust and scalable
inference of cell lineages from gene expression data.
ECLAIR achieves a higher level of confidence in the estimated lineages
through the use of approximation algorithms for consensus clustering and by combining the information from an ensemble of minimum spanning trees
so as to come up with an improved, aggregated lineage tree.
In addition, the present package features several customized algorithms for assessing the similarity between weighted graphs or unrooted trees and for estimating the reproducibility of each edge to a given tree.
References
----------
* <NAME>., <NAME>., <NAME>. and <NAME>.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* <NAME>. and <NAME>., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* <NAME>., <NAME>., <NAME>. and <NAME>.,
"Thirty Years of Graph Matching in Pattern Recognition".
In: International Journal of Pattern Recognition and Artificial Intelligence,
18, 3, pp. 265-298. 2004
"""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import Concurrent_AP as AP
import Cluster_Ensembles as CE
import DBSCAN_multiplex
import Density_Sampling
from collections import defaultdict, namedtuple
import datetime
import igraph
from math import floor, sqrt
import numpy as np
import os
import psutil
import random
import scipy.sparse
from scipy.spatial.distance import _validate_vector
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics import pairwise_distances_argmin_min
from sklearn.preprocessing import StandardScaler
import subprocess
from sys import exit
import tables
import time
__all__ = ['tree_path_integrals', 'ECLAIR_processing']
Data_info = namedtuple('Data_info', "data_file_name expected_N_samples "
"skip_rows cell_IDs_column extra_excluded_columns "
"time_info_column")
AP_parameters = namedtuple('AP_parameters', "clustering_method max_iter "
"convergence_iter")
DBSCAN_parameters = namedtuple('DBSCAN_parameters', "clustering_method minPts "
"eps quantile metric")
HIERARCHICAL_parameters = namedtuple('HIERARCHICAL_parameters',
'clustering_method k')
KMEANS_parameters = namedtuple('KMEANS_parameters', 'clustering_method k')
CC_parameters = namedtuple('CC_parameters', 'N_runs sampling_fraction N_cc')
Holder = namedtuple('Holder', "N_samples subsample_size N_runs name_tag "
"method run error_count")
def memory():
"""Determine memory specifications of the machine.
Returns
-------
mem_info : dictionary
Holds the current values for the total, free and used memory of the system.
"""
mem_info = dict()
for k, v in psutil.virtual_memory().__dict__.iteritems():
mem_info[k] = int(v)
return mem_info
def get_chunk_size(N, n):
"""Given a two-dimensional array with a dimension of size 'N',
determine the number of rows or columns that can fit into memory.
Parameters
----------
N : int
The size of one of the dimensions of a two-dimensional array.
n : int
The number of arrays of size 'N' times 'chunk_size' that can fit in memory.
Returns
-------
chunk_size : int
The size of the dimension orthogonal to the one of size 'N'.
"""
mem_free = memory()['free']
if mem_free > 60000000:
chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 40000000:
chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 14000000:
chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 8000000:
chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 2000000:
chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 1000000:
chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))
return chunk_size
else:
print("\nERROR: ECLAIR: ECLAIR_core: get_chunk_size: "
"this machine does not have enough free memory resources "
"to perform ensemble clustering.\n")
exit(1)
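# Usage sketch: iterate over a large (N, N) float32 array in row blocks small
# enough to fit in memory (array and variable names are illustrative).
#
#     chunk_size = get_chunk_size(N, 2)
#     for start in xrange(0, N, chunk_size):
#         block = big_matrix[start:start + chunk_size]
#         # ... process 'block' ...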
def KMEANS(data, k):
from sklearn.cluster import k_means, MiniBatchKMeans
if data.shape[0] < 50000:
centroids, cluster_labels, _ = k_means(data, k, init = 'k-means++', precompute_distances = 'auto', n_init = 20, max_iter = 300, n_jobs = 1)
else:
mbkm = MiniBatchKMeans(k, 'k-means++', max_iter = 300, batch_size = data.shape[0] / k, n_init = 20)
mbkm.fit(data)
centroids = mbkm.cluster_centers_
cluster_labels = mbkm.labels_
return centroids, cluster_labels
def hierarchical_clustering(data, n_clusters):
from .Scalable_SLINK import SLINK
from scipy.cluster.hierarchy import fcluster
assert isinstance(n_clusters, int) and n_clusters > 1
linkage_matrix = SLINK(data)
cluster_labels = fcluster(linkage_matrix, n_clusters - 1, 'maxclust')
return cluster_labels
def toHDF5(hdf5_file_name, data_file_name, expected_rows, sampling_fraction,
clustering_parameters, skip_rows, cell_IDs_column,
extra_excluded_columns, time_info_column, scaling = False,
PCA_flag = False, N_PCA = 10):
"""Read the data and store it in HDF5 format.
Also records the cell/sample names.
If applicable, create spaces in this data structure
for various arrays involved
in affinity propagation clustering.
Parameters
----------
hdf5_file_name : string or file object
data_file_name : string or file object
expected_rows : int
sampling_fraction : float
clustering_parameters : namedtuple
skip_rows : int
cell_IDs_column : int
extra_excluded_columns : list
time_info_column : int
scaling : Boolean, optional (default = False)
PCA_flag : Boolean, optional (default = False)
N_PCA : int, optional (default = 10)
Returns
-------
data : array (n_samples, n_features)
cell_IDs : array (n_samples,)
time_info : array (n_samples,)
hdf5_file_name : file object or string
"""
assert isinstance(expected_rows,int)
assert isinstance(sampling_fraction, float) or isinstance(sampling_fraction, int)
assert isinstance(skip_rows, int)
assert isinstance(cell_IDs_column, int)
cell_IDs, time_info, data = dataProcessor(data_file_name, skip_rows,
cell_IDs_column, extra_excluded_columns, time_info_column)
unexpressed_indices = reportUnexpressed(data)
if unexpressed_indices.size != 0:
data = np.delete(data, unexpressed_indices, axis = 1)
cell_IDs = np.delete(cell_IDs, unexpressed_indices)
if time_info_column > 0:
time_info = np.delete(time_info, unexpressed_indices)
# Done with detecting unexpressed genes or reporters.
method = clustering_parameters.clustering_method
if scaling or method == 'DBSCAN':
data = StandardScaler().fit_transform(data)
if PCA_flag:
if not scaling:
data = StandardScaler().fit_transform(data)
pca = PCA(copy = True)
data = pca.fit_transform(data)[:, :N_PCA]
N_samples = data.shape[0]
subsample_size = int(floor(N_samples * sampling_fraction))
# Create an HDF5 data structure. For data-sets with a large number of samples,
# ensemble clustering has to handle arrays that may not fit into memory, so we
# store them on disk, along with the set of probability distributions of
# distances computed for each pair of clusters from the final consensus
# clustering. The same HDF5 file format is also used by our implementation of
# affinity propagation clustering.
with tables.open_file(hdf5_file_name, mode = 'w') as fileh:
consensus_group = fileh.create_group(fileh.root, "consensus_group")
atom = tables.Float32Atom()
if method == 'affinity_propagation':
aff_prop_group = fileh.create_group(fileh.root, "aff_prop_group")
fileh.create_carray(aff_prop_group, 'availabilities', atom, (subsample_size, subsample_size), 'Matrix of availabilities for affinity propagation', filters = None)
fileh.create_carray(aff_prop_group, 'responsibilities', atom, (subsample_size, subsample_size), 'Matrix of responsibilities for affinity propagation', filters = None)
fileh.create_carray(aff_prop_group, 'similarities', atom, (subsample_size, subsample_size), 'Matrix of similarities for affinity propagation', filters = None)
fileh.create_carray(aff_prop_group, 'temporaries', atom, (subsample_size, subsample_size), 'Matrix of temporaries for affinity propagation', filters = None)
fileh.create_carray(aff_prop_group, 'parallel_updates', atom, (subsample_size, clustering_parameters.convergence_iter), 'Matrix of parallel updates for affinity propagation', filters = None)
return data, cell_IDs, time_info, hdf5_file_name
def dataProcessor(data_file_name, skip_rows, cell_IDs_column,
extra_excluded_columns = None, time_info_column = -1):
"""Read the contents of data_file_name, extracting the names
or IDs of each sample, as stored in column labelled
'cell_IDs_column' and creating an array of the features,
excluding those stored in 'extra_excluded_columns'.
Also check the validity of the excluded indices.
Parameters
----------
data_file_name : file object or string
skip_rows : int
cell_IDs_column : int
extra_excluded_columns : list, optional (default = None)
Returns
-------
cell_IDs : array (n_samples,)
time_info : array (n_samples,)
data : array (n_samples, n_features)
"""
assert isinstance(skip_rows, int)
assert isinstance(cell_IDs_column, int)
assert isinstance(time_info_column, int)
assert time_info_column != cell_IDs_column
if not (isinstance(skip_rows, int) and skip_rows >= 0):
    time_now = datetime.datetime.today()
    format = "%Y-%m-%d %H:%M:%S"
    time_now = str(time_now.strftime(format))
    print('\nECLAIR\t ERROR\t {}: the number of rows to skip as part of a header must be a non-negative integer.\n'.format(time_now))
    raise TypeError('skip_rows must be a non-negative integer')
if not (isinstance(cell_IDs_column, int) and cell_IDs_column >= 0):
    time_now = datetime.datetime.today()
    format = "%Y-%m-%d %H:%M:%S"
    time_now = str(time_now.strftime(format))
    print('\nECLAIR\t ERROR\t {}: the label distinguishing the column of cell IDs from other features must be a single integer.\n'.format(time_now))
    raise TypeError('cell_IDs_column must be a non-negative integer')
cell_IDs_column = [cell_IDs_column]
with open(data_file_name, 'r') as f:
lst = f.readline()
lst = lst.replace('\t', ' ').replace(',', ' ').split()
N_cols = len(lst)
assert time_info_column < N_cols
with open(data_file_name, 'r') as f:
cell_IDs = np.loadtxt(f, dtype = str, delimiter = '\t',
skiprows = skip_rows, usecols = cell_IDs_column)
if time_info_column > 0:
time_info_column = [time_info_column]
with open(data_file_name, 'r') as f:
time_info = np.loadtxt(f, dtype = float, delimiter = '\t',
skiprows = skip_rows, usecols = [time_info_column])
else:
time_info_column = []
time_info = np.zeros(0, dtype = float)
if extra_excluded_columns is None:
extra_excluded_columns = np.empty(0, dtype = int)
else:
extra_excluded_columns = np.array(extra_excluded_columns, dtype = int,
copy = False)
extra_excluded_columns = np.clip(np.append(extra_excluded_columns, cell_IDs_column), 0, N_cols - 1)
extra_excluded_columns = np.unique(extra_excluded_columns)
ID_index = np.where(extra_excluded_columns == cell_IDs_column[0])[0]
if ID_index.size != 0:
extra_excluded_columns = np.delete(extra_excluded_columns, ID_index)
if len(time_info_column) > 0:
time_index = np.where(extra_excluded_columns == time_info_column[0])[0]
if time_index.size != 0:
extra_excluded_columns = np.delete(extra_excluded_columns, time_index)
indices = np.delete(np.arange(N_cols), np.concatenate((extra_excluded_columns, cell_IDs_column, time_info_column)).astype(int))
my_iterator = iter(indices)
with open(data_file_name, 'r') as f:
data = np.loadtxt(f, dtype = float, delimiter = '\t',
skiprows = skip_rows, usecols = my_iterator)
return cell_IDs, time_info, data
def reportUnexpressed(data):
"""If a gene is unexpressed throughout all samples,
remove its index from the data-set.
Parameters
----------
data : array (n_samples, n_features)
"""
return np.where(data.sum(axis = 0) == 0)[0]
def build_weighted_adjacency(graph):
adjacency_matrix = np.asarray(graph.get_adjacency(type = igraph.GET_ADJACENCY_BOTH).data)
adjacency_matrix = adjacency_matrix.astype(dtype = float)
N_clusters = adjacency_matrix.shape[0]
c = 0
for i in xrange(N_clusters - 1):
for j in xrange(i + 1, N_clusters):
if adjacency_matrix[i, j]:
x = graph.es['weight'][c]
adjacency_matrix[i, j] = x
adjacency_matrix[j, i] = x
c += 1
return adjacency_matrix
def handle_off_diagonal_zeros(M):
eensy = np.finfo(np.float32).eps * 1000
M = np.array(M, copy = False)
n = M.shape[0]
zeros = np.where(M == 0)
for i in xrange(zeros[0].size):
if zeros[0][i] != zeros[1][i]:
M[zeros[0][i], zeros[1][i]] = eensy
M[np.diag_indices(n)] = 0
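# Example: for M = np.array([[0., 0.], [3., 0.]]), the off-diagonal zero at
# (0, 1) is replaced by a tiny positive epsilon while the diagonal is forced
# back to exactly zero; when M is already a NumPy array it is modified in place.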
def get_MST(run, exemplars, exemplars_similarities, cell_IDs, name_tag,
output_directory):
"""Build the minimum spanning tree of the graph whose nodes
are given by 'exemplars' and 'cell_IDs' and whose edges
are weighted according to the second argument,
a matrix of similarities.
Plot the MST and returns its structure.
Parameters
----------
run : int
exemplars : array (n_clusters,)
exemplars_similarities : array (n_clusters, n_clusters)
cell_IDs : list
name_tag : string
Returns
-------
A sparse adjacency matrix for the spanning tree in CSR format
"""
assert isinstance(run, int)
assert isinstance(name_tag, str)
n = len(exemplars)
handle_off_diagonal_zeros(exemplars_similarities)
g = igraph.Graph.Weighted_Adjacency(exemplars_similarities.tolist(),
mode=igraph.ADJ_UPPER, attr = 'weight')
g.vs['label'] = [cell_IDs[exemplar] for exemplar in exemplars]
mst = g.spanning_tree(weights = g.es['weight'])
layout = mst.layout('fr')
name = output_directory + '/ECLAIR_figures/{}/mst-run-{}__{}.pdf'.format(name_tag, run + 1, name_tag)
igraph.plot(mst, name, bbox = (5500, 5500), margin = 60, layout = layout,
vertex_label_dist = 1)
mst_adjacency_matrix = np.asarray(mst.get_adjacency(type = igraph.GET_ADJACENCY_BOTH).data)
return scipy.sparse.csr_matrix(mst_adjacency_matrix)
def get_median(values, counts):
N_pairs = np.sum(counts)
median_index = (N_pairs + 1) / 2 if N_pairs % 2 else N_pairs / 2
cumsum = 0
for i, v in enumerate(values):
cumsum += counts[i]
if cumsum >= median_index:
if N_pairs % 2:
median = v
break
if N_pairs % 2 == 0 and cumsum >= median_index + 1:
median = v
break
else:
median = int(np.rint((v + values[i + 1]) / 2))
break
return median
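# Worked example: values = [1, 2, 5] with counts = [1, 3, 1] describe the
# multiset {1, 2, 2, 2, 5}; N_pairs = 5 is odd, the median index is 3, and the
# cumulative count first reaches 3 at the value 2, so get_median returns 2.
# (The function assumes 'values' is sorted in increasing order.)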
def tree_path_integrals(hdf5_file_name, N_runs, cluster_dims_list, consensus_labels,
mst_adjacency_list, markov = False):
"""For each pair of cluster from the final ensemble clustering,
compute a distribution of distances as follows.
Assume that n_A cells are grouped into cluster A and n_B
into cluster B. Given cell 'a' from group A,
for each cell 'b' from group B, collect the distances
separating the cluster where 'a' resides in run 'i'
from the cluster where 'b' belongs in run 'i';
do so for each of the 'N_runs' separate runs of subsampling
and clusterings.
Parameters
----------
hdf5_file_name : string or file object
N_runs : int
cluster_dims_list : list of size N_clusters
consensus_labels : list or array (n_samples)
mst_adjacency_list : list of arrays
Returns
-------
consensus_medians : array (N_clusters, N_clusters)
consensus_means : array (N_clusters, N_clusters)
consensus_variances : array (N_clusters, N_clusters)
"""
assert isinstance(N_runs, int) and N_runs > 0
hypergraph_adjacency = CE.load_hypergraph_adjacency(hdf5_file_name)
cluster_runs_adjacency = hypergraph_adjacency.transpose().tocsr()
del hypergraph_adjacency
consensus_labels = np.asarray(consensus_labels)
N_samples = consensus_labels.size
N_clusters = np.unique(consensus_labels).size
consensus_means = np.zeros((N_clusters, N_clusters), dtype = float)
consensus_variances = np.zeros((N_clusters, N_clusters), dtype = float)
consensus_medians = np.zeros((N_clusters, N_clusters), dtype = int)
fileh = tables.open_file(hdf5_file_name, 'r+')
consensus_distributions_values = fileh.create_vlarray(fileh.root.consensus_group, 'consensus_distributions_values', tables.UInt16Atom(), 'For each clusters a, b > a from ensemble clustering, stores the possible time steps it takes to reach cells from cluster a to cells from cluster b, over the possible causal sets associated to an ensemble of partitions', filters = None, expectedrows = N_clusters * (N_clusters - 1) / 2)
consensus_distributions_counts = fileh.create_vlarray(fileh.root.consensus_group, 'consensus_distributions_counts', tables.Float128Atom(), 'For each clusters a, b > a from ensemble clustering, stores for each time step the number of its occurrences, weighted by the probability that such a time step takes place on the trees from the ensemble', filters = None, expectedrows = N_clusters * (N_clusters - 1) / 2)
cluster_separators = np.cumsum(cluster_dims_list)
for a in xrange(N_clusters - 1):
cells_in_a = np.where(consensus_labels == a)[0]
for b in xrange(a+1, N_clusters):
cells_in_b = np.where(consensus_labels == b)[0]
count = 0
counts_dict = defaultdict(int)
weights_dict = defaultdict(float)
for run in xrange(N_runs):
if cells_in_a.size == 1 and (cluster_separators[run + 1] - cluster_separators[run]) == 1:
single_elt = cluster_runs_adjacency[cells_in_a, cluster_separators[run]]
if single_elt == 1:
cluster_IDs_a = np.zeros(1, dtype = np.int32)
else:
cluster_IDs_a = np.zeros(0, dtype = np.int32)
else:
try:
cluster_IDs_a = np.where(np.squeeze(np.asarray(cluster_runs_adjacency[cells_in_a, cluster_separators[run]:cluster_separators[run + 1]].todense())) == 1)
except ValueError:
continue
if isinstance(cluster_IDs_a, tuple):
if len(cluster_IDs_a) == 1:
if (cluster_separators[run + 1] - cluster_separators[run]) == 1:
cluster_IDs_a = np.zeros(cluster_IDs_a[0].size, dtype = np.int32)
else:
cluster_IDs_a = cluster_IDs_a[0]
else:
cluster_IDs_a = cluster_IDs_a[1]
if cluster_IDs_a.size == 0:
continue
if cells_in_b.size == 1 and (cluster_separators[run + 1] - cluster_separators[run]) == 1:
single_elt = cluster_runs_adjacency[cells_in_b, cluster_separators[run]]
if single_elt == 1:
cluster_IDs_b = np.zeros(1, dtype = np.int32)
else:
cluster_IDs_b = np.zeros(0, dtype = np.int32)
else:
try:
cluster_IDs_b = np.where(np.squeeze(np.asarray(cluster_runs_adjacency[cells_in_b, cluster_separators[run]:cluster_separators[run + 1]].todense())) == 1)
except ValueError:
continue
if isinstance(cluster_IDs_b, tuple):
if len(cluster_IDs_b) == 1:
if (cluster_separators[run + 1] - cluster_separators[run]) == 1:
cluster_IDs_b = np.zeros(cluster_IDs_b[0].size, dtype = np.int32)
else:
cluster_IDs_b = cluster_IDs_b[0]
else:
cluster_IDs_b = cluster_IDs_b[1]
if cluster_IDs_b.size == 0:
continue
cluster_IDs_a, counts_a = np.unique(cluster_IDs_a,
return_counts = True)
cluster_IDs_b, counts_b = np.unique(cluster_IDs_b,
return_counts = True)
n_a = cluster_IDs_a.size
n_b = cluster_IDs_b.size
mst_time_steps_matrix = scipy.sparse.csgraph.dijkstra(mst_adjacency_list[run], directed = False, unweighted = True)
#x = mst_time_steps_matrix[cluster_IDs_a]
#y = np.zeros((mst_time_steps_matrix.shape[1], n_b), dtype = np.int32)
#y[(cluster_IDs_b, xrange(n_b))] = 1
#time_steps_values = np.dot(x, y)
time_steps_values = mst_time_steps_matrix[cluster_IDs_a]
time_steps_values = time_steps_values[:, cluster_IDs_b]
time_steps_values = time_steps_values.astype(int)
time_steps_counts = np.dot(counts_a.reshape(-1, 1), counts_b.reshape(1, -1))
if markov:
mst_adjacency = np.squeeze(np.asarray(mst_adjacency_list[run].todense()))
Delta = np.sum(mst_adjacency, axis = 1).reshape(-1, 1).astype(float)
transition_probabilities = np.divide(mst_adjacency, Delta)
time_steps_probabilities = np.zeros((n_a, n_b),
dtype = float)
for time_step in np.unique(time_steps_values):
idx = np.where(time_steps_values == time_step)
new_x = map(lambda i: cluster_IDs_a[i], idx[0])
new_x = np.array(new_x, dtype = int)
new_y = map(lambda i: cluster_IDs_b[i], idx[1])
new_y = np.array(new_y, dtype = int)
mapped_idx = (new_x, new_y)
time_reversed_mapped_idx = (new_y, new_x)
if time_step == 0:
time_steps_probabilities[idx] = 1
elif time_step == 1:
tmp_1 = transition_probabilities[mapped_idx]
tmp_2 = transition_probabilities[time_reversed_mapped_idx]
time_steps_probabilities[idx] = np.minimum(tmp_1, tmp_2)
else:
markov_chain = np.linalg.matrix_power(transition_probabilities, time_step)
tmp_1 = markov_chain[mapped_idx]
tmp_2 = markov_chain[time_reversed_mapped_idx]
time_steps_probabilities[idx] = np.minimum(tmp_1, tmp_2)
time_steps_weights = time_steps_counts * time_steps_probabilities
else:
time_steps_weights = time_steps_counts
time_steps_counts = np.ravel(time_steps_counts)
time_steps_values = np.ravel(time_steps_values)
time_steps_weights = np.ravel(time_steps_weights)
for i in xrange(n_a * n_b):
counts_dict[time_steps_values[i]] += time_steps_counts[i]
weights_dict[time_steps_values[i]] += time_steps_weights[i]
count += 1
if count > 0:
time_steps = np.array(counts_dict.keys(), dtype = int)
counts = np.array(counts_dict.values(), dtype = int)
weights = np.array(weights_dict.values(), dtype = float)
consensus_distributions_values.append(time_steps)
consensus_distributions_counts.append(weights)
median = get_median(time_steps, counts)
consensus_medians[a, b] = median
consensus_medians[b, a] = median
normalization_factor = weights.sum()
weights /= float(normalization_factor)
mean = np.inner(time_steps, weights)
consensus_means[a, b] = mean
consensus_means[b, a] = mean
variance = np.inner((time_steps - mean) ** 2, weights)
consensus_variances[a, b] = variance
consensus_variances[b, a] = variance
else:
consensus_distributions_values.append([0])
consensus_distributions_counts.append([0])
consensus_medians[a, b] = np.nan
consensus_medians[b, a] = np.nan
consensus_means[a, b] = np.nan
consensus_means[b, a] = np.nan
consensus_variances[a, b] = np.nan
consensus_variances[b, a] = np.nan
fileh.close()
return consensus_medians, consensus_means, consensus_variances
def one_to_max(array_in):
"""Alter a vector of cluster labels to a dense mapping.
Given that this function is herein always called after passing
a vector to the function checkcl, one_to_max relies on the assumption
that cluster_run does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities.
"""
x = np.asanyarray(array_in)
N_in = x.size
array_in = x.reshape(N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in xrange(N_in):
if last != sorted_array[i] or np.isnan(last):
last = sorted_array[i]
current_index += 1
sorted_array[i] = current_index
result = np.empty(N_in, dtype = int)
result[sorting_indices] = sorted_array
return result
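# Example: one_to_max([10, 3, 3, 7]) returns array([2, 0, 0, 1]); the original
# IDs are remapped to consecutive integers 0..k-1 in increasing order of value.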
def handle_disconnected_graph(consensus_medians, consensus_means,
consensus_variances, consensus_labels):
n = consensus_means.shape[0]
assert n == np.amax(consensus_labels) + 1
means_nan_ID = np.empty((n, n), dtype = int)
np.isnan(consensus_means, means_nan_ID)
null_means = np.where(means_nan_ID.sum(axis = 1) == n - 1)
variances_nan_ID = np.empty((n, n), dtype = int)
np.isnan(consensus_variances, variances_nan_ID)
null_variances = np.where(variances_nan_ID.sum(axis = 1) == n - 1)
medians_nan_ID = np.empty((n, n), dtype = int)
np.isnan(consensus_medians, medians_nan_ID)
null_medians = np.where(medians_nan_ID.sum(axis = 1) == n - 1)
if not (np.allclose(null_means[0], null_variances[0]) and np.allclose(null_means[0], null_medians[0])):
time_now = datetime.datetime.today()
format = "%Y-%m-%d %H:%M:%S"
time_now = str(time_now.strftime(format))
raise ValueError('\nECLAIR\t ERROR\t {}: serious problem with the noise clusters: mismatch between the computations of means and variances.\n'.format(time_now))
exclude_nodes = null_means[0]
consensus_medians = np.delete(consensus_medians, exclude_nodes, axis = 0)
consensus_medians = np.delete(consensus_medians, exclude_nodes, axis = 1)
consensus_means = np.delete(consensus_means, exclude_nodes, axis = 0)
consensus_means = np.delete(consensus_means, exclude_nodes, axis = 1)
consensus_variances = np.delete(consensus_variances, exclude_nodes, axis = 0)
consensus_variances = np.delete(consensus_variances, exclude_nodes, axis = 1)
for elt in exclude_nodes:
consensus_labels[np.where(consensus_labels == elt)[0]] = n
consensus_labels = one_to_max(consensus_labels)
consensus_labels[np.where(consensus_labels == n - exclude_nodes.size)[0]] = -1
cells_kept = np.where(consensus_labels != -1)[0]
return consensus_medians, consensus_means, consensus_variances, consensus_labels, cells_kept, exclude_nodes
def MST_coloring(data, consensus_labels):
data = StandardScaler().fit_transform(data)
pca = PCA(copy = True)
rotated_data = pca.fit_transform(data)
N_clusters = np.amax(consensus_labels) + 1
cc_centroids = np.zeros((N_clusters, data.shape[1]), dtype = float)
for cluster_ID in xrange(N_clusters):
cc_centroids[cluster_ID] = np.median(rotated_data[np.where(consensus_labels == cluster_ID)[0]], axis = 0)
cc_centroids = pca.transform(cc_centroids)
min_PCA = np.amin(cc_centroids[:, 0:3])
max_PCA = np.amax(cc_centroids[:, 0:3])
cc_RGB = np.divide(cc_centroids[:, 0:3] - min_PCA, max_PCA - min_PCA)
cc_RGB *= 255
cc_RGB = np.rint(cc_RGB)
cc_RGB = cc_RGB.astype(dtype = int)
cc_RGB = np.clip(cc_RGB, 0, 255)
cc_colors = ["rgb({}, {}, {})".format(cc_RGB[i, 0], cc_RGB[i, 1], cc_RGB[i, 2]) for i in xrange(N_clusters)]
return cc_colors
def DBSCAN_LOAD(hdf5_file_name, data, subsamples_matrix, clustering_parameters):
assert clustering_parameters.clustering_method == 'DBSCAN'
minPts = clustering_parameters.minPts
eps = clustering_parameters.eps
quantile = clustering_parameters.quantile
metric = clustering_parameters.metric
return DBSCAN_multiplex.load(hdf5_file_name, data, minPts, eps, quantile,
subsamples_matrix, metric = metric)
def subsamples_clustering(hdf5_file_name, data, sampled_indices, clustering_parameters, run):
time_now = datetime.datetime.today()
format = "%Y-%m-%d %H:%M:%S"
time_now = str(time_now.strftime(format))
print('ECLAIR\t INFO\t {}: starting run of clustering number {:n}.'.format(time_now, run + 1))
beg_clustering = time.time()
method = clustering_parameters.clustering_method
if method == 'affinity_propagation':
# Perform affinity propagation clustering with our customized module,
# computing and storing to disk the similarities matrix
# between those sampled cells, as a preliminary step.
args = "Concurrent_AP -f {0} -c {1} -i {2}".format(hdf5_file_name,
clustering_parameters.convergence_iter, clustering_parameters.max_iter)
subprocess.call(args, shell = True)
with open('./concurrent_AP_output/cluster_centers_indices.tsv', 'r') as f:
cluster_centers_indices = np.loadtxt(f, dtype = float, delimiter = '\t')
with open('./concurrent_AP_output/labels.tsv', 'r') as f:
cluster_labels = np.loadtxt(f, dtype = float, delimiter = '\t')
exemplars = [sampled_indices[i] for i in cluster_centers_indices]
# Exemplars: indices as per the full data-set
# that are the best representatives of their respective clusters
centroids = data[exemplars, :]
elif method == 'DBSCAN':
sampled_data = np.take(data, sampled_indices, axis = 0)
minPts = clustering_parameters.minPts
metric = clustering_parameters.metric
_, cluster_labels = DBSCAN_multiplex.shoot(hdf5_file_name, minPts, run,
random_state = None, verbose = True)
cluster_IDs = np.unique(cluster_labels)
cluster_IDs = np.extract(cluster_IDs != -2, cluster_IDs)
from pathlib import Path
import unittest
import numpy as np
import pylas
from laserfarm.grid import Grid
try:
import matplotlib
matplotlib_available = True
except ModuleNotFoundError:
matplotlib_available = False
if matplotlib_available:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class TestValidGridSetup(unittest.TestCase):
def setUp(self):
self.grid = Grid()
self.grid.setup(0., 0., 20., 20., 5)
def test_gridMins(self):
np.testing.assert_allclose(self.grid.grid_mins, [0., 0.])
def test_gridMaxs(self):
np.testing.assert_allclose(self.grid.grid_maxs, [20., 20.])
def test_gridWidth(self):
np.testing.assert_allclose(self.grid.grid_width, 20.)
def test_tileWidth(self):
np.testing.assert_allclose(self.grid.tile_width, 4.)
def test_tileIndexForPoint(self):
np.testing.assert_array_equal(self.grid.get_tile_index(0.1, 0.2),
(0, 0))
def test_tileIndexForArray(self):
np.testing.assert_array_equal(self.grid.get_tile_index((0.1, 19.9),
(0.2, 19.8)),
((0, 0), (4, 4)))
def test_tileBoundsForPoint(self):
np.testing.assert_array_equal(self.grid.get_tile_bounds(0, 0),
((0., 0.), (4., 4.)))
def test_tileBoundsForArray(self):
np.testing.assert_array_equal(self.grid.get_tile_bounds((0, 0),
(0, 1)),
(((0., 0.), (0., 4.)),
((4., 4.), (4., 8.))))
class TestInvalidGridSetup(unittest.TestCase):
def test_fractionalNumberOfTilesGrid(self):
with self.assertRaises(ValueError):
grid = Grid()
grid.setup(0., 0., 20., 20., 0.1)
def test_zeroNumberOfTilesGrid(self):
with self.assertRaises(ValueError):
grid = Grid()
grid.setup(0., 0., 20., 20., 0)
def test_zeroWidthGrid(self):
with self.assertRaises(ValueError):
grid = Grid()
grid.setup(0., 0., 0., 20., 5)
def test_rectangularGrid(self):
with self.assertRaises(ValueError):
grid = Grid()
grid.setup(0., 0., 10., 20., 5)
class TestRealGridValid(unittest.TestCase):
_test_dir = 'test_tmp_dir'
_test_data_dir = 'testdata'
_test_tile_idx = [101, 101]
_test_file_name = 'C_43FN1_1_1.LAZ'
_min_x = -113107.8100
_min_y = 214783.8700
_max_x = 398892.1900
_max_y = 726783.87
_n_tiles_sides = 256
plot = False
def setUp(self):
self.grid = Grid()
self.grid.setup(min_x=self._min_x,
min_y=self._min_y,
max_x=self._max_x,
max_y=self._max_y,
n_tiles_side=self._n_tiles_sides)
self._test_data_path = Path(self._test_data_dir).joinpath(self._test_file_name)
self.points = _read_points_from_file(str(self._test_data_path))
def test_isPointInTile(self):
x_pts, y_pts = self.points.T
mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
*self._test_tile_idx)
self.assertTrue(np.all(mask_valid_points))
import numpy as np
import pysubiso
import time
import itertools
import networkx as nx
import pytest
def reference_gen_possible_next_edges(adj, colors):
"""
"""
out = []
for i in range(adj.shape[0]):
for j in range(i +1, adj.shape[1]):
if adj[i, j] == 0:
for c in colors:
out.append([i, j, c])
return np.array(out, dtype=np.int32).reshape((len(out), 3))
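# Worked example: for a 3-node empty graph (adj all zeros) and colors = [1],
# the candidate edges are [[0, 1, 1], [0, 2, 1], [1, 2, 1]].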
def test_possible_next_edges():
np.random.seed(10)
# empty
adj = np.zeros((31, 31), dtype=np.int32)
possible_colors = np.array([1, 2, 3,4], dtype=np.int32)
possible_edges = pysubiso.gen_possible_next_edges(adj, possible_colors)
for N in [1, 5, 10, 20, 32, 64, 100]:
for color_n in [1, 4, 10]:
# color_n * 3 to generate sparsity
adj = np.random.randint(color_n*3, size=(N,N)).astype(np.int32)
adj[adj > color_n] = 0
adj = np.triu(adj, k=1)
possible_colors =np.arange(color_n, dtype=np.int32) + 1
t1 = time.time()
possible_edges = pysubiso.gen_possible_next_edges(adj, possible_colors)
t2 = time.time()
correct =reference_gen_possible_next_edges(adj, possible_colors)
t3 = time.time()
old_time = t3-t2
new_time = t2-t1
print(f"{old_time/new_time:3.1f} times faster ({old_time*1e6:3.1f} us vs {new_time*1e6:3.1f} us)")
np.testing.assert_array_equal(possible_edges, correct)
def test_get_color_edge_possible_table():
"""
Simple manual sanity-check examples
"""
# (node colors) -edge colors-
# (0) -1- (1) -2- (2) -3- (3)
adj = np.array([[0, 1, 0, 0],
[1, 0, 2, 0],
[0, 2, 0, 3],
[0, 0, 3, 0]], dtype=np.int32)
node_colors = np.array([0, 1, 2, 3], dtype=np.int32)
possible_edge_colors = np.array([1, 2, 3], dtype=np.int32)
possible_node_colors = np.unique(node_colors)
possible_table = pysubiso.get_color_edge_possible_table(adj, node_colors, possible_edge_colors)
ans = np.zeros((np.max(node_colors)
'''
mod on Jan 17, 2019
@author: trevaz (<EMAIL>)
---------------------------------------------------------------------
fctlib
---------------------------------------------------------------------
'''
#################################################################################
# IMPORTS
#################################################################################
import os
import sys
import numpy as np
#################################################################################
# CONSTANTS
#################################################################################
#################################################################################
# FUNCTIONS
#################################################################################
def zs_fct0(x,y):
X, Y = np.meshgrid(x,y,indexing='ij')
zs = np.zeros(X.shape)
return zs
def zs_fct1(x,y):
zs_h = 0.285
zs_l = 0.570
sigma = zs_l/1.1774
zs_x = 5.0
X, Y = np.meshgrid(x,y,indexing='ij')
zs = zs_h*np.exp(-0.5*((X-zs_x)/sigma)**2)
# for i in range (x.size):
# for j in range (y.size):
# if (x[i]-zs_x)**2+(y[j]-zs_y)**2>zs_l**2:
# zs[i,j] = 0.
return zs
def zs_fct2(x,y):
zs_h = 0.04
zs_l = 0.1
zs_x = 0.6
zs_y = 0.3
X, Y = np.meshgrid(x, y, indexing='ij')
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for plotting metrics."""
import pathlib
from typing import Any, List, Sequence
from absl import logging
from dm_c19_modelling.evaluation import base_indexing
from dm_c19_modelling.evaluation import constants
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import pandas as pd
def plot_metrics(metrics_df: pd.DataFrame, target_name: str,
last_observation_date: str, eval_dataset_creation_date: str,
forecast_horizon: int,
forecast_index_entries: Sequence[base_indexing.IndexEntryType],
num_dates: int, num_sites: int, cadence: int,
dropped_sites: np.ndarray) -> plt.Figure:
"""Plots metrics dataframe as a series of bar charts.
Args:
metrics_df: Dataframe of metrics, with columns [forecast_id, metric_name,
metric_value, target_name].
target_name: the target being predicted.
last_observation_date: the last date in the training data.
eval_dataset_creation_date: the creation date of the dataset used for
evaluation.
forecast_horizon: the number of days into the future that the forecasts
extend to.
forecast_index_entries: the entries in the forecast index for each of the
forecasts that are included in the metrics dataframe.
num_dates: the number of dates included in this evaluation.
num_sites: the number of sites included in this evaluation.
cadence: the cadence of the forecasts i.e. a cadence of 1 corresponds to
daily forecasts, a cadence of 7 corresponds to weekly forecasts.
dropped_sites: optional list of sites that were dropped during evaluation
from at least one forecast to ensure that all forecasts are for the same
sites.
Returns:
A series of bar plots, one for each metric calculated in the dataframe,
evaluating different forecasts against each other.
"""
fig = plt.figure(figsize=(4, 3))
plot_width = 2
offset = 0
column_width = 0.8
axes = []
metric_names = metrics_df.metric_name.unique()
for _ in metric_names:
ax = fig.add_axes([offset, 0.1, plot_width, 1.])
ax.grid(axis="y", alpha=0.3, which="both", zorder=0)
axes.append(ax)
offset += plot_width * 1.2
colour_map = plt.get_cmap("tab20c")(
np.linspace(0, 1.0, len(forecast_index_entries)))
x_centers = np.arange(len(forecast_index_entries))
for ax_idx, metric_name in enumerate(metric_names):
x_offset = ax_idx * column_width - plot_width / 2 + column_width / 2
x_values = x_centers + x_offset
ax = axes[ax_idx]
for bar_idx, forecast_entry in enumerate(forecast_index_entries):
forecast_id = forecast_entry["forecast_id"]
row = metrics_df.query(
f"forecast_id=='{forecast_id}' and metric_name=='{metric_name}'")
assert len(row) == 1, (
"Duplicate entries found in metrics dataframe. "
f"Found {len(row)} entries for {forecast_id} and {metric_name}")
row = row.iloc[0]
metric_value = row.metric_value
ax.bar(
x_values[bar_idx],
metric_value,
width=column_width,
zorder=2,
color=colour_map[bar_idx],
label=_get_model_label(forecast_entry))
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_ylabel(metric_name)
axes[0].legend(
ncol=len(forecast_index_entries),
loc="center left",
bbox_to_anchor=[0., 1.07],
frameon=False)
fig.text(0, 0, _get_plot_footnote(num_sites, num_dates, dropped_sites,
cadence))
fig.suptitle(
_get_plot_title(target_name, last_observation_date,
eval_dataset_creation_date, forecast_horizon),
y=1.35,
x=1)
return fig
def _get_model_label(forecast_entry: base_indexing.IndexEntryType) -> str:
"""Gets a description of a model from its entry in the forecast index."""
description = str(forecast_entry["forecast_id"])
if "model_description" in forecast_entry["extra_info"]:
description += f": {forecast_entry['extra_info']['model_description']}"
return description
def _get_plot_title(target_name: str, last_observation_date: str,
eval_dataset_creation_date: str,
forecast_horizon: int) -> str:
"""Gets the title of the plot."""
return (
f"Comparison of metrics for predicting {target_name}. Forecast date: "
f"{last_observation_date}, forecast horizon: {forecast_horizon} days, "
f"evaluation reporting date: {eval_dataset_creation_date}.")
def _get_plot_footnote(num_sites: int, num_dates: int,
dropped_sites: np.ndarray, cadence: int):
"""Gets the footnote to be added to the plot."""
footnote = (
f"Forecasts evaluated in this plot have a cadence of {cadence} days. "
f"{num_dates} dates and {num_sites} sites were included in the "
"evaluation that produced this plot.")
if dropped_sites.size:
footnote += (
"Note that the following sites were dropped from some forecasts during "
f"evaluation to achieve an overlapping set of sites: {dropped_sites}")
return footnote
def _plot_trajectories(
all_forecast_entries: List[Any],
all_forecast_arrays: List[Any],
target_name: constants.Targets,
num_sites: int,
eval_dataset: Any = None
) -> plt.Figure:
"""Plots trajectories.
Args:
all_forecast_entries: TODO
all_forecast_arrays: TODO
target_name: the target being predicted.
num_sites: number of sites to plot
eval_dataset: evaluation dataset
Returns:
Figure.
"""
fig = plt.figure(figsize=(16, 16))
dates = all_forecast_arrays[0].dates_array
num_dates = len(dates)
forecast_x = np.arange(num_dates)
x = forecast_x.copy()
x_stride = 14  # Two-week x tick strides.
previous_x = None
avg_values = []
for fa in all_forecast_arrays:
avg_values.append(np.squeeze(fa.data_array, axis=2))
#! /usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import numpy
import random
import pdb
import os
import threading
import time
import math
import glob
from scipy import signal
from scipy.io import wavfile
from torch.utils.data import Dataset, DataLoader
from audiomentations import Compose, PitchShift
import soundfile
def round_down(num, divisor):
return num - (num%divisor)
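# e.g. round_down(103, 10) == 100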
def worker_init_fn(worker_id):
numpy.random.seed(numpy.random.get_state()[1][0] + worker_id)
def loadWAV_test(filename, max_frames, evalmode=True, num_eval=10):
# Maximum audio length
max_audio = max_frames * 160 + 240
# Read wav file and convert to torch tensor
audio, sample_rate = soundfile.read(filename)
audiosize = audio.shape[0]
if audiosize <= max_audio:
shortage = max_audio - audiosize + 1
audio = numpy.pad(audio, (0, shortage), 'wrap')
audiosize = audio.shape[0]
if evalmode:
startframe = numpy.linspace(0,audiosize-max_audio,num=num_eval)
else:
startframe = numpy.array([numpy.int64(random.random()*(audiosize-max_audio))])
feats = []
if evalmode and max_frames == 0:
feats.append(audio)
else:
for asf in startframe:
feats.append(audio[int(asf):int(asf)+max_audio])
feat = numpy.stack(feats, axis=0).astype(float)
return feat
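# Usage sketch (file path is hypothetical): extract 10 evenly spaced crops of
# max_frames * 160 + 240 samples from an utterance for evaluation.
#
#     feats = loadWAV_test('spk1/utt1.wav', max_frames=200, evalmode=True, num_eval=10)
#     # feats.shape == (10, 200 * 160 + 240)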
def loadWAV(filename, max_frames, evalmode=True, num_eval=10, list_augment=[], ps=0):
# Maximum audio length
max_audio = max_frames * 160 + 240
# Read wav file and convert to torch tensor
sample_rate, audio = wavfile.read(filename)
audiosize = audio.shape[0]
if audiosize <= max_audio:
shortage = max_audio - audiosize + 1
audio = numpy.pad(audio, (0, shortage), 'wrap')
audiosize = audio.shape[0]
if evalmode:
startframe = numpy.linspace(0,audiosize-max_audio,num=num_eval)
else:
startframe = numpy.array([numpy.int64(random.random()*(audiosize-max_audio))])
feats = []
if evalmode and max_frames == 0:
feats.append(audio)
else:
for asf in startframe:
feats.append(audio[int(asf):int(asf)+max_audio])
temp_feat = numpy.stack(feats,axis=0).astype(numpy.float32)
if ps == 1:
semitones = random.randint(0, 7)
augment = list_augment[semitones]
feat = augment(samples=temp_feat, sample_rate=sample_rate)
else:
feat = temp_feat
return feat
class AugmentWAV(object):
def __init__(self, musan_path, rir_path, max_frames):
self.max_frames = max_frames
self.max_audio = max_audio = max_frames * 160 + 240
self.noisetypes = ['noise','speech','music']
self.noisesnr = {'noise':[0,15],'speech':[13,20],'music':[5,15]}
self.numnoise = {'noise':[1,1], 'speech':[3,7], 'music':[1,1] }
self.noiselist = {}
augment_files = glob.glob(os.path.join(musan_path,'*/*/*/*.wav'));
for file in augment_files:
if not file.split('/')[-4] in self.noiselist:
self.noiselist[file.split('/')[-4]] = []
self.noiselist[file.split('/')[-4]].append(file)
self.rir_files = glob.glob(os.path.join(rir_path,'*/*/*.wav'));
def additive_noise(self, noisecat, audio):
clean_db = 10 * numpy.log10(numpy.mean(audio ** 2)+1e-4)
numnoise = self.numnoise[noisecat]
noiselist = random.sample(self.noiselist[noisecat], random.randint(numnoise[0],numnoise[1]))
noises = []
for noise in noiselist:
noiseaudio = loadWAV(noise, self.max_frames, evalmode=False)
noise_snr = random.uniform(self.noisesnr[noisecat][0],self.noisesnr[noisecat][1])
noise_db = 10 * numpy.log10(numpy.mean(noiseaudio[0] ** 2)+1e-4)
noises.append(numpy.sqrt(10 ** ((clean_db - noise_db - noise_snr) / 10)) * noiseaudio)
# sum the stacked noise segments and add them to the clean audio
return numpy.sum(numpy.concatenate(noises, axis=0), axis=0, keepdims=True) + audio
import numpy as np
class DTL_corr(object):
def __init__(self, X, X_2, selection='mean', uc=2):
self.X = X
self.X_2 = X_2
self.K = X.shape[0]
self.n = X.shape[1]
self.m = len(X_2)
self.X_bar = np.mean(X, 1)
self.selection = selection
self.uc = uc
self.UC = self.X_bar + uc * np.std(X, axis=1, ddof=1)
if selection == 'mean':
self.win_idx = np.argmax(self.X_bar)
else:
self.win_idx = np.argmax(self.UC)
stat = self.test_statistic(self.X, self.X_2, return_D0=True)
self.theta_hat = stat[0]
self.D_0 = stat[1]
def basis(self, X_b, X_2_b, basis_type="naive"):
"""
Compute the basis given any generated data
:param X_b:
:param X_2_b:
:param basis_type:
:return:
"""
d_M = (np.sum(X_b[self.win_idx, :]) + np.sum(X_2_b)) / (len(X_b[self.win_idx, :]) + len(X_2_b))
if basis_type == "complete":
Z_b = np.concatenate([np.mean(X_b, 1), np.mean(X_b ** 2, 1), [np.mean(X_2_b)], [np.mean(X_2_b ** 2)]])
elif basis_type == "naive":
Z_b = np.mean(X_b, 1)
Z_b[self.win_idx] = d_M
elif basis_type == "withD0":
Z_b = np.mean(X_b, 1)
Z_b = np.concatenate([Z_b, np.array(d_M).reshape(1, )])
else:
raise AssertionError("invalid basis_type")
return Z_b
def test_statistic(self, X_b, X_2_b, return_D0=False):
"""
Compute the test statistic \hat{\theta}
:param X_b:
:param X_2_b:
:return:
"""
d_M = (np.sum(X_b[self.win_idx, :]) + np.sum(X_2_b)) / (len(X_b[self.win_idx, :]) + len(X_2_b))
if not return_D0:
return d_M
d_0 = np.mean(X_b[self.win_idx, :]) - np.mean(X_2_b)
return d_M, d_0
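# Worked example (illustrative numbers): if the winning arm's first-stage
# samples are [1., 2., 3.] and the second-stage samples are [2., 4.], then
# d_M = (6 + 6) / 5 = 2.4 and d_0 = 2.0 - 3.0 = -1.0.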
def gen_train_data(self, ntrain, n_b, m_b, basis_type="naive", return_gamma=True, remove_D0=False, blocklength=10):
"""
bootstrap training data
:param ntrain: number of training data
:param return_gamma: whether return gamma = Cov(B, theta_hat) Var(theta_hat)^{-1}
:return:
"""
Z_train = []
W_train = []
theta_hat_train = []
D0_train = []
X_pooled = np.concatenate([self.X[self.win_idx], self.X_2])
num_blocks_1 = n_b // blocklength
num_blocks_2 = m_b // blocklength
for i in range(ntrain):
X_b = np.zeros([self.K, num_blocks_1 * blocklength])
for k in range(self.K):
if k != self.win_idx:
indices = np.random.choice(self.n - blocklength, num_blocks_1, replace=True)
for s in range(num_blocks_1):
idx = indices[s]
data_block = self.X[k, idx: idx + blocklength]
X_b[k, s * blocklength: (s + 1) * blocklength] = data_block
if k == self.win_idx:
indices = np.random.choice(self.n + self.m - blocklength, num_blocks_1, replace=True)
for s in range(num_blocks_1):
idx = indices[s]
data_block = X_pooled[idx: idx + blocklength]
X_b[k, s * blocklength: (s + 1) * blocklength] = data_block
if self.selection == 'mean':
idx = np.argmax(np.mean(X_b, 1))
else:
idx = np.argmax(np.mean(X_b, 1) + self.uc * np.std(X_b, axis=1, ddof=1))
if idx == self.win_idx:
W_train.append(1)
else:
W_train.append(0)
indices = np.random.choice(self.n + self.m - blocklength, num_blocks_2, replace=True)
X_2_b = []
for s in range(num_blocks_2):
idx = indices[s]
X_2_b.extend(X_pooled[idx: idx + blocklength])
X_2_b = np.array(X_2_b)
Z_train.append(self.basis(X_b, X_2_b, basis_type))
stat = self.test_statistic(X_b, X_2_b, return_D0=True)
theta_hat_train.append(stat[0])
D0_train.append(stat[1])
Z_train = np.array(Z_train)
W_train = np.array(W_train)
result = {'Z_train': Z_train, 'W_train': W_train}
theta_hat_train = np.array(theta_hat_train)
D0_train = np.array(D0_train)
import numpy as np
import matplotlib.pyplot as plt
import pytest
import starry
def test_terminator_continuity(plot=False):
"""
Ensure the Oren-Nayar intensity is continuous across
the day/night boundary.
"""
# Simple map
map = starry.Map(reflected=True)
# Find the terminator latitude
ys = 1
zs = 1
b = -zs / np.sqrt(ys ** 2 + zs ** 2)
lat0 = np.arcsin(b) * 180 / np.pi
if plot:
# Latitude array spanning the terminator
delta = 1
lat = np.linspace(lat0 - delta, lat0 + delta, 1000)
# Lambertian intensity
map.roughness = 0
I_lamb = map.intensity(lat=lat, lon=0, xs=0, ys=ys, zs=zs).reshape(-1)
# Oren-Nayar intensity
map.roughness = 90
I_on94 = map.intensity(lat=lat, lon=0, xs=0, ys=ys, zs=zs).reshape(-1)
# View it
plt.plot(lat, I_lamb)
plt.plot(lat, I_on94)
plt.xlabel("lat")
plt.ylabel("I")
plt.show()
# Ensure there's a negligible jump across the terminator
eps = 1e-8
lat = np.array([lat0 - eps, lat0 + eps])
map.roughness = 90
diff = np.diff(
map.intensity(lat=lat, lon=0, xs=0, ys=ys, zs=zs).reshape(-1)
)[0]
assert np.abs(diff) < 1e-10, np.abs(diff)
def test_half_phase_discontinuity(plot=False):
"""
Ensure the Oren-Nayar intensity at a point is continuous
as we move across half phase (b = 0).
"""
# Simple map
map = starry.Map(reflected=True)
if plot:
# From crescent to gibbous
eps = 0.1
zs = np.linspace(-eps, eps, 1000)
# Oren-Nayar intensity
map.roughness = 90
I_on94 = map.intensity(lat=60, lon=0, xs=0, ys=1, zs=zs).reshape(-1)
# View it
plt.plot(-zs, I_on94)
plt.xlabel("b")
plt.ylabel("I")
plt.show()
# Ensure there's a negligible jump across the terminator
eps = 1e-8
zs = np.array([-eps, eps])
map.roughness = 90
diff = np.diff(
map.intensity(lat=60, lon=0, xs=0, ys=1, zs=zs).reshape(-1)
)[0]
assert np.abs(diff) < 1e-8, np.abs(diff)
def test_approximation(plot=False):
"""
Test our polynomial approximation to the Oren-Nayar intensity.
"""
# Simple map
map = starry.Map(reflected=True)
# Approximate and exact intensities
map.roughness = 90
img_approx = map.render(xs=1, ys=2, zs=3)
img_exact = map.render(xs=1, ys=2, zs=3, on94_exact=True)
img_diff = img_exact - img_approx
diff = img_diff.reshape(-1)
mu = np.nanmean(diff)
std = np.nanstd(diff)
maxabs = np.nanmax(np.abs(diff))
if plot:
fig, ax = plt.subplots(1, 3, figsize=(14, 2.5))
im = ax[0].imshow(
img_exact,
origin="lower",
extent=(-1, 1, -1, 1),
vmin=0,
vmax=np.nanmax(img_exact),
)
plt.colorbar(im, ax=ax[0])
im = ax[1].imshow(
img_approx,
origin="lower",
extent=(-1, 1, -1, 1),
vmin=0,
vmax=np.nanmax(img_exact),
)
plt.colorbar(im, ax=ax[1])
im = ax[2].imshow(img_diff, origin="lower", extent=(-1, 1, -1, 1))
plt.colorbar(im, ax=ax[2])
fig = plt.figure()
plt.hist(diff, bins=50)
plt.xlabel("diff")
plt.show()
assert np.abs(mu)
"""This module contains methods used for visualizing the enuerated structures."""
from phenum.base import testmode
from matplotlib import cm
import matplotlib
import os
if os.name != "nt":
matplotlib.use("Agg" if testmode else "TkAgg")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def HNF_shapes(enum,lattice,show,testmode=False):
"""Plots the shape of each HNF.
Args:
enum (str): The enum.in style input file.
lattice (str): The lattice.in style input file.
show (bool): If true each HNF is plotted in an interactive window.
testmode (bool, optional): True if unittests are running.
"""
from phenum.io_utils import read_lattice
from phenum.grouptheory import SmithNormalForm, get_full_HNF
from phenum.vector_utils import map_enumStr_to_real_space, cartesian2direct
from operator import mul
from numpy import array, mgrid, dot
try:
from functools import reduce
except ImportError: #pragma: no cover
import numpy as np
lattice_data = read_lattice(lattice)
system = _convert_read_lat_to_system_dat(lattice_data)
with open(enum,"r") as inf:
for line in inf:
if "#" in line:
pass
else:
structure = {}
hnf_name = [int(i) for i in line.strip().split()[:-1]]
if len(hnf_name) != 6:
hnf_name = hnf_name[0:6]
structure["HNF"] = get_full_HNF(hnf_name)
(SNF,L,R) = SmithNormalForm(structure["HNF"])
structure["diag"] = [SNF[0][0],SNF[1][1],SNF[2][2]]
structure["L"] = L
structure["n"] = reduce(mul,structure["diag"],1)
structure["labeling"] = "".join(["0" for i in range(structure["n"])])
system["nD"] = len(system["dvecs"])
space_data = map_enumStr_to_real_space(system,structure,True)
space_data["aBas"] = cartesian2direct(space_data["sLV"],space_data["aBas"],system["eps"])
# the corners of the polyhedron except the origin.
x = space_data["sLV"][0]
y = space_data["sLV"][1]
z = space_data["sLV"][2]
xy = (array(x)+array(y)).tolist()
xz = (array(x)+array(z)).tolist()
yz = (array(y)+array(z)).tolist()
xyz = (array(y)+array(x)+array(z)).tolist()
correct = [[0,0,0],x,y,z,xy,xz,yz,xyz]
xf = [i[0] for i in correct]
yf = [i[1] for i in correct]
zf = [i[2] for i in correct]
# we need to put the shifted atoms back into cartesian coordinates.
atoms = []
for atom in space_data["aBas"]:
atoms.append(dot(atom,space_data["sLV"]).tolist())
xa = [atom[0] for atom in atoms]
ya = [atom[1] for atom in atoms]
za = [atom[2] for atom in atoms]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect('equal')
ax.set_axis_off()
ax.scatter(xa,ya,za,zdir='z',c='red',s=1000)
# ax.scatter(xf,yf,zf,zdir='z',c='red')
ax.plot([0,x[0]],[0,x[1]],[0,x[2]],'k')
ax.plot([0,y[0]],[0,y[1]],[0,y[2]],'k')
ax.plot([0,z[0]],[0,z[1]],[0,z[2]],'k')
ax.plot([xz[0],x[0]],[xz[1],x[1]],[xz[2],x[2]],'k')
ax.plot([xz[0],z[0]],[xz[1],z[1]],[xz[2],z[2]],'k')
ax.plot([xy[0],x[0]],[xy[1],x[1]],[xy[2],x[2]],'k')
ax.plot([xy[0],y[0]],[xy[1],y[1]],[xy[2],y[2]],'k')
ax.plot([xz[0],z[0]],[xz[1],z[1]],[xz[2],z[2]],'k')
ax.plot([yz[0],z[0]],[yz[1],z[1]],[yz[2],z[2]],'k')
ax.plot([yz[0],y[0]],[yz[1],y[1]],[yz[2],y[2]],'k')
ax.plot([yz[0],xyz[0]],[yz[1],xyz[1]],[yz[2],xyz[2]],'k')
ax.plot([xz[0],xyz[0]],[xz[1],xyz[1]],[xz[2],xyz[2]],'k')
ax.plot([xy[0],xyz[0]],[xy[1],xyz[1]],[xy[2],xyz[2]],'k')
max_range = array([array(xf).max()-array(xf).min(),array(yf).max()-array(yf).min(),
array(zf).max()-array(zf).min()]).max()
xb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(array(xf).max()+array(xf).min())
yb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(array(yf).max()+array(yf).min())
zb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(array(zf).max()+array(zf).min())
for xb, yb, zb in zip(xb,yb,zb):
ax.plot([xb],[yb],[zb],'w')
if not testmode: # pragma: no cover
fig.savefig("{}.pdf".format("".join([str(i) for i in hnf_name])))
if show and not testmode: #pragma: no cover
plt.show()
else:
plt.close()
def HNF_atoms(enum,lattice,show,testmode=False):
"""Plots the atomic positions of the atoms in the cells.
Args:
enum (str): The enum.in style input file.
lattice (str): The lattice.in style input file.
show (bool): If true each HNF is plotted in an interactive window.
testmode (bool, optional): True if unit tests are being run.
"""
from phenum.io_utils import read_lattice
from phenum.grouptheory import SmithNormalForm, get_full_HNF
from phenum.vector_utils import map_enumStr_to_real_space, cartesian2direct
from operator import mul
from numpy import array, mgrid, dot
try:
from functools import reduce
except ImportError: #pragma: no cover
import numpy as np
lattice_data = read_lattice(lattice)
system = _convert_read_lat_to_system_dat(lattice_data)
with open(enum,"r") as inf:
for line in inf:
if "#" in line:
pass
else:
structure = {}
hnf_name = [int(i) for i in line.strip().split()[:-1]]
if len(hnf_name) != 6:
hnf_name = hnf_name[0:6]
structure["HNF"] = get_full_HNF(hnf_name)
(SNF,L,R) = SmithNormalForm(structure["HNF"])
structure["diag"] = [SNF[0][0],SNF[1][1],SNF[2][2]]
structure["L"] = L
structure["n"] = reduce(mul,structure["diag"],1)
structure["labeling"] = "".join(["0" for i in range(structure["n"])])
system["nD"] = len(system["dvecs"])
space_data = map_enumStr_to_real_space(system,structure,True)
space_data["aBas"] = cartesian2direct(space_data["sLV"],space_data["aBas"],system["eps"])
atoms = []
for atom in space_data["aBas"]:
atoms.append(dot(atom,space_data["sLV"]).tolist())
xf = [atom[0] for atom in atoms]
yf = [atom[1] for atom in atoms]
zf = [atom[2] for atom in atoms]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect('equal')
ax.set_axis_off()
ax.scatter(xf,yf,zf,zdir='z',c='red',s=1000)
max_range = array([array(xf).max()-array(xf).min(),array(yf).max()-array(yf).min(),
array(zf).max()-array(zf).min()]).max()
xb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(array(xf).max()+array(xf).min())
yb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(
|
array(yf)
|
numpy.array
|
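The plotting helpers above keep the 3-D axes to scale by scattering invisible white points at the corners of a cube that spans the largest axis range (the max_range/mgrid block). Below is a self-contained sketch of that equal-aspect trick with made-up points, not phenum output:

import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (needed on older matplotlib)

# Made-up 3-D points standing in for the cell corners.
pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.2, 0.1], [0.1, 2.0, 0.3], [0.2, 0.1, 0.5]])

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], c='red')

# Equal-aspect workaround: invisible points at the corners of a cube whose
# edge length equals the largest span over x, y and z.
max_range = (pts.max(axis=0) - pts.min(axis=0)).max()
mid = 0.5 * (pts.max(axis=0) + pts.min(axis=0))
corners = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2].reshape(3, -1).T + mid
for cx, cy, cz in corners:
    ax.plot([cx], [cy], [cz], 'w')
fig.savefig("aspect_demo.pdf")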
# Copyright (C) 2020 Zurich Instruments
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import numpy as np
from zhinst.toolkit.interface import LoggerModule
_logger = LoggerModule(__name__)
class Waveform(object):
"""Implements a waveform for two channels.
The 'data' attribute holds the waveform samples with the proper scaling,
granularity and minimal length, i.e. the actual interleaved waveform array
that can be sent to the instrument.
Arguments:
wave1 (array): list or numpy array for the waveform on channel 1, will
be scaled to have a maximum amplitude of 1
wave2 (array): list or numpy array for the waveform on channel 2, will
be scaled to have a maximum amplitude of 1
delay (float): individual waveform delay in seconds with respect to the
time origin of the sequence, a positive value shifts the start of
the waveform forward in time (default: 0)
granularity (int): granularity that the number of samples are aligned
to (default: 16)
align_start (bool): the waveform will be padded with zeros to match the
granularity, either before or after the samples (default: True)
Properties:
data (array): interleaved and normalized waveform data of the two
channels to be uploaded to the AWG
delay (double): delay in seconds of the individual waveform w.r.t. the
sequence time origin
buffer_length (int): number of samples of the sequencer buffer wave
"""
def __init__(self, wave1, wave2, delay=0, granularity=16, align_start=True):
self._granularity = granularity
self._align_start = align_start
self._waves = [wave1, wave2]
self._delay = delay
self._update()
def replace_data(self, wave1, wave2, delay=0):
"""Replaces the data in the waveform."""
new_buffer_length = self._round_up(max(len(wave1), len(wave2), 32))
self._delay = delay
if new_buffer_length == self.buffer_length:
self._waves = [wave1, wave2]
self._update()
else:
_logger.error(
"Waveform lengths don't match!",
_logger.ExceptionTypes.ToolkitError,
)
@property
def data(self):
return self._data
@property
def delay(self):
return self._delay
@property
def buffer_length(self):
return self._buffer_length
def _update(self):
"""Update the buffer length and data attributes for new waveforms."""
self._buffer_length = self._round_up(
max(len(self._waves[0]), len(self._waves[1]), 32)
)
self._data = self._interleave_waveforms(self._waves[0], self._waves[1])
def _interleave_waveforms(self, x1, x2):
"""Interleaves the waveforms of both channels and adjusts the scaling.
The data is actually sent as values in the range of +/- (2^15 - 1).
"""
if len(x1) == 0:
x1 =
|
np.zeros(1)
|
numpy.zeros
|
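The _interleave_waveforms method above is cut off by the prompt boundary; the docstring says the two channels are interleaved and scaled into the +/- (2^15 - 1) integer range. The sketch below shows one plausible version of that step in plain numpy. It is an illustration of the idea, not the toolkit's actual implementation, which also handles granularity padding and delays:

import numpy as np

def interleave_and_scale(wave1, wave2, full_scale=2**15 - 1):
    """Pad to a common length, normalize to |x| <= 1, interleave, scale to int16 range."""
    n = max(len(wave1), len(wave2), 32)
    w1 = np.zeros(n)
    w1[:len(wave1)] = wave1
    w2 = np.zeros(n)
    w2[:len(wave2)] = wave2
    peak = max(np.max(np.abs(w1)), np.max(np.abs(w2)), 1e-12)
    data = np.empty(2 * n)
    data[0::2] = w1 / peak      # channel 1 on even indices
    data[1::2] = w2 / peak      # channel 2 on odd indices
    return np.round(data * full_scale).astype(np.int16)

samples = interleave_and_scale(np.sin(np.linspace(0, np.pi, 64)), np.zeros(64))
assert samples.max() == 2**15 - 1 and len(samples) == 128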
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 2: Organizing One-Dimensional Data
# +
import numpy as np
import pandas as pd
pd.set_option("precision", 3)
# -
df = pd.read_csv("../python_stat_sample/data/ch2_scores_em.csv", index_col="生徒番号")
df.head()
# +
# Get the first 10 English scores
scores = np.array(df["英語"])[:10]
scores
# +
index = [chr(i+ord("A")) for i in range(10)]
scores_df = pd.DataFrame({"点数":scores},
index=pd.Index(index, name="生徒"))
scores_df
# -
# ## Mean
#
# \begin{align*}
# \bar{x} = \frac{1}{N} \sum_{i=1}^{N} x_i
# \end{align*}
#
# - $\bar{x}: \text{average}$
# - $N: \text{length of data}$
# - $x_i: \text{each data in }x$
sum(scores)/len(scores)
# Using numpy
np.mean(scores)
# Using pandas
scores_df.mean()
# ## Median
# Sort the data in order to compute the median
scores_sorted = np.sort(scores)
scores_sorted
# +
n = len(scores_sorted)
if n%2 == 0:
median = (scores_sorted[n//2 - 1] + scores_sorted[n//2])/2
else:
median = scores_sorted[n//2]
median
# -
# numpy
np.median(scores)
# pandas
scores_df.median()
# ## Mode
tmp_list = [1, 1, 1, 2, 2, 3]
pd.Series(tmp_list).mode()
# multiple modes in list
tmp_list = [i+1 for i in range(5)]
pd.Series(tmp_list).mode()
# ## Deviation
mean = np.mean(scores)
deviation = scores - mean
deviation
# keep copy of scores_df
summary_df = scores_df.copy()
summary_df["偏差"] = deviation
summary_df
scores_ = [50, 60, 58, 54, 51, 56, 57, 53, 52, 59]
mean_ =
|
np.mean(scores_)
|
numpy.mean
|
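The median cell above branches on whether the sample size is even or odd; here is a quick standalone check of that logic against numpy.median (not part of the notebook):

import numpy as np

def manual_median(values):
    s = np.sort(values)
    n = len(s)
    if n % 2 == 0:
        return (s[n // 2 - 1] + s[n // 2]) / 2   # mean of the two middle values
    return s[n // 2]                             # single middle value (0-indexed)

rng = np.random.default_rng(0)
for size in (9, 10):
    data = rng.integers(0, 100, size=size)
    assert manual_median(data) == np.median(data)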
import numpy as np
from numpy import exp, sqrt
from functools import partial
from scipy import optimize
from scipy.stats import norm
import scipy.integrate as integrate
from fox_toolbox.utils import rates
from hw.hw_helper import _B, _V, _A, sign_changes
from hw import hw_helper
from hw.hw_helper import get_var_x
"""This module price swaption under Hull White model using Jamshidian method.
Usage example:
from hw import Jamshidian as jamsh
jamsh_price, debug = jamsh.hw_swo(swo, ref_mr, sigma_hw_jamsh, dsc_curve, estim_curve)
swo : rates.Swaption
ref_mr : float
sigma_hw_jamsh : rates.Curve
dsc_curve : rates.RateCurve
estim_curve : rates.RateCurve
"""
def get_coef(swo, a, sigma, dsc_curve, estim_curve):
""" Coefficients for Put swaption from calibration basket. Jamishidian """
flt_adjs = swo.get_flt_adjustments(dsc_curve, estim_curve)
c0 = -_A(swo.expiry, swo.start_date, a, sigma, dsc_curve)
c = list(map(lambda dcf, pdate, fadj: dcf * (swo.strike - fadj) * _A(swo.expiry, pdate, a, sigma, dsc_curve),
swo.day_count_fractions, swo.payment_dates, flt_adjs))
c[-1] += _A(swo.expiry, swo.maturity, a, sigma, dsc_curve)
c.insert(0, c0)
return np.array(c)
def get_b_i(swo, a):
""" array of B_i for by each payment date """
b0 = _B(swo.expiry, swo.start_date, a)
b = list(map(lambda pdate: _B(swo.expiry, pdate, a), swo.payment_dates))
b.insert(0, b0)
return np.array(b)
def swap_value(coef, b_i, varx, x):
""" Swap function for finding x_star """
exp_b_var = exp(- b_i * sqrt(varx) * x)
return coef.dot(exp_b_var)
def get_x_star(coef, b_i, varx):
x0 = .0
func = partial(swap_value, coef, b_i, varx)
# optimum = optimize.newton(func, x0=x0)
optimum = optimize.bisect(func, -6, 6)
return optimum
def hw_swo_analytic(coef, b_i, varx, x_star, IsCall):
""" analytic """
sign = -1 if IsCall else 1
if IsCall: coef = np.negative(coef)
val_arr = exp(0.5 * b_i ** 2 * varx) * norm.cdf(sign*(x_star + b_i *
|
sqrt(varx)
|
numpy.sqrt
|
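In the Jamshidian decomposition used above, the key step is solving swap_value(x*) = 0 so that the swaption can be split into zero-coupon bond options; get_x_star does this with a bisection over [-6, 6]. The snippet below replays that root search with invented numbers: the coef, b_i and varx values are placeholders, not real calibration output.

import numpy as np
from scipy import optimize

coef = np.array([-1.0, 0.03, 0.03, 0.03, 1.03])    # -A(expiry, start) followed by coupon terms
b_i = np.array([0.0, 0.9, 1.7, 2.4, 3.0])          # B(expiry, t_i) factors
varx = 0.04                                        # variance of the state variable x

def swap_value(x):
    # Same functional form as in the module: sum_i coef_i * exp(-B_i * sqrt(varx) * x)
    return coef.dot(np.exp(-b_i * np.sqrt(varx) * x))

x_star = optimize.bisect(swap_value, -6, 6)
assert abs(swap_value(x_star)) < 1e-8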
import os
import collections
import itertools
from typing import Tuple, Union, Optional
import numpy as np
from continuum.datasets.base import _AudioDataset
from continuum.download import download, untar
class FluentSpeech(_AudioDataset):
"""FluentSpeechCommand dataset.
Made of short audio clips in which different speakers ask for something.
https://fluent.ai/fluent-speech-commands-a-dataset-for-spoken-language-understanding-research/
"""
URL = "http://fluent.ai:2052/jf8398hf30f0381738rucj3828chfdnchs.tar.gz"
def __init__(self, data_path, train: Union[bool, str] = True, download: bool = True):
if not isinstance(train, bool) and train not in ("train", "valid", "test"):
raise ValueError(f"`train` arg ({train}) must be a bool or train/valid/test.")
if isinstance(train, bool):
if train:
train = "train"
else:
train = "test"
data_path = os.path.expanduser(data_path)
super().__init__(data_path, train, download)
def _download(self):
if not os.path.exists(os.path.join(self.data_path, "fluent_speech_commands_dataset")):
tgz_path = os.path.join(self.data_path, "jf8398hf30f0381738rucj3828chfdnchs.tar.gz")
if not os.path.exists(tgz_path):
print("Downloading tgz archive...", end=" ")
download(
self.URL,
self.data_path
)
print("Done!")
print("Extracting archive...", end=" ")
untar(tgz_path)
print("Done!")
def get_data(self) -> Tuple[np.ndarray, np.ndarray, Optional[np.ndarray]]:
base_path = os.path.join(self.data_path, "fluent_speech_commands_dataset")
self.transcriptions = []
x, y, t = [], [], []
with open(os.path.join(base_path, "data", f"{self.train}_data.csv")) as f:
lines = f.readlines()[1:]
for line in lines:
items = line[:-1].split(',')
action, obj, location = items[-3:]
x.append(os.path.join(base_path, items[1]))
y.append([
self.class_ids[action+obj+location],
self.actions[action],
self.objects[obj],
self.locations[location]
])
if self.train == "train":
t.append(self.train_speaker_ids[items[2]])
elif self.train == "valid":
t.append(self.valid_speaker_ids[items[2]])
else:
t.append(self.test_speaker_ids[items[2]])
self.transcriptions.append(items[3])
return np.array(x),
|
np.array(y)
|
numpy.array
|
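get_data above assumes a fixed CSV column layout (index, path, speaker id, transcription, action, object, location) and builds the labels from the last three columns. A small illustration of that parsing on a fabricated row (the row content is invented for the example):

# Fabricated row in the layout the loader expects:
# index,path,speakerId,transcription,action,object,location
line = "0,wavs/speakers/abc/123.wav,abc,Turn on the lights,activate,lights,none\n"

items = line[:-1].split(',')          # drop the trailing newline, split the columns
action, obj, location = items[-3:]    # the labels are the last three columns
path, speaker, transcription = items[1], items[2], items[3]

assert (action, obj, location) == ("activate", "lights", "none")
assert path.endswith(".wav") and speaker == "abc"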
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = 'traffic-signs-data/train.p'
validation_file = 'traffic-signs-data/valid.p'
testing_file = 'traffic-signs-data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
# TODO: Number of training examples
n_train = len(y_train)
# TODO: Number of validation examples
n_validation = len(y_valid)
# TODO: Number of testing examples.
n_test = len(y_test)
# TODO: What's the shape of a traffic sign image?
image_shape = X_train[0].shape
# TODO: How many unique classes/labels are there in the dataset?
n_classes = len(set(y_train))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
#% matplotlib inline
import numpy as np
N = n_classes
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
training_hist = []
valid_hist = []
test_hist = []
for y in range(0, 43):
training_hist.append(np.count_nonzero(y_train == y))
valid_hist.append(np.count_nonzero(y_valid == y))
test_hist.append(np.count_nonzero(y_test == y))
fig, ax = plt.subplots()
rects1 = ax.bar(ind, training_hist, width, color='r')
rects2 = ax.bar(ind + width, valid_hist, width, color='y')
rects3 = ax.bar(ind + 2 * width, test_hist, width, color='b')
# add some text for labels, title and axes ticks
ax.set_ylabel('Number of samples in set')
ax.set_title('Number of samples')
ax.set_xticks(ind[::2])
# ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend((rects1[0], rects2[0], rects3[0]), ('Training', 'Validation', 'Test'))
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
import numpy as np
X_train =
|
np.array(X_train,dtype=np.float32)
|
numpy.array
|
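The preprocessing cell above only casts the images to float32, while the comment notes that normalization is required. One common choice for 8-bit images, shown here as a hedged suggestion rather than the notebook's actual scheme, is to map pixels into roughly [-1, 1):

import numpy as np

def normalize_images(x):
    """Map uint8 pixel values [0, 255] to approximately [-1, 1)."""
    return (x.astype(np.float32) - 128.0) / 128.0

demo = np.random.randint(0, 256, size=(4, 32, 32, 3), dtype=np.uint8)
demo_norm = normalize_images(demo)
assert demo_norm.min() >= -1.0 and demo_norm.max() < 1.0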
import numpy as np
import pytest
from astropy import units as u, constants
from plasmapy.physics.magnetostatics import (MagnetoStatics,
MagneticDipole,
Wire,
GeneralWire,
FiniteStraightWire,
InfiniteStraightWire,
CircularWire)
mu0_4pi = constants.mu0/4/np.pi
class Test_MagneticDipole:
def setup_method(self):
self.moment = np.array([0, 0, 1])*u.A*u.m*u.m
self.p0 =
|
np.array([0, 0, 0])
|
numpy.array
|
"""
Implementation of linear algebra operations.
"""
import contextlib
from llvmlite import ir
import numpy as np
import operator
from numba.core.imputils import (lower_builtin, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked)
from numba.core.typing import signature
from numba.core.extending import overload, register_jitable
from numba.core import types, cgutils
from numba.core.errors import TypingError
from .arrayobj import make_array, _empty_nd_impl, array_copy
from numba.np import numpy_support as np_support
ll_char = ir.IntType(8)
ll_char_p = ll_char.as_pointer()
ll_void_p = ll_char_p
ll_intc = ir.IntType(32)
ll_intc_p = ll_intc.as_pointer()
intp_t = cgutils.intp_t
ll_intp_p = intp_t.as_pointer()
# fortran int type, this needs to match the F_INT C declaration in
# _lapack.c and is present to accommodate potential future 64bit int
# based LAPACK use.
F_INT_nptype = np.int32
F_INT_nbtype = types.int32
# BLAS kinds as letters
_blas_kinds = {
types.float32: 's',
types.float64: 'd',
types.complex64: 'c',
types.complex128: 'z',
}
def get_blas_kind(dtype, func_name="<BLAS function>"):
kind = _blas_kinds.get(dtype)
if kind is None:
raise TypeError("unsupported dtype for %s()" % (func_name,))
return kind
def ensure_blas():
try:
import scipy.linalg.cython_blas
except ImportError:
raise ImportError("scipy 0.16+ is required for linear algebra")
def ensure_lapack():
try:
import scipy.linalg.cython_lapack
except ImportError:
raise ImportError("scipy 0.16+ is required for linear algebra")
def make_constant_slot(context, builder, ty, val):
const = context.get_constant_generic(builder, ty, val)
return cgutils.alloca_once_value(builder, const)
class _BLAS:
"""
Functions to return type signatures for wrapped
BLAS functions.
"""
def __init__(self):
ensure_blas()
@classmethod
def numba_xxnrm2(cls, dtype):
rtype = getattr(dtype, "underlying_float", dtype)
sig = types.intc(types.char, # kind
types.intp, # n
types.CPointer(dtype), # x
types.intp, # incx
types.CPointer(rtype)) # returned
return types.ExternalFunction("numba_xxnrm2", sig)
@classmethod
def numba_xxgemm(cls, dtype):
sig = types.intc(
types.char, # kind
types.char, # transa
types.char, # transb
types.intp, # m
types.intp, # n
types.intp, # k
types.CPointer(dtype), # alpha
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(dtype), # b
types.intp, # ldb
types.CPointer(dtype), # beta
types.CPointer(dtype), # c
types.intp # ldc
)
return types.ExternalFunction("numba_xxgemm", sig)
class _LAPACK:
"""
Functions to return type signatures for wrapped
LAPACK functions.
"""
def __init__(self):
ensure_lapack()
@classmethod
def numba_xxgetrf(cls, dtype):
sig = types.intc(types.char, # kind
types.intp, # m
types.intp, # n
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(F_INT_nbtype) # ipiv
)
return types.ExternalFunction("numba_xxgetrf", sig)
@classmethod
def numba_ez_xxgetri(cls, dtype):
sig = types.intc(types.char, # kind
types.intp, # n
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(F_INT_nbtype) # ipiv
)
return types.ExternalFunction("numba_ez_xxgetri", sig)
@classmethod
def numba_ez_rgeev(cls, dtype):
sig = types.intc(types.char, # kind
types.char, # jobvl
types.char, # jobvr
types.intp, # n
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(dtype), # wr
types.CPointer(dtype), # wi
types.CPointer(dtype), # vl
types.intp, # ldvl
types.CPointer(dtype), # vr
types.intp # ldvr
)
return types.ExternalFunction("numba_ez_rgeev", sig)
@classmethod
def numba_ez_cgeev(cls, dtype):
sig = types.intc(types.char, # kind
types.char, # jobvl
types.char, # jobvr
types.intp, # n
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(dtype), # w
types.CPointer(dtype), # vl
types.intp, # ldvl
types.CPointer(dtype), # vr
types.intp # ldvr
)
return types.ExternalFunction("numba_ez_cgeev", sig)
@classmethod
def numba_ez_xxxevd(cls, dtype):
wtype = getattr(dtype, "underlying_float", dtype)
sig = types.intc(types.char, # kind
types.char, # jobz
types.char, # uplo
types.intp, # n
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(wtype), # w
)
return types.ExternalFunction("numba_ez_xxxevd", sig)
@classmethod
def numba_xxpotrf(cls, dtype):
sig = types.intc(types.char, # kind
types.char, # uplo
types.intp, # n
types.CPointer(dtype), # a
types.intp # lda
)
return types.ExternalFunction("numba_xxpotrf", sig)
@classmethod
def numba_ez_gesdd(cls, dtype):
stype = getattr(dtype, "underlying_float", dtype)
sig = types.intc(
types.char, # kind
types.char, # jobz
types.intp, # m
types.intp, # n
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(stype), # s
types.CPointer(dtype), # u
types.intp, # ldu
types.CPointer(dtype), # vt
types.intp # ldvt
)
return types.ExternalFunction("numba_ez_gesdd", sig)
@classmethod
def numba_ez_geqrf(cls, dtype):
sig = types.intc(
types.char, # kind
types.intp, # m
types.intp, # n
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(dtype), # tau
)
return types.ExternalFunction("numba_ez_geqrf", sig)
@classmethod
def numba_ez_xxgqr(cls, dtype):
sig = types.intc(
types.char, # kind
types.intp, # m
types.intp, # n
types.intp, # k
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(dtype), # tau
)
return types.ExternalFunction("numba_ez_xxgqr", sig)
@classmethod
def numba_ez_gelsd(cls, dtype):
rtype = getattr(dtype, "underlying_float", dtype)
sig = types.intc(
types.char, # kind
types.intp, # m
types.intp, # n
types.intp, # nrhs
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(dtype), # b
types.intp, # ldb
types.CPointer(rtype), # S
types.float64, # rcond
types.CPointer(types.intc) # rank
)
return types.ExternalFunction("numba_ez_gelsd", sig)
@classmethod
def numba_xgesv(cls, dtype):
sig = types.intc(
types.char, # kind
types.intp, # n
types.intp, # nrhs
types.CPointer(dtype), # a
types.intp, # lda
types.CPointer(F_INT_nbtype), # ipiv
types.CPointer(dtype), # b
types.intp # ldb
)
return types.ExternalFunction("numba_xgesv", sig)
@contextlib.contextmanager
def make_contiguous(context, builder, sig, args):
"""
Ensure that all array arguments are contiguous, if necessary by
copying them.
A new (sig, args) tuple is yielded.
"""
newtys = []
newargs = []
copies = []
for ty, val in zip(sig.args, args):
if not isinstance(ty, types.Array) or ty.layout in 'CF':
newty, newval = ty, val
else:
newty = ty.copy(layout='C')
copysig = signature(newty, ty)
newval = array_copy(context, builder, copysig, (val,))
copies.append((newty, newval))
newtys.append(newty)
newargs.append(newval)
yield signature(sig.return_type, *newtys), tuple(newargs)
for ty, val in copies:
context.nrt.decref(builder, ty, val)
def check_c_int(context, builder, n):
"""
Check whether *n* fits in a C `int`.
"""
_maxint = 2**31 - 1
def impl(n):
if n > _maxint:
raise OverflowError("array size too large to fit in C int")
context.compile_internal(builder, impl,
signature(types.none, types.intp), (n,))
def check_blas_return(context, builder, res):
"""
Check the integer error return from one of the BLAS wrappers in
_helperlib.c.
"""
with builder.if_then(cgutils.is_not_null(builder, res), likely=False):
# Those errors shouldn't happen, it's easier to just abort the process
pyapi = context.get_python_api(builder)
pyapi.gil_ensure()
pyapi.fatal_error("BLAS wrapper returned with an error")
def check_lapack_return(context, builder, res):
"""
Check the integer error return from one of the LAPACK wrappers in
_helperlib.c.
"""
with builder.if_then(cgutils.is_not_null(builder, res), likely=False):
# Those errors shouldn't happen, it's easier to just abort the process
pyapi = context.get_python_api(builder)
pyapi.gil_ensure()
pyapi.fatal_error("LAPACK wrapper returned with an error")
def call_xxdot(context, builder, conjugate, dtype,
n, a_data, b_data, out_data):
"""
Call the BLAS vector * vector product function for the given arguments.
"""
fnty = ir.FunctionType(ir.IntType(32),
[ll_char, ll_char, intp_t, # kind, conjugate, n
ll_void_p, ll_void_p, ll_void_p, # a, b, out
])
fn = builder.module.get_or_insert_function(fnty, name="numba_xxdot")
kind = get_blas_kind(dtype)
kind_val = ir.Constant(ll_char, ord(kind))
conjugate = ir.Constant(ll_char, int(conjugate))
res = builder.call(fn, (kind_val, conjugate, n,
builder.bitcast(a_data, ll_void_p),
builder.bitcast(b_data, ll_void_p),
builder.bitcast(out_data, ll_void_p)))
check_blas_return(context, builder, res)
def call_xxgemv(context, builder, do_trans,
m_type, m_shapes, m_data, v_data, out_data):
"""
Call the BLAS matrix * vector product function for the given arguments.
"""
fnty = ir.FunctionType(ir.IntType(32),
[ll_char, ll_char, # kind, trans
intp_t, intp_t, # m, n
ll_void_p, ll_void_p, intp_t, # alpha, a, lda
ll_void_p, ll_void_p, ll_void_p, # x, beta, y
])
fn = builder.module.get_or_insert_function(fnty, name="numba_xxgemv")
dtype = m_type.dtype
alpha = make_constant_slot(context, builder, dtype, 1.0)
beta = make_constant_slot(context, builder, dtype, 0.0)
if m_type.layout == 'F':
m, n = m_shapes
lda = m_shapes[0]
else:
n, m = m_shapes
lda = m_shapes[1]
kind = get_blas_kind(dtype)
kind_val = ir.Constant(ll_char, ord(kind))
trans = ir.Constant(ll_char, ord('t') if do_trans else ord('n'))
res = builder.call(fn, (kind_val, trans, m, n,
builder.bitcast(alpha, ll_void_p),
builder.bitcast(m_data, ll_void_p), lda,
builder.bitcast(v_data, ll_void_p),
builder.bitcast(beta, ll_void_p),
builder.bitcast(out_data, ll_void_p)))
check_blas_return(context, builder, res)
def call_xxgemm(context, builder,
x_type, x_shapes, x_data,
y_type, y_shapes, y_data,
out_type, out_shapes, out_data):
"""
Call the BLAS matrix * matrix product function for the given arguments.
"""
fnty = ir.FunctionType(ir.IntType(32),
[ll_char, # kind
ll_char, ll_char, # transa, transb
intp_t, intp_t, intp_t, # m, n, k
ll_void_p, ll_void_p, intp_t, # alpha, a, lda
ll_void_p, intp_t, ll_void_p, # b, ldb, beta
ll_void_p, intp_t, # c, ldc
])
fn = builder.module.get_or_insert_function(fnty, name="numba_xxgemm")
m, k = x_shapes
_k, n = y_shapes
dtype = x_type.dtype
alpha = make_constant_slot(context, builder, dtype, 1.0)
beta = make_constant_slot(context, builder, dtype, 0.0)
trans = ir.Constant(ll_char, ord('t'))
notrans = ir.Constant(ll_char, ord('n'))
def get_array_param(ty, shapes, data):
return (
# Transpose if layout different from result's
notrans if ty.layout == out_type.layout else trans,
# Size of the inner dimension in physical array order
shapes[1] if ty.layout == 'C' else shapes[0],
# The data pointer, unit-less
builder.bitcast(data, ll_void_p),
)
transa, lda, data_a = get_array_param(y_type, y_shapes, y_data)
transb, ldb, data_b = get_array_param(x_type, x_shapes, x_data)
_, ldc, data_c = get_array_param(out_type, out_shapes, out_data)
kind = get_blas_kind(dtype)
kind_val = ir.Constant(ll_char, ord(kind))
res = builder.call(fn, (kind_val, transa, transb, n, m, k,
builder.bitcast(alpha, ll_void_p), data_a, lda,
data_b, ldb, builder.bitcast(beta, ll_void_p),
data_c, ldc))
check_blas_return(context, builder, res)
def dot_2_mm(context, builder, sig, args):
"""
np.dot(matrix, matrix)
"""
def dot_impl(a, b):
m, k = a.shape
_k, n = b.shape
if k == 0:
return np.zeros((m, n), a.dtype)
out = np.empty((m, n), a.dtype)
return np.dot(a, b, out)
res = context.compile_internal(builder, dot_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
def dot_2_vm(context, builder, sig, args):
"""
np.dot(vector, matrix)
"""
def dot_impl(a, b):
m, = a.shape
_m, n = b.shape
if m == 0:
return np.zeros((n, ), a.dtype)
out = np.empty((n, ), a.dtype)
return np.dot(a, b, out)
res = context.compile_internal(builder, dot_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
def dot_2_mv(context, builder, sig, args):
"""
np.dot(matrix, vector)
"""
def dot_impl(a, b):
m, n = a.shape
_n, = b.shape
if n == 0:
return np.zeros((m, ), a.dtype)
out = np.empty((m, ), a.dtype)
return np.dot(a, b, out)
res = context.compile_internal(builder, dot_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
def dot_2_vv(context, builder, sig, args, conjugate=False):
"""
np.dot(vector, vector)
np.vdot(vector, vector)
"""
aty, bty = sig.args
dtype = sig.return_type
a = make_array(aty)(context, builder, args[0])
b = make_array(bty)(context, builder, args[1])
n, = cgutils.unpack_tuple(builder, a.shape)
def check_args(a, b):
m, = a.shape
n, = b.shape
if m != n:
raise ValueError("incompatible array sizes for np.dot(a, b) "
"(vector * vector)")
context.compile_internal(builder, check_args,
signature(types.none, *sig.args), args)
check_c_int(context, builder, n)
out = cgutils.alloca_once(builder, context.get_value_type(dtype))
call_xxdot(context, builder, conjugate, dtype, n, a.data, b.data, out)
return builder.load(out)
@lower_builtin(np.dot, types.Array, types.Array)
def dot_2(context, builder, sig, args):
"""
np.dot(a, b)
a @ b
"""
ensure_blas()
with make_contiguous(context, builder, sig, args) as (sig, args):
ndims = [x.ndim for x in sig.args[:2]]
if ndims == [2, 2]:
return dot_2_mm(context, builder, sig, args)
elif ndims == [2, 1]:
return dot_2_mv(context, builder, sig, args)
elif ndims == [1, 2]:
return dot_2_vm(context, builder, sig, args)
elif ndims == [1, 1]:
return dot_2_vv(context, builder, sig, args)
else:
assert 0
lower_builtin(operator.matmul, types.Array, types.Array)(dot_2)
@lower_builtin(np.vdot, types.Array, types.Array)
def vdot(context, builder, sig, args):
"""
np.vdot(a, b)
"""
ensure_blas()
with make_contiguous(context, builder, sig, args) as (sig, args):
return dot_2_vv(context, builder, sig, args, conjugate=True)
def dot_3_vm_check_args(a, b, out):
m, = a.shape
_m, n = b.shape
if m != _m:
raise ValueError("incompatible array sizes for "
"np.dot(a, b) (vector * matrix)")
if out.shape != (n,):
raise ValueError("incompatible output array size for "
"np.dot(a, b, out) (vector * matrix)")
def dot_3_mv_check_args(a, b, out):
m, _n = a.shape
n, = b.shape
if n != _n:
raise ValueError("incompatible array sizes for np.dot(a, b) "
"(matrix * vector)")
if out.shape != (m,):
raise ValueError("incompatible output array size for "
"np.dot(a, b, out) (matrix * vector)")
def dot_3_vm(context, builder, sig, args):
"""
np.dot(vector, matrix, out)
np.dot(matrix, vector, out)
"""
xty, yty, outty = sig.args
assert outty == sig.return_type
dtype = xty.dtype
x = make_array(xty)(context, builder, args[0])
y = make_array(yty)(context, builder, args[1])
out = make_array(outty)(context, builder, args[2])
x_shapes = cgutils.unpack_tuple(builder, x.shape)
y_shapes = cgutils.unpack_tuple(builder, y.shape)
out_shapes = cgutils.unpack_tuple(builder, out.shape)
if xty.ndim < yty.ndim:
# Vector * matrix
# Asked for x * y, we will compute y.T * x
mty = yty
m_shapes = y_shapes
v_shape = x_shapes[0]
lda = m_shapes[1]
do_trans = yty.layout == 'F'
m_data, v_data = y.data, x.data
check_args = dot_3_vm_check_args
else:
# Matrix * vector
# We will compute x * y
mty = xty
m_shapes = x_shapes
v_shape = y_shapes[0]
lda = m_shapes[0]
do_trans = xty.layout == 'C'
m_data, v_data = x.data, y.data
check_args = dot_3_mv_check_args
context.compile_internal(builder, check_args,
signature(types.none, *sig.args), args)
for val in m_shapes:
check_c_int(context, builder, val)
zero = context.get_constant(types.intp, 0)
both_empty = builder.icmp_signed('==', v_shape, zero)
matrix_empty = builder.icmp_signed('==', lda, zero)
is_empty = builder.or_(both_empty, matrix_empty)
with builder.if_else(is_empty, likely=False) as (empty, nonempty):
with empty:
cgutils.memset(builder, out.data,
builder.mul(out.itemsize, out.nitems), 0)
with nonempty:
call_xxgemv(context, builder, do_trans, mty, m_shapes, m_data,
v_data, out.data)
return impl_ret_borrowed(context, builder, sig.return_type,
out._getvalue())
def dot_3_mm(context, builder, sig, args):
"""
np.dot(matrix, matrix, out)
"""
xty, yty, outty = sig.args
assert outty == sig.return_type
dtype = xty.dtype
x = make_array(xty)(context, builder, args[0])
y = make_array(yty)(context, builder, args[1])
out = make_array(outty)(context, builder, args[2])
x_shapes = cgutils.unpack_tuple(builder, x.shape)
y_shapes = cgutils.unpack_tuple(builder, y.shape)
out_shapes = cgutils.unpack_tuple(builder, out.shape)
m, k = x_shapes
_k, n = y_shapes
# The only case Numpy supports
assert outty.layout == 'C'
def check_args(a, b, out):
m, k = a.shape
_k, n = b.shape
if k != _k:
raise ValueError("incompatible array sizes for np.dot(a, b) "
"(matrix * matrix)")
if out.shape != (m, n):
raise ValueError("incompatible output array size for "
"np.dot(a, b, out) (matrix * matrix)")
context.compile_internal(builder, check_args,
signature(types.none, *sig.args), args)
check_c_int(context, builder, m)
check_c_int(context, builder, k)
check_c_int(context, builder, n)
x_data = x.data
y_data = y.data
out_data = out.data
# If eliminated dimension is zero, set all entries to zero and return
zero = context.get_constant(types.intp, 0)
both_empty = builder.icmp_signed('==', k, zero)
x_empty = builder.icmp_signed('==', m, zero)
y_empty = builder.icmp_signed('==', n, zero)
is_empty = builder.or_(both_empty, builder.or_(x_empty, y_empty))
with builder.if_else(is_empty, likely=False) as (empty, nonempty):
with empty:
cgutils.memset(builder, out.data,
builder.mul(out.itemsize, out.nitems), 0)
with nonempty:
# Check if any of the operands is really a 1-d vector represented
# as a (1, k) or (k, 1) 2-d array. In those cases, it is pessimal
# to call the generic matrix * matrix product BLAS function.
one = context.get_constant(types.intp, 1)
is_left_vec = builder.icmp_signed('==', m, one)
is_right_vec = builder.icmp_signed('==', n, one)
with builder.if_else(is_right_vec) as (r_vec, r_mat):
with r_vec:
with builder.if_else(is_left_vec) as (v_v, m_v):
with v_v:
# V * V
call_xxdot(context, builder, False, dtype,
k, x_data, y_data, out_data)
with m_v:
# M * V
do_trans = xty.layout == outty.layout
call_xxgemv(context, builder, do_trans,
xty, x_shapes, x_data, y_data, out_data)
with r_mat:
with builder.if_else(is_left_vec) as (v_m, m_m):
with v_m:
# V * M
do_trans = yty.layout != outty.layout
call_xxgemv(context, builder, do_trans,
yty, y_shapes, y_data, x_data, out_data)
with m_m:
# M * M
call_xxgemm(context, builder,
xty, x_shapes, x_data,
yty, y_shapes, y_data,
outty, out_shapes, out_data)
return impl_ret_borrowed(context, builder, sig.return_type,
out._getvalue())
@lower_builtin(np.dot, types.Array, types.Array,
types.Array)
def dot_3(context, builder, sig, args):
"""
np.dot(a, b, out)
"""
ensure_blas()
with make_contiguous(context, builder, sig, args) as (sig, args):
ndims = set(x.ndim for x in sig.args[:2])
if ndims == set([2]):
return dot_3_mm(context, builder, sig, args)
elif ndims == set([1, 2]):
return dot_3_vm(context, builder, sig, args)
else:
assert 0
fatal_error_sig = types.intc()
fatal_error_func = types.ExternalFunction("numba_fatal_error", fatal_error_sig)
@register_jitable
def _check_finite_matrix(a):
for v in np.nditer(a):
if not np.isfinite(v.item()):
raise np.linalg.LinAlgError(
"Array must not contain infs or NaNs.")
def _check_linalg_matrix(a, func_name, la_prefix=True):
# la_prefix is present as some functions, e.g. np.trace()
# are documented under "linear algebra" but aren't in the
# module
prefix = "np.linalg" if la_prefix else "np"
interp = (prefix, func_name)
# Unpack optional type
if isinstance(a, types.Optional):
a = a.type
if not isinstance(a, types.Array):
msg = "%s.%s() only supported for array types" % interp
raise TypingError(msg, highlighting=False)
if not a.ndim == 2:
msg = "%s.%s() only supported on 2-D arrays." % interp
raise TypingError(msg, highlighting=False)
if not isinstance(a.dtype, (types.Float, types.Complex)):
msg = "%s.%s() only supported on "\
"float and complex arrays." % interp
raise TypingError(msg, highlighting=False)
def _check_homogeneous_types(func_name, *types):
t0 = types[0].dtype
for t in types[1:]:
if t.dtype != t0:
msg = "np.linalg.%s() only supports inputs that have homogeneous dtypes." % func_name
raise TypingError(msg, highlighting=False)
def _copy_to_fortran_order():
pass
@overload(_copy_to_fortran_order)
def ol_copy_to_fortran_order(a):
# This function copies the array 'a' into a new array with fortran order.
# This exists because the copy routines don't take order flags yet.
F_layout = a.layout == 'F'
A_layout = a.layout == 'A'
def impl(a):
if F_layout:
# it's F ordered at compile time, just copy
acpy = np.copy(a)
elif A_layout:
# decide based on runtime value
flag_f = a.flags.f_contiguous
if flag_f:
# it's already F ordered, so copy but in a round about way to
# ensure that the copy is also F ordered
acpy = np.copy(a.T).T
else:
# it's something else ordered, so let asfortranarray deal with
# copying and making it fortran ordered
acpy = np.asfortranarray(a)
else:
# it's C ordered at compile time, asfortranarray it.
acpy = np.asfortranarray(a)
return acpy
return impl
@register_jitable
def _inv_err_handler(r):
if r != 0:
if r < 0:
fatal_error_func()
assert 0 # unreachable
if r > 0:
raise np.linalg.LinAlgError(
"Matrix is singular to machine precision.")
@register_jitable
def _dummy_liveness_func(a):
"""pass a list of variables to be preserved through dead code elimination"""
return a[0]
@overload(np.linalg.inv)
def inv_impl(a):
ensure_lapack()
_check_linalg_matrix(a, "inv")
numba_xxgetrf = _LAPACK().numba_xxgetrf(a.dtype)
numba_xxgetri = _LAPACK().numba_ez_xxgetri(a.dtype)
kind = ord(get_blas_kind(a.dtype, "inv"))
def inv_impl(a):
n = a.shape[-1]
if a.shape[-2] != n:
msg = "Last 2 dimensions of the array must be square."
raise
|
np.linalg.LinAlgError(msg)
|
numpy.linalg.LinAlgError
|
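The lowering code above dispatches np.dot by operand dimensionality (matrix*matrix, matrix*vector, vector*matrix, vector*vector) and routes np.linalg.inv through LAPACK getrf/getri, turning a positive return code into LinAlgError. The plain-NumPy snippet below states the semantics that lowering is meant to reproduce; it is a behavioural reference, not the numba implementation:

import numpy as np

a = np.arange(6.0).reshape(2, 3)
b = np.arange(12.0).reshape(3, 4)
v = np.ones(3)

assert np.dot(a, b).shape == (2, 4)        # matrix * matrix
assert np.dot(a, v).shape == (2,)          # matrix * vector
assert np.dot(v, b).shape == (4,)          # vector * matrix
assert np.dot(v, v).shape == ()            # vector * vector -> scalar

try:
    np.linalg.inv(np.zeros((2, 2)))        # singular: the factorization hits a zero pivot
except np.linalg.LinAlgError as exc:
    print("singular matrix rejected:", exc)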
"""
Implementation of SVM using cvxopt package. Implementation uses
soft margin and I've defined linear, polynomial and gaussian kernels.
To understand the theory (which is a bit challenging) I recommend reading the following:
http://cs229.stanford.edu/notes/cs229-notes3.pdf
https://www.youtube.com/playlist?list=PLoROMvodv4rMiGQp3WXShtMGgzqpfVfbU (Lectures 6,7 by Andrew Ng)
To understand how to reformulate the optimization problem we obtain
to get the input to cvxopt QP solver this blogpost can be useful:
https://xavierbourretsicotte.github.io/SVM_implementation.html
Programmed by <NAME> <aladdin.persson at hotmail dot com>
* 2020-04-26 Initial coding
"""
import numpy as np
import cvxopt
from utils import create_dataset, plot_contour
def linear(x, z):
return np.dot(x, z.T)
def polynomial(x, z, p=5):
return (1 + np.dot(x, z.T)) ** p
def gaussian(x, z, sigma=0.1):
return np.exp(-np.linalg.norm(x - z, axis=1) ** 2 / (2 * (sigma ** 2)))
class SVM:
def __init__(self, kernel=gaussian, C=1):
self.kernel = kernel
self.C = C
def fit(self, X, y):
self.y = y
self.X = X
m, n = X.shape
# Calculate Kernel
self.K = np.zeros((m, m))
for i in range(m):
self.K[i, :] = self.kernel(X[i, np.newaxis], self.X)
# Solve with cvxopt final QP needs to be reformulated
# to match the input form for cvxopt.solvers.qp
P = cvxopt.matrix(np.outer(y, y) * self.K)
q = cvxopt.matrix(-np.ones((m, 1)))
G = cvxopt.matrix(np.vstack((np.eye(m) * -1, np.eye(m))))
h = cvxopt.matrix(np.hstack((np.zeros(m), np.ones(m) * self.C)))
A = cvxopt.matrix(y, (1, m), "d")
b = cvxopt.matrix(np.zeros(1))
cvxopt.solvers.options["show_progress"] = False
sol = cvxopt.solvers.qp(P, q, G, h, A, b)
self.alphas =
|
np.array(sol["x"])
|
numpy.array
|
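After the QP solve above, the usual continuation (not shown in the excerpt, so this is my own sketch) is to keep only the multipliers that are numerically non-zero; those index the support vectors used for prediction:

import numpy as np

# Stand-in for sol["x"]: mostly ~0 multipliers with a few active ones.
alphas = np.full((20, 1), 1e-9)
alphas[[3, 7, 11]] = 0.8

threshold = 1e-4                       # numerical cutoff for "non-zero"
sv_mask = (alphas > threshold).flatten()
sv_indices = np.where(sv_mask)[0]
assert list(sv_indices) == [3, 7, 11]  # these rows of X/y would be the support vectors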
import sys
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy import stats
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from .point import Point
from .vector import Vector
from .util import rotate
class Ellipse():
def __init__(self, mu, sigma, ci=0.95, color='#4ca3dd'):
self.mu = mu
self.sigma = sigma
self.ci = ci
self.color = color
self.chi2 = None
self.set_chisquare()
self.set_axes()
self.set_vectors()
def __str__(self):
return "Ellipse(%s,%s)" % (self.mu, self.sigma)
def __repr__(self):
return "Ellipse(%s,%s)" % (self.mu, self.sigma)
def set_chisquare(self):
"""
Upper-tail critical values of chi-square distribution with 2 degrees of freedom
"""
if self.ci == 0.90:
self.chi2 = 4.605
elif self.ci == 0.95:
self.chi2 = 5.991
elif self.ci == 0.975:
self.chi2 = 7.378
else:
self.chi2 = 5.991
def set_axes(self):
"""
Set the minor, major axes as well as the alpha angle
Also set the eigenvalues and eigenvectors of the covariance matrix
https://www.math.ubc.ca/~pwalls/math-python/linear-algebra/eigenvalues-eigenvectors/
"""
# Eigenvalues and corresponding eigenvectors
# of the covariance matrix
eig_val, eig_vec = np.linalg.eig(self.sigma)
# Sort the eigenvalues by descending order
self.eigs = [(np.abs(eig_val[i]), eig_vec[:,i]) for i in range(len(eig_val))]
self.eigs.sort(key=lambda x: x[0], reverse=True)
# semi-major and semi-minor axes length
self.semi_major = math.sqrt(self.eigs[0][0]*self.chi2)
self.semi_minor = math.sqrt(self.eigs[1][0]*self.chi2)
# Get the eigenvector associated to the largest eigenvalue
vec = self.eigs[0][1]
# The ellipse orientation is the arctan of that vector y/x
self.alpha =
|
np.arctan(vec[1]/vec[0])
|
numpy.arctan
|
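set_axes above converts the covariance eigendecomposition plus a chi-square quantile into semi-axis lengths and an orientation. Here is a worked example for one concrete covariance matrix (values chosen only for illustration; arctan2 is used as a slightly more robust variant of the arctan(y/x) in the class):

import math
import numpy as np

sigma = np.array([[2.0, 0.6],
                  [0.6, 1.0]])
chi2 = 5.991                                   # 95% upper-tail quantile, 2 dof

eig_val, eig_vec = np.linalg.eig(sigma)
order = np.argsort(eig_val)[::-1]              # largest eigenvalue first
eig_val, eig_vec = eig_val[order], eig_vec[:, order]

semi_major = math.sqrt(eig_val[0] * chi2)      # ~3.70
semi_minor = math.sqrt(eig_val[1] * chi2)      # ~2.08
alpha = np.arctan2(eig_vec[1, 0], eig_vec[0, 0])
print(semi_major, semi_minor, math.degrees(alpha))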
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on January 10, 2019
@author: talbpaul
Container to handle ROMs that are made of many sub-roms
"""
# standard libraries
from __future__ import division, print_function, absolute_import
import copy
import warnings
from collections import defaultdict
# external libraries
import abc
import numpy as np
# internal libraries
from utils import mathUtils, xmlUtils, randomUtils
from .SupervisedLearning import supervisedLearning
warnings.simplefilter('default', DeprecationWarning)
#
#
#
#
class Collection(supervisedLearning):
"""
A container that handles collections of ROMs in a particular way.
"""
def __init__(self, messageHandler, **kwargs):
"""
Constructor.
@ In, messageHandler, MessageHandler.MessageHandler, message tracker
@ In, kwargs, dict, options and initialization settings (from XML)
@ Out, None
"""
supervisedLearning.__init__(self, messageHandler, **kwargs)
self.printTag = 'ROM Collection' # message printing appearance
self._romName = kwargs.get('name', 'unnamed') # name of the requested ROM
self._templateROM = kwargs['modelInstance'] # example of a ROM that will be used in this grouping, set by setTemplateROM
self._roms = [] # ROMs that belong to this grouping.
def __getstate__(self):
"""
Customizes the serialization of this class.
@ In, None
@ Out, d, dict, dictionary with class members
"""
# construct a list of unpicklable entities and exclude them from pickling
nope = ['_divisionClassifier', '_assembledObjects']
d = dict((key, val) for key, val in self.__dict__.items() if key not in nope) # deepcopy needed
return d
@abc.abstractmethod
def train(self, tdict):
"""
Trains the SVL and its supporting SVLs. Overwrites base class behavior due to special clustering needs.
@ In, trainDict, dict, dictionary with training data
@ Out, None
"""
pass
@abc.abstractmethod
def evaluate(self, edict):
"""
Method to evaluate a point or set of points via surrogate.
Overwritten for special needs in this ROM
@ In, edict, dict, evaluation dictionary
@ Out, evaluate, np.array, evaluated points
"""
pass
# dummy methods that are required by SVL and not generally used
def __confidenceLocal__(self, featureVals):
"""
This should return an estimation of the quality of the prediction.
This could be distance or probability or anything else, the type needs to be declared in the variable cls.qualityEstType
@ In, featureVals, 2-D numpy array , [n_samples,n_features]
@ Out, __confidenceLocal__, float, the confidence
"""
pass
def __resetLocal__(self):
"""
Reset ROM. After this method the ROM should be described only by the initial parameter settings
@ In, None
@ Out, None
"""
pass
def __returnCurrentSettingLocal__(self):
"""
Returns a dictionary with the parameters and their current values
@ In, None
@ Out, params, dict, dictionary of parameter names and current values
"""
return {}
def __returnInitialParametersLocal__(self):
"""
Returns a dictionary with the parameters and their initial values
@ In, None
@ Out, params, dict, dictionary of parameter names and initial values
"""
return {}
# Are private-ish so should not be called directly, so we don't implement them, as they don't fit the collection.
def __evaluateLocal__(self, featureVals):
"""
@ In, featureVals, np.array, 2-D numpy array [n_samples,n_features]
@ Out, targetVals , np.array, 1-D numpy array [n_samples]
"""
pass
def __trainLocal__(self, featureVals, targetVals):
"""
Perform training on samples in featureVals with responses y.
For an one-class model, +1 or -1 is returned.
@ In, featureVals, {array-like, sparse matrix}, shape=[n_samples, n_features],
an array of input feature values
@ Out, targetVals, array, shape = [n_samples], an array of output target
associated with the corresponding points in featureVals
"""
pass
#
#
#
#
class Segments(Collection):
"""
A container that handles ROMs that are segmented along some set of indices
"""
########################
# CONSTRUCTION METHODS #
########################
def __init__(self, messageHandler, **kwargs):
"""
Constructor.
@ In, messageHandler, MessageHandler.MessageHandler, message tracker
@ In, kwargs, dict, options and initialization settings (from XML)
@ Out, None
"""
Collection.__init__(self, messageHandler, **kwargs)
self.printTag = 'Segmented ROM'
self._divisionInstructions = {} # which parameters are clustered, and how, and how much?
self._divisionMetrics = None # requested metrics to apply; if None, implies everything we know about
self._divisionInfo = {} # data that should persist across methods
self._divisionPivotShift = {} # whether and how to normalize/shift subspaces
self._indexValues = {} # original index values, by index
# allow some ROM training to happen globally, separate from individual segment training
## see design note for Clusters
self._romGlobalAdjustments = None # global ROM settings, provided by the templateROM before clustering
# set up segmentation
# get input specifications from inputParams
inputSpecs = kwargs['paramInput'].findFirst('Segment')
# initialize settings
divisionMode = {}
for node in inputSpecs.subparts:
if node.name == 'subspace':
subspace = node.value
# check for duplicate definition
if subspace in divisionMode.keys():
self.raiseAWarning('Subspace was defined multiple times for "{}"! Using the first.'.format(subspace))
continue
# check correct arguments are given
if 'divisions' in node.parameterValues and 'pivotLength' in node.parameterValues:
self.raiseAnError(IOError, 'Cannot provide both \'pivotLength\' and \'divisions\' for subspace "{}"!'.format(subspace))
if 'divisions' not in node.parameterValues and 'pivotLength' not in node.parameterValues:
self.raiseAnError(IOError, 'Must provide either \'pivotLength\' or \'divisions\' for subspace "{}"!'.format(subspace))
# determine segmentation type
if 'divisions' in node.parameterValues:
# splitting a particular number of times (or "divisions")
mode = 'split'
key = 'divisions'
elif 'pivotLength' in node.parameterValues:
# splitting by pivot parameter values
mode = 'value'
key = 'pivotLength'
divisionMode[subspace] = (mode, node.parameterValues[key])
# standardize pivot param?
if 'shift' in node.parameterValues:
shift = node.parameterValues['shift'].strip().lower()
# check value given makes sense (either "zero" or "first")
acceptable = ['zero', 'first']
if shift not in [None] + acceptable:
self.raiseAnError(IOError, 'If <subspace> "shift" is specified, it must be one of {}; got "{}".'.format(acceptable, shift))
self._divisionPivotShift[subspace] = shift
else:
self._divisionPivotShift[subspace] = None
self._divisionInstructions = divisionMode
if len(self._divisionInstructions) > 1:
self.raiseAnError(NotImplementedError, 'Segmented ROMs do not yet handle multiple subspaces!')
###############
# RUN METHODS #
###############
def train(self, tdict):
"""
Trains the SVL and its supporting SVLs. Overwrites base class behavior due to special clustering needs.
@ In, trainDict, dict, dictionary with training data
@ Out, None
"""
# read in assembled objects, if any
self.readAssembledObjects()
# subdivide space
divisions = self._subdivideDomain(self._divisionInstructions, tdict)
self._divisionInfo['delimiters'] = divisions[0] + divisions[1]
# allow ROM to handle some global training
self._romGlobalAdjustments, newTrainingDict = self._templateROM.getGlobalRomSegmentSettings(tdict, divisions)
# train segments
self._trainBySegments(divisions, newTrainingDict)
self.amITrained = True
self._templateROM.amITrained = True
def evaluate(self, edict):
"""
Method to evaluate a point or set of points via surrogate.
Overwritten for special needs in this ROM
@ In, edict, dict, evaluation dictionary
@ Out, result, np.array, evaluated points
"""
result = self._evaluateBySegments(edict)
# allow each segment ROM to modify signal based on global training settings
for s, segment in enumerate(self._getSequentialRoms()):
delim = self._divisionInfo['delimiters'][s]
picker = slice(delim[0], delim[-1] + 1)
result = segment.finalizeLocalRomSegmentEvaluation(self._romGlobalAdjustments, result, picker)
result = self._templateROM.finalizeGlobalRomSegmentEvaluation(self._romGlobalAdjustments, result)
return result
def writePointwiseData(self, writeTo):
"""
Writes pointwise data about this ROM to the data object.
@ In, writeTo, DataObject, data structure into which data should be written
@ Out, None
"""
rlz = self._writeSegmentsRealization(writeTo)
writeTo.addRealization(rlz)
def writeXML(self, writeTo, targets=None, skip=None):
"""
Write out ARMA information
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, targets, list, optional, unused
@ In, skip, list, optional, unused
@ Out, None
"""
# write global information
newNode = xmlUtils.StaticXmlElement('GlobalROM', attrib={'segment':'all'})
self._templateROM.writeXML(newNode, targets, skip)
writeTo.getRoot().append(newNode.getRoot())
# write subrom information
for i, rom in enumerate(self._roms):
newNode = xmlUtils.StaticXmlElement('SegmentROM', attrib={'segment':str(i)})
rom.writeXML(newNode, targets, skip)
writeTo.getRoot().append(newNode.getRoot())
###################
# UTILITY METHODS #
###################
def _evaluateBySegments(self, evaluationDict):
"""
Evaluate ROM by evaluating its segments
@ In, evaluationDict, dict, realization to evaluate
@ Out, result, dict, dictionary of results
"""
# TODO assuming only subspace is pivot param
pivotID = self._templateROM.pivotParameterID
lastEntry = self._divisionInfo['historyLength']
result = None # we don't know the targets yet, so wait until we get the first evaluation to set this up
nextEntry = 0 # index to fill next data set into
self.raiseADebug('Sampling from {} segments ...'.format(len(self._roms)))
roms = self._getSequentialRoms()
for r, rom in enumerate(roms):
self.raiseADebug('Evaluating ROM segment', r)
subResults = rom.evaluate(evaluationDict)
# NOTE the pivot values for subResults will be wrong (shifted) if shifting is used in training
## however, we will set the pivotID values all at once after all results are gathered, so it's okay.
# build "results" structure if not already done -> easier to do once we gather the first sample
if result is None:
# TODO would this be better stored as a numpy array instead?
result = dict((target, np.zeros(lastEntry)) for target in subResults.keys())
# place subresult into overall result # TODO this assumes consistent history length! True for ARMA at least.
entries = len(list(subResults.values())[0])
# There's a problem here, if using Clustering; the residual shorter-length element at the end might be represented
# by a ROM that expects to deliver the full signal. TODO this should be handled in a better way,
# but for now we can truncate the signal to the length needed
for target, values in subResults.items():
# skip the pivotID
if target == pivotID:
continue
if len(result[target][nextEntry:]) < len(values):
result[target][nextEntry:] = values[:len(result[target][nextEntry:])]
else:
result[target][nextEntry:nextEntry + entries] = values
# update next subdomain storage location
nextEntry += entries
# place pivot values
result[pivotID] = self._indexValues[pivotID]
return result
def _getSequentialRoms(self):
"""
Returns ROMs in sequential order. Trivial for Segmented.
@ In, None
@ Out, list, list of ROMs in order (pointer, not copy)
"""
return self._roms
def _subdivideDomain(self, divisionInstructions, trainingSet):
"""
Creates markers for subdividing the pivot parameter domain, either based on number of subdivisions
or on requested pivotValue lengths.
@ In, divisionInstructions, dict, dictionary of inputs/indices to cluster on mapped to either
number of subdivisions to make or length of the pivot value segments to include
@ In, trainingSet, dict or list, data used to train the ROM; if a list is provided a temporal ROM is generated.
@ Out, counter, list(tuple), indices that belong to each division; at minimum (first index, last index)
@ Out, unclustered, list(tuple), as "counter" but for segments that will not be clustered
"""
unclustered = []
# division instructions are as {subspace: (mode, value)}
## where "value" is the number of segments in "split" mode
## or the length of pivot values per segment in "value" mode
self.raiseADebug('Training segmented subspaces for "{}" ...'.format(self._romName))
for subspace, (mode, value) in divisionInstructions.items():
dataLen = len(trainingSet[subspace][0]) # TODO assumes synchronized histories, or single history
self._divisionInfo['historyLength'] = dataLen # TODO assumes single pivotParameter
if mode == 'split':
numSegments = value # renamed for clarity
# divide the subspace into equally-sized segments, store the indexes for each segment
counter = np.array_split(np.arange(dataLen), numSegments)
# only store bounds, not all indices in between -> note that this is INCLUSIVE!
counter = list((c[0], c[-1]) for c in counter)
# Note that "segmented" doesn't have "unclustered" since chunks are evenly sized
elif mode == 'value':
segmentValue = value # renamed for clarity
# divide the subspace into segments with roughly the same pivot length (e.g. time length)
pivot = trainingSet[subspace][0]
# find where the data passes the requested length, and make dividers
floor = 0 # where does this pivot segment start?
nextOne = segmentValue # how high should this pivot segment go?
counter = []
# TODO speedup; can we do this without looping?
while pivot[floor] < pivot[-1]:
cross = np.searchsorted(pivot, nextOne)
# if the next crossing point is past the end, put the remainder piece
## into the "unclustered" grouping, since it might be very oddly sized
## and throw off segmentation (specifically for clustering)
if cross == len(pivot):
unclustered.append((floor, cross - 1))
break
# add this segment, only really need to know the first and last index (inclusive)
counter.append((floor, cross - 1)) # Note: indices are INCLUSIVE
# update search parameters
floor = cross
nextOne += segmentValue
self.raiseADebug('Dividing {:^20s} into {:^5d} divisions for training ...'.format(subspace, len(counter) + len(unclustered)))
# return the counter indices as well as any odd-piece-out parts
return counter, unclustered
def _trainBySegments(self, divisions, trainingSet):
"""
Train ROM by training many ROMs depending on the input/index space clustering.
@ In, divisions, tuple, (division slice indices, unclustered spaces)
@ In, trainingSet, dict or list, data used to train the ROM; if a list is provided a temporal ROM is generated.
@ Out, None
"""
# train the subdomain ROMs
counter, remainder = divisions
roms = self._trainSubdomainROMs(self._templateROM, counter, trainingSet, self._romGlobalAdjustments)
# if there were leftover domain segments that didn't go with the rest, train those now
if remainder:
unclusteredROMs = self._trainSubdomainROMs(self._templateROM, remainder, trainingSet, self._romGlobalAdjustments)
roms = np.hstack([roms, unclusteredROMs])
self._roms = roms
def _trainSubdomainROMs(self, templateROM, counter, trainingSet, romGlobalAdjustments):
"""
Trains the ROMs on each clusterable subdomain
@ In, templateROM, SupervisedLearning.supervisedLearning instance, template ROM
@ In, counter, list(tuple), instructions for dividing subspace into subdomains
@ In, trainingSet, dict, data on which ROMs should be trained
@ In, romGlobalAdjustments, object, arbitrary container created by ROMs and passed to ROM training
@ Out, roms, np.array(supervisedLearning), trained ROMs for each subdomain
"""
targets = templateROM.target[:]
# clear indices from the training list, since they are independent
# TODO assumes pivotParameter is the only subspace being divided
pivotID = templateROM.pivotParameterID
targets.remove(pivotID)
# stash pivot values, since those will break up while training segments
# TODO assumes only pivot param
if pivotID not in self._indexValues:
self._indexValues[pivotID] = trainingSet[pivotID][0]
# loop over clusters and train data
roms = []
for i, subdiv in enumerate(counter):
# slicer for data selection
picker = slice(subdiv[0], subdiv[-1] + 1)
## TODO we need to be slicing all the data, not just one realization, once we support non-ARMA segmentation.
data = dict((var, [copy.deepcopy(trainingSet[var][0][picker])]) for var in trainingSet)
# renormalize the pivot if requested, e.g. by shifting values
norm = self._divisionPivotShift[pivotID]
if norm:
if norm == 'zero':
# left-shift pivot so subspace starts at 0 each time
delta = data[pivotID][0][0]
elif norm == 'first':
# left-shift so that first entry is equal to pivot's first value (maybe not zero)
delta = data[pivotID][0][0] - trainingSet[pivotID][0][0]
data[pivotID][0] -= delta
# create a new ROM and train it!
newROM = copy.deepcopy(templateROM)
newROM.name = '{}_seg{}'.format(self._romName, i)
newROM.adjustLocalRomSegment(self._romGlobalAdjustments)
self.raiseADebug('Training segment', i, picker)
newROM.train(data)
roms.append(newROM)
# format array for future use
roms = np.array(roms)
return roms
def _writeSegmentsRealization(self, writeTo):
"""
Writes pointwise data about segmentation to a realization. Won't actually add rlz to D.O.,
but will register names to it.
@ In, writeTo, DataObject, data structure into which data should be written
@ Out, None
"""
pivotID = self._templateROM.pivotParameterID
pivot = self._indexValues[pivotID]
# realization to add eventually
rlz = {}
segmentNames = range(len(self._divisionInfo['delimiters']))
# pivot for all this stuff is the segment number
rlz['segment_number'] = np.asarray(segmentNames)
# start indices
varName = 'seg_index_start'
writeTo.addVariable(varName,
|
np.array([])
|
numpy.array
|
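_subdivideDomain above supports two segmentation modes: 'split' (a fixed number of equally sized index chunks via np.array_split) and 'value' (cut wherever the pivot passes multiples of a requested length via np.searchsorted). A standalone sketch of both on a toy pivot grid:

import numpy as np

pivot = np.linspace(0.0, 10.0, 101)             # toy pivot values

# 'split' mode: equally sized index segments, stored as inclusive (first, last) bounds.
chunks = np.array_split(np.arange(len(pivot)), 4)
split_bounds = [(c[0], c[-1]) for c in chunks]  # [(0, 25), (26, 50), (51, 75), (76, 100)]

# 'value' mode: cut wherever the pivot crosses multiples of segment_length.
segment_length = 3.0
bounds, floor, target = [], 0, segment_length
while pivot[floor] < pivot[-1]:
    cross = np.searchsorted(pivot, target)
    if cross >= len(pivot):
        bounds.append((floor, len(pivot) - 1))  # leftover piece ("unclustered" above)
        break
    bounds.append((floor, cross - 1))
    floor, target = cross, target + segment_length

print(split_bounds)
print(bounds)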
from matplotlib import pyplot as plt
import numpy as np
# Compute the bit-reverse of an N-bit input
# (ARM RBIT instruction may be useful here?)
def bit_reverse(input, N):
result = input & 1
for _ in range(N - 1):
result <<= 1
input >>= 1
result |= (input & 1)
return result
# Bit-reverse shuffle an array of 2^N entries in place.
def bit_reverse_shuffle(samples, N):
assert len(samples) == 2**N, f'bad array length, {len(samples)} != {2**N} (2^{N})'
for i in range(2**N):
j = bit_reverse(i, N)
if i < j:
t = samples[i]
samples[i] = samples[j]
samples[j] = t
# Radix-2, time decimation FFT.
# Inputs are real-valued; input length must be 2^N.
#
# very good match to numpy for float64, float32
# float16 works for small but overflows for big.
# int64, int32 work for big but not for small (underflow)
# int16 is bad for big as well (overflow)
#
# TODO: can we do a fixed-point FFT with appropriate scaling?
# TODO: optimize for real-valued FFT being symmetrical?
# TODO: replace bit-reverse shuffle with per-pass strides?
# TODO: small optimizations like calculating angles by addition.
#
def fft(input, N):
length = len(input)
assert length == 2**N, f'bad input length, {length} != {2**N} (2^{N})'
# The radix-2 FFT is a recursive algorithm that breaks a
# DFT of 2^N entries into two DFTs each of 2^(N-1) entries.
# It combines the results of the sub-DFTs using a set of
# 'butterfly' multiply-and-add operations.
#
# In order to operate in place, the recursive traversal
# is actually implemented breadth-first. Conceptually
# we do 2^N 1-entry DFTs (which are noops), then combine
# them into 2^(N-1) 2-entry DFTs, and so on until we have
# one 2^N-entry DFT.
#
# Each recursive step would have split the inputs into even
# and odd entries. We can avoid shuffling between stages by
# shuffling once first.
real = input.copy()
bit_reverse_shuffle(real, N)
# No need to shuffle the imaginary parts, which are all zeros.
imag = np.zeros(len(real), real.dtype)
#for (sub_length = 2; sub_length <= 2**N; sub_length *= 2):
for n in range(1, N + 1):
sub_length = 2**n
# In this pass we are merging smaller DFTs into DFTs
# of length sub_length. This should look like:
# for (base = 0; base < length; base += sub_length):
# for (step = 0; step < sub_length // 2; step++):
# calculate 'twiddle factor' for this step
# merge [base+step] with [base+(sub_length/2)+step]
# but calculating twiddle factors is expensive so
# we invert the inner two loops so we can reuse them.
for step in range(sub_length // 2):
# "twiddle factor" of e^(-2*pi*i*step/sub_length)
twiddle_angle = -2 * np.pi * step / sub_length
twiddle_sin = np.sin(twiddle_angle)
twiddle_cos = np.cos(twiddle_angle)
for base in range(0, length, sub_length):
# Load the two entries that we're going to merge.
A_index = base + step
A_real = real[A_index]
A_imag = imag[A_index]
B_index = A_index + (sub_length // 2)
B_real = real[B_index]
B_imag = imag[B_index]
# Butterfly. This is equivalent to taking
# complex A and B and twiddle T and calculating
# A' = A + TB
# B' = A - TB
TB_real = B_real * twiddle_cos - B_imag * twiddle_sin
TB_imag = B_imag * twiddle_cos + B_real * twiddle_sin
real[A_index] = (A_real + TB_real)
imag[A_index] = (A_imag + TB_imag)
real[B_index] = (A_real - TB_real)
imag[B_index] = (A_imag - TB_imag)
# Return a complex-valued numpy array to match the usual API
return real + 1j * imag
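# Quick sanity check (illustrative): an 8-point transform of a small ramp should agree
# with numpy's reference FFT to within floating-point tolerance.
_ramp = np.arange(8, dtype=np.float64)
assert np.allclose(fft(_ramp, 3), np.fft.fft(_ramp))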
# Size of inputs.
power_of_two = 12
number_of_samples = 2**power_of_two
# Format a 1D array for C.
def c_decl(name, data):
decl = f'static const float {name}[{len(data)}]'
init = '{' + ', '.join(data.astype(str)) + '}'
return f'{decl} = {init};\n'
# Test our code on one set of input data,
# plot the answers and save the test patterns.
def compare(name, data):
reference = np.fft.rfft(data.astype(np.float64))
ours64 = fft(data.astype(np.float64), power_of_two)
ours32 = fft(data.astype(np.float32), power_of_two)
status = 'OK'
# FFT of real-valued inputs should be symmetrical apart from DC.
if not np.allclose(np.conj(ours64[-1:0:-1]), ours64[1:]):
status = 'NOT SYMMETRICAL'
# 64bit should match numpy closely.
if not np.allclose(ours64[:len(ours64) // 2 + 1], reference, 1e-5):
status = 'DOES NOT MATCH NUMPY'
# 32bit should match 64bit within reason.
if not np.allclose(ours64, ours32, 1e-2, 1e-3):
status = '32 DOES NOT MATCH 64'
plt.clf()
    plt.plot(np.abs(reference))
from operator import ne
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import copy
from scipy.interpolate.fitpack import splint
import py3Dmol
import sympy as sp
#import braketlab.solid_harmonics as solid_harmonics
#import braketlab.hydrogen as hydrogen
import braketlab.basisbank as basisbank
import braketlab.animate as anim
from functools import lru_cache
import warnings
from scipy.interpolate import LinearNDInterpolator
from scipy.interpolate import RegularGridInterpolator
from scipy import integrate
from scipy.stats import multivariate_normal
from scipy.interpolate import interp1d
from sympy.utilities.runtests import split_list
def locate(f):
"""
Determine (numerically) the center and spread of a sympy function
## Returns
position (numpy.array)
standard deviation (numpy.array)
"""
s = get_ordered_symbols(f)
nd = len(s)
fn = sp.lambdify(s, f, "numpy")
xn = np.zeros(nd)
ss = 100 # initial distribution
    # quick-and-dirty normalization estimate
n20 = 10000
x0 = np.random.multivariate_normal(xn, np.eye(nd)*ss, n20)
P = multivariate_normal(mean=xn, cov=np.eye(nd)*ss).pdf(x0)
n2 = np.mean(fn(*x0.T).real**2*P**-1, axis = -1)**-1
assert(n2>1e-15), "Unable to normalize function"
n_tot = 0
mean_estimate = 0
for i in range(1,100):
x0 = np.random.multivariate_normal(xn, np.eye(nd)*ss, n20)
P = multivariate_normal(mean=xn, cov=np.eye(nd)*ss).pdf(x0)
n2 = np.mean(fn(*x0.T).real**2*P**-1, axis = -1)**-1
assert(n2>1e-15), "Unable to normalize function"
x_ = np.mean(x0.T*n2*fn(*x0.T).real**2*P**-1, axis = -1)
if i>50:
mean_old = mean_estimate
mean_estimate = (mean_estimate*n_tot + np.sum(x0.T*n2*fn(*x0.T).real**2*P**-1, axis = -1))/(n_tot+n20)
n_tot += n20
xn = x_
# determine spread
n20 *= 100
sig = .5
x0 = np.random.multivariate_normal(xn, np.eye(nd)*sig, n20)
P = multivariate_normal(mean=xn, cov=np.eye(nd)*sig).pdf(x0)
#first estimate of spread
n2 = np.mean(fn(*x0.T).real**2*P**-1, axis = -1)**-1
x_ = np.mean(x0.T**2*n2*fn(*x0.T).real**2*P**-1, axis = -1) - xn.T**2
# ensure positive non-zero standard-deviation
    x_ = np.max(np.stack([np.zeros(nd)+.0001, x_]), axis = 0 )
i = .5*(2*x_)**-1
sig = (2*i)**-.5
# recompute spread with better precision
x0 = np.random.multivariate_normal(xn, np.diag(sig), n20)
P = multivariate_normal(mean=xn, cov=np.diag(sig)).pdf(x0)
n2 = np.mean(fn(*x0.T).real**2*P**-1, axis = -1)**-1
x_ = np.mean(x0.T**2*n2*fn(*x0.T).real**2*P**-1, axis = -1) - xn.T**2
# ensure positive non-zero standard-deviation
    x_ = np.max(np.stack([np.zeros(nd)+.0001, x_]), axis = 0 )
i = .5*(2*x_)**-1
sig = (2*i)**-.5
return mean_estimate, sig
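# Example sketch (illustrative): locating a 3d Gaussian, assuming sympy is available as sp.
#
#   x, y, z = sp.symbols("x y z")
#   center, spread = locate(sp.exp(-(x**2 + y**2 + z**2)))
#   # center is close to the origin; spread is a per-axis width estimate of |f|^2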
def plot(*p):
warnings.warn("replaced by show( ... )", DeprecationWarning, stacklevel=2)
show(*p)
def get_cubefile(p):
Nx = 60
t = np.linspace(-20,20,Nx)
cubic = p(t[:,None,None], t[None,:,None], t[None,None,:])
if cubic.dtype == np.complex128:
cubic = cubic.real
cube = """CUBE FILE.
OUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z
3 0.000000 0.000000 0.000000
%i 1 0.000000 0.000000
%i 0.000000 1 0.000000
%i 0.000000 0.000000 1
""" % (Nx,Nx,Nx)
for i in range(Nx):
for j in range(Nx):
for k in range(Nx):
cube += "%.4f " % cubic[i,j,k].real
cube += "\n"
#print(cube)
#f = open("cubeworld.cube", "w")
#f.write(cube)
#f.close()
return cube, cubic.mean(), cubic.max(), cubic.min()
def show(*p, t=None):
"""
all-purpose vector visualization
Example usage to show the vectors as an image
a = ket( ... )
b = ket( ... )
show(a, b)
"""
mpfig = False
mv = 1
for i in list(p):
spe = i.get_ket_sympy_expression()
        if type(spe) in [list, np.ndarray]:
# 1d vector
if not mpfig:
mpfig = True
plt.figure(figsize=(6,6))
vec_R2 = i.coefficients[0]*i.basis[0] + i.coefficients[1]*i.basis[1]
plt.plot([0, vec_R2[0]], [0, vec_R2[1]], "-")
plt.plot([vec_R2[0]], [vec_R2[1]], "o", color = (0,0,0))
plt.text(vec_R2[0]+.1, vec_R2[1], "%s" % i.__name__)
mv = max( mv, max(vec_R2[1], vec_R2[0]) )
else:
vars = list(spe.free_symbols)
nd = len(vars)
Nx = 200
x = np.linspace(-8,8,200)
mv = 8
if nd == 1:
if not mpfig:
mpfig = True
plt.figure(figsize=(6,6))
plt.plot(x,i(x) , label=i.__name__)
mpfig = True
if nd == 2:
if not mpfig:
mpfig = True
plt.figure(figsize=(6,6))
plt.contour(x,x,i(x[:, None], x[None,:]))
if nd == 3:
"""
cube, cm, cmax, cmin = get_cubefile(i)
v = py3Dmol.view()
#cm = cube.mean()
offs = cmax*.05
bins = np.linspace(cm-offs,cm+offs, 2)
for i in range(len(bins)):
di = int((255*i/len(bins)))
v.addVolumetricData(cube, "cube", {'isoval':bins[i], 'color': '#%02x%02x%02x' % (255 - di, di, di), 'opacity': 1.0})
v.zoomTo()
v.show()
"""
import k3d
import SimpleITK as sitk
#psi = bk.basisbank.get_hydrogen_function(5,2,2)
#psi = bk.basisbank.get_gto(4,2,0)
x = np.linspace(-1,1,100)*80
img = i(x[None,None,:], x[None,:,None], x[:,None,None])
#Nc = 3
#colormap = interp1d(np.linspace(0,1,Nc), np.random.uniform(0,1,(3, Nc)))
#embryo = k3d.volume(img.astype(np.float32),
# color_map=np.array(k3d.basic_color_maps.BlackBodyRadiation, dtype=np.float32),
# opacity_function = np.linspace(0,1,30)[::-1]**.1)
orb_pos = k3d.volume(img.astype(np.float32),
color_map=np.array(k3d.basic_color_maps.Gold, dtype=np.float32),
opacity_function = np.linspace(0,1,30)[::-1]**.2)
orb_neg = k3d.volume(-1*img.astype(np.float32),
color_map=np.array(k3d.basic_color_maps.Blues, dtype=np.float32),
opacity_function = np.linspace(0,1,30)[::-1]**.2)
plot = k3d.plot()
plot += orb_pos
plot += orb_neg
plot.display()
if mpfig:
plt.grid()
plt.xlim(-mv-1,mv+1)
plt.ylim(-mv-1,mv+1)
plt.legend()
plt.show()
def show_old(*p, t=None):
"""
all-purpose vector visualization
Example usage to show the vectors as an image
a = ket( ... )
b = ket( ... )
plot(a, b)
"""
plt.figure(figsize=(6,6))
try:
Nx = 200
x = np.linspace(-8,8,200)
Z = np.zeros((Nx, Nx, 3), dtype = float)
colors = np.random.uniform(0,1,(len(list(p)), 3))
for i in list(p):
try:
plt.contour(x,x,i(x[:, None], x[None,:]))
except:
plt.plot(x,i(x) , label=i.__name__)
plt.grid()
plt.legend()
#plt.show()
except:
mv = 1
#plt.figure(figsize = (6,6))
for i in list(p):
vec_R2 = i.coefficients[0]*i.basis[0] + i.coefficients[1]*i.basis[1]
plt.plot([0, vec_R2[0]], [0, vec_R2[1]], "-")
plt.plot([vec_R2[0]], [vec_R2[1]], "o", color = (0,0,0))
plt.text(vec_R2[0]+.1, vec_R2[1], "%s" % i.__name__)
mv = max( mv, max(vec_R2[1], vec_R2[0]) )
plt.grid()
plt.xlim(-mv-1,mv+1)
plt.ylim(-mv-1,mv+1)
plt.show()
def construct_basis(p):
"""
Build basis from prism object
"""
basis = []
for atom, pos in zip(p.basis_set, p.atoms):
for shell in atom:
for contracted in shell:
contracted = np.array(contracted)
l = int(contracted[0,2])
a = contracted[:, 0]
w = contracted[:, 1]
for m in range(-l,l+1):
bf = w[0]*get_solid_harmonic_gaussian(a[0],l,m, position = [0,0,0])
                    for i in range(1, len(w)):
                        bf += w[i]*get_solid_harmonic_gaussian(a[i],l,m, position = [0,0,0])
#get_solid_harmonic_gaussian(a,l,m, position = [0,0,0])
basis.append( bf )
return basis
class basisfunction:
"""
# A general class for a basis function in $\mathbb{R}^n$
## Keyword arguments:
| Argument | Description |
| ----------- | ----------- |
| sympy_expression | A sympy expression |
| position | assumed center of basis function (defaults to $\mathbf{0}$ ) |
| name | (unused) |
| domain |if None, the domain is R^n, if [ [x0, x1], [ y0, y1], ... ] , the domain is finite |
## Methods
| Method | Description |
| ----------- | ----------- |
| normalize | Perform numerical normalization of self |
| estimate_decay | Estimate decay of self, used for importance sampling (currently inactive) |
| get_domain(other) | Returns the intersecting domain of two basis functions |
## Example usage:
```
x = sympy.Symbol("x")
x2 = basisfunction(x**2)
x2.normalize()
```
"""
position = None
normalization = 1
domain = None
__name__ = "\chi"
def __init__(self, sympy_expression, position = None, domain = None, name = "\chi"):
self.__name__ = name
self.dimension = len(sympy_expression.free_symbols)
self.position = np.array(position)
if position is None:
self.position = np.zeros(self.dimension, dtype = float)
assert(len(self.position)==self.dimension), "Basis function position contains incorrect number of dimensions (%.i)." % self.dimension
# sympy expressions
self.ket_sympy_expression = translate_sympy_expression(sympy_expression, self.position)
self.bra_sympy_expression = translate_sympy_expression(sp.conjugate(sympy_expression), self.position)
# numeric expressions
symbols = np.array(list(sympy_expression.free_symbols))
l_symbols = np.argsort([i.name for i in symbols])
symbols = symbols[l_symbols]
self.ket_numeric_expression = sp.lambdify(symbols, self.ket_sympy_expression, "numpy")
self.bra_numeric_expression = sp.lambdify(symbols, self.bra_sympy_expression, "numpy")
# decay
self.decay = 1.0
def normalize(self, domain = None):
"""
Set normalization factor $N$ of self ($\chi$) so that $\langle \chi \\vert \chi \\rangle = 1$.
"""
s_12 = inner_product(self, self)
self.normalization = s_12**-.5
def locate(self):
"""
Locate and determine spread of self
"""
self.position, self.decay = locate(self.ket_sympy_expression)
def estimate_decay(self):
# estimate standard deviation
#todo : proper decay estimate (this one is incorrect)
#x = np.random.multivariate_normal(self.position*0, np.eye(len(self.position)), 1e7)
#r2 = np.sum(x**2, axis = 1)
#P = multivariate_normal(mean=self.position*0, cov=np.eye(len(self.position))).pdf(x)
self.decay = 1 #np.mean(self.numeric_expression(*x.T)*r2*P**-1)**.5
def get_domain(self, other = None):
if other is None:
return self.domain
else:
domain = self.domain
if self.domain is not None:
domain = []
for i in range(len(self.domain)):
domain.append([np.array([self.domain[i].min(), other.domain[i].min()]).max(),
np.array([self.domain[i].max(), other.domain[i].max()]).min()])
return domain
def __call__(self, *r):
"""
Evaluate function in coordinates ```*r``` (arbitrary dimensions).
## Returns
The basisfunction $\chi$ evaluated in the coordinates provided in the array(s) ```*r```:
$\int_{\mathbb{R}^n} \delta(\mathbf{r} - \mathbf{r'}) \chi(\mathbf{r'}) d\mathbf{r'}$
"""
return self.normalization*self.ket_numeric_expression(*r)
def __mul__(self, other):
"""
Returns a basisfunction $\chi_{a*b}(\mathbf{r})$, where
$\chi_{a*b}(\mathbf{r}) = \chi_a(\mathbf{r}) \chi_b(\mathbf{r})$
"""
return basisfunction(self.ket_sympy_expression * other.ket_sympy_expression,
position = .5*(self.position + other.position),
domain = self.get_domain(other), name = self.__name__+other.__name__)
def __rmul__(self, other):
return basisfunction(self.ket_sympy_expression * other.ket_sympy_expression,
position = .5*(self.position + other.position),
domain = self.get_domain(other), name = self.__name__+other.__name__)
def __add__(self, other):
"""
Returns a basisfunction $\chi_{a+b}(\mathbf{r})$, where
$\chi_{a+b}(\mathbf{r}) = \chi_a(\mathbf{r}) + \chi_b(\mathbf{r})$
"""
return basisfunction(self.ket_sympy_expression + other.ket_sympy_expression,
position = .5*(self.position + other.position),
domain = self.get_domain(other))
def __sub__(self, other):
"""
Returns a basisfunction $\chi_{a-b}(\mathbf{r})$, where
$\chi_{a-b}(\mathbf{r}) = \chi_a(\mathbf{r}) - \chi_b(\mathbf{r})$
"""
return basisfunction(self.ket_sympy_expression - other.ket_sympy_expression,
position = .5*(self.position + other.position),
domain = self.get_domain(other))
def _repr_html_(self):
"""
Returns a latex-formatted string to display the mathematical expression of the basisfunction.
"""
return "$ %s $" % sp.latex(self.ket_sympy_expression)
#def get_solid_harmonic_gaussian(a,l,m, position = [0,0,0]):
# return basisfunction(solid_harmonics.get_Nao(a,l,m), position = position)
class operator_expression(object):
"""
# A class for algebraic operator manipulations
instantiate with a list of list of operators
## Example
```operator([[a, b], [c,d]], [1,2]]) = 1*ab + 2*cd ```
"""
def __init__(self, ops, coefficients = None):
self.ops = ops
if issubclass(type(ops),operator):
self.ops = [[ops]]
self.coefficients = coefficients
if coefficients is None:
            self.coefficients = list(np.ones(len(self.ops)))
def __mul__(self, other):
"""
# Operator multiplication
"""
        if isinstance(other, operator_expression):
            new_ops = []
            for i in self.ops:
                for j in other.ops:
                    new_ops.append(i+j)
            return operator_expression(new_ops).flatten()
else:
return self.apply(other)
def __add__(self, other):
"""
# Operator addition
"""
new_ops = self.ops + other.ops
new_coeffs = self.coefficients + other.coefficients
return operator_expression(new_ops, new_coeffs).flatten()
def __sub__(self, other):
"""
# Operator subtraction
"""
new_ops = self.ops + other.ops
new_coeffs = self.coefficients + [-1*i for i in other.coefficients]
return operator_expression(new_ops, new_coeffs).flatten()
def flatten(self):
"""
# Remove redundant terms
"""
new_ops = []
new_coeffs = []
found = []
for i in range(len(self.ops)):
if i not in found:
new_ops.append(self.ops[i])
new_coeffs.append(1)
for j in range(i+1, len(self.ops)):
if self.ops[i]==self.ops[j]:
print("flatten:", i,j, self.ops[i], self.ops[j])
#self.coefficients[i] += 1
found.append(j)
new_coeffs[-1] += self.coefficients[j]
return operator_expression(new_ops, new_coeffs)
def apply(self, other_ket):
"""
# Apply operator to ket
$\hat{\Omega} \vert a \rangle = \vert a' \rangle $
## Returns
A new ket
"""
ret = 0
for i in range(len(self.ops)):
ret_term = other_ket*1
for j in range(len(self.ops[i])):
ret_term = self.ops[i][-j]*ret_term
if i==0:
ret = ret_term
else:
ret = ret + ret_term
return ret
def _repr_html_(self):
"""
Returns a latex-formatted string to display the mathematical expression of the operator.
"""
ret = ""
for i in range(len(self.ops)):
if np.abs(self.coefficients[i]) == 1:
if self.coefficients[i]>0:
ret += "+"
else:
ret += "-"
else:
if self.coefficients[i]>0:
ret += "+ %.2f" % self.coefficients[i]
else:
ret += "%.2f" % self.coefficients[i]
for j in range(len(self.ops[i])):
ret += "$\\big{(}$" + self.ops[i][j]._repr_html_() + "$\\big{)}$"
return ret
class operator(object):
"""
Parent class for operators
"""
def __init__(self):
pass
class sympy_operator_action:
def __init__(self, sympy_expression):
self.sympy_expression = sympy_expression
def __mul__(self, other):
assert(type(other) is basisfunction), "cannot operate on %s" %type(other)
bs = basisfunction(self.sympy_expression*other.ket_sympy_expression)
bs.position = other.position
return ket( bs)
class translation(operator):
def __init__(self, translation_vector):
self.translation_vector = np.array(translation_vector)
def __mul__(self, other):
#assert(type(other) is basisfunction), "cannot translate %s" %type(other)
new_expression = translate_sympy_expression(other.get_ket_sympy_expression(), self.translation_vector)
#bs = basisfunction(new_expression)
#if other.position is not None:
# bs.position = other.position + self.translation_vector
return ket( new_expression )
class differential(operator):
def __init__(self, order):
self.order = order
def __mul__(self, other):
#assert(type(other) is basisfunction), "cannot differentiate %s" %type(other)
new_expression = 0
symbols = np.array(list(other.get_ket_sympy_expression().free_symbols))
l_symbols = np.argsort([i.name for i in symbols])
symbols = symbols[l_symbols]
for i in range(len(symbols)):
new_expression += sp.diff(other.get_ket_sympy_expression(), symbols[i], self.order[i])
bs = basisfunction(new_expression)
#bs.position = other.position
return ket( new_expression)
def translate_sympy_expression(sympy_expression, translation_vector):
symbols = np.array(list(sympy_expression.free_symbols))
l_symbols = np.argsort([i.name for i in symbols])
shifts = symbols[l_symbols]
assert(len(shifts)==len(translation_vector)), "Incorrect length of translation vector"
return_expression = sympy_expression*1
for i in range(len(shifts)):
return_expression = return_expression.subs(shifts[i], shifts[i]-translation_vector[i])
return return_expression
# Operators
class kinetic_operator(operator):
def __init__(self, p = None):
self.p = p
if p is not None:
self.variables = get_default_variables(p)
def __mul__(self, other):
if self.p is None:
self.variables = other.basis[0].ket_sympy_expression.free_symbols
#ret = 0
new_coefficients = other.coefficients
new_basis = []
for i in other.basis:
new_basis_ = 0
for j in self.variables:
new_basis_ += sp.diff(i.ket_sympy_expression,j, 2)
new_basis.append(basisfunction(new_basis_))
new_basis[-1].position = i.position
return ket([-.5*i for i in new_coefficients], basis = new_basis)
def _repr_html_(self):
return "$ -\\frac{1}{2} \\nabla^2 $"
def get_translation_operator(pos):
return operator_expression(translation(pos))# , special_operator = True)
def get_sympy_operator(sympy_expression):
return operator_expression(sympy_expression)
def get_differential_operator(order):
return operator_expression(differential(order)) #,special_operator = True)
def get_onebody_coulomb_operator(position = np.array([0,0,0.0]), Z = 1.0, p = None, variables = None):
return operator_expression(onebody_coulomb_operator(position, Z = Z, p = p, variables = variables))
def get_twobody_coulomb_operator(p1=0,p2=1):
return operator_expression(twobody_coulomb_operator(p1,p2))
def get_kinetic_operator(p = None):
return operator_expression(kinetic_operator(p = p))
def get_default_variables(p, n = 3):
variables = []
for i in range(n):
variables.append(sp.Symbol("x_{%i; %i}" % (p, i)))
return variables
class onebody_coulomb_operator(operator):
def __init__(self, position = np.array([0.0,0,0]), Z = 1.0, p = None, variables = None):
self.position = position
self.Z = Z
self.p = p
self.variables = variables
if p is not None:
self.variables = get_default_variables(self.p, len(position))
r = 0
for j in range(len(self.variables)):
r += (self.variables[j]-self.position[j])**2
self.r_inv = r**-.5
def __mul__(self, other, r = None):
variables = self.variables
if self.variables is None:
#variables = other.basis[0].ket_sympy_expression.free_symbols
symbols = np.array(list(other.basis[0].ket_sympy_expression.free_symbols))
"""
symbols_particles = [int(x.name.split("{")[1].split(";")[0]) for x in symbols]
particle_symbols = []
for i in range(len(symbols)):
if symbols_particles[i] == self.p:
particle_symbols.append(symbols[i])
print("part_s:", particle_symbols)
symbols = particle_symbols
"""
l_symbols = np.argsort([i.name for i in symbols])
variables = symbols[l_symbols]
r = 0
for j in range(len(variables)):
r += (variables[j]-self.position[j])**2
self.r_inv = r**-.5
new_coefficients = other.coefficients
new_basis = []
for i in other.basis:
new_basis.append(basisfunction(self.r_inv*i.ket_sympy_expression))#, position = i.position+self.position))
return ket([-self.Z*i for i in new_coefficients], basis = new_basis)
def _repr_html_(self):
if self.position is None:
return "$ -\\frac{1}{\\mathbf{r}} $"
else:
return "$ -\\frac{1}{\\vert \\mathbf{r} - (%f, %f, %f) \\vert }$" % (self.position[0], self.position[1], self.position[2])
def twobody_denominator(p1, p2, ndim):
v_1 = get_default_variables(p1, ndim)
v_2 = get_default_variables(p2, ndim)
ret = 0
for i in range(ndim):
ret += (v_1[i] - v_2[i])**2
return ret**.5
class twobody_coulomb_operator(operator):
def __init__(self, p1 = 0, p2 = 1, ndim = 3):
self.p1 = p1
self.p2 = p2
self.ndim = ndim
def __mul__(self, other):
#vid = other.variable_identities
#if vid is None:
# assert(False), "unable to determine variables of ket"
new_basis = 0
for i in range(len(other.basis)):
#new_basis += other.coefficients[i]*apply_twobody_operator(other.basis[i].ket_sympy_expression, self.p1, self.p2)
new_basis += other.coefficients[i]*other.basis[i].ket_sympy_expression/twobody_denominator(self.p1, self.p2, self.ndim)
return ket(new_basis)
def _repr_html_(self):
return "$ -\\frac{1}{\\vert \\mathbf{r}_1 - \\mathbf{r}_2 \\vert} $"
class twobody_coulomb_operator_older(operator):
def __init__(self):
pass
def __mul__(self, other):
vid = other.variable_identities
if vid is None:
assert(False), "unable to determine variables of ket"
new_basis = []
for i in range(len(other.basis)):
fs1, fs2 = vid[i]
denom = 0
for k,l in zip(list(fs1), list(fs2)):
denom += (k - l)**2
            new_basis.append( ket(other.basis[i].ket_sympy_expression/sp.sqrt(denom) ) )
return ket(other.coefficients, basis = new_basis)
def _repr_html_(self):
return "$ -\\frac{1}{\\vert \\mathbf{r}_1 - \\mathbf{r}_2 \\vert} $"
class twobody_coulomb_operator_old():
def __init__(self):
pass
def __mul__(self, other, r = None):
variables = other.basis[0].ket_sympy_expression.free_symbols
r = 0
for j in variables:
r += j**2
r_inv = r**-.5
new_coefficients = other.coefficients
new_basis = []
for i in other.basis:
new_basis.append(basisfunction(r_inv*i.ket_sympy_expression, position = i.position))
        return ket([-c for c in new_coefficients], basis = new_basis)
def _repr_html_(self):
return "$ -\\frac{1}{\\mathbf{r}} $"
def get_standard_basis(n):
b = np.eye(n)
basis = []
for i in range(n):
basis.append(ket(b[i], basis = b))
return basis
class ket(object):
"""
A class for vectors defined on general vector spaces
Author: <NAME> (<EMAIL>)
## Keyword arguments:
| Method | Description |
| ----------- | ----------- |
| generic_input | if list or numpy.ndarray: if basis is None, returns a cartesian vector else, assumes input to contain coefficients. If sympy expression, returns ket([1], basis = [basisfunction(generic_input)]) |
| name | a string, used for labelling and plotting, visual aids |
| basis | a list of basisfunctions |
| position | assumed centre of function $\langle \\vert \hat{\mathbf{r}} \\vert \\rangle$. |
    | energy | if this is an eigenstate of a Hamiltonian, its eigenvalue may be fixed at initialization |
## Operations
For kets B and A and scalar c
| Operation | Description |
| ----------- | ----------- |
| A + B | addition |
| A - C | subtraction |
| A * c | scalar multiplication |
| A / c | division by a scalar |
| A * B | pointwise product |
| A.bra*B | inner product |
| A.bra@B | inner product |
| A @ B | cartesian product |
| A(x) | $\int_R^n \delta(x - x') f(x') dx'$ evaluate function at x |
"""
def __init__(self, generic_input, name = "", basis = None, position = None, energy = None):
"""
## Initialization of a ket
"""
self.position = position
if type(generic_input) in [np.ndarray, list]:
self.coefficients = list(generic_input)
self.basis = [i for i in np.eye(len(self.coefficients))]
if basis is not None:
self.basis = basis
else:
# assume sympy expression
if position is None:
position = np.zeros(len(generic_input.free_symbols), dtype = float)
self.coefficients = [1.0]
self.basis = [basisfunction(generic_input, position = position)]
self.ket_sympy_expression = self.get_ket_sympy_expression()
self.bra_sympy_expression = self.get_bra_sympy_expression()
if energy is not None:
self.energy = energy
else:
self.energy = [0 for i in range(len(self.basis))]
self.__name__ = name
self.bra_state = False
self.a = None
"""
Algebraic operators
"""
def __add__(self, other):
new_basis = self.basis + other.basis
new_coefficients = self.coefficients + other.coefficients
new_energies = self.energy + other.energy
ret = ket(new_coefficients, basis = new_basis, energy = new_energies)
ret.flatten()
ret.__name__ = "%s + %s" % (self.__name__, other.__name__)
return ret
def __sub__(self, other):
new_basis = self.basis + other.basis
new_coefficients = self.coefficients + [-i for i in other.coefficients]
ret = ket(new_coefficients, basis = new_basis)
ret.flatten()
ret.__name__ = "%s - %s" % (self.__name__, other.__name__)
return ret
def __mul__(self, other):
if type(other) is ket:
new_basis = []
new_coefficients = []
for i in range(len(self.basis)):
for j in range(len(other.basis)):
new_basis.append(self.basis[i]*other.basis[j])
new_coefficients.append(self.coefficients[i]*other.coefficients[j])
#return self.__matmul__(other)
return ket(new_coefficients, basis = new_basis)
else:
return ket([other*i for i in self.coefficients], basis = self.basis)
def __rmul__(self, other):
return ket([other*i for i in self.coefficients], basis = self.basis)
def __truediv__(self, other):
assert(type(other) in [float, int]), "Divisor must be float or int"
return ket([i/other for i in self.coefficients], basis = self.basis)
def __matmul__(self, other):
"""
Inner- and Cartesian products
"""
if type(other) in [float, int]:
return self*other
if type(other) is ket:
if self.bra_state:
# Compute inner product: < self | other >
                metric = np.zeros((len(self.basis), len(other.basis)), dtype = complex)
for i in range(len(self.basis)):
for j in range(len(other.basis)):
if type(self.basis[i]) is np.ndarray and type(other.basis[j]) is np.ndarray:
metric[i,j] = np.dot(self.basis[i], other.basis[j])
else:
if type(self.basis[i]) is basisfunction:
if type(other.basis[j]) is basisfunction:
# (basisfunction | basisfunction)
metric[i,j] = inner_product(self.basis[i], other.basis[j])
if other.basis[j] is ket:
# (basisfunction | ket )
                                    metric[i,j] = ket([1.0], basis = [self.basis[i]]).bra@other.basis[j]
else:
if type(other.basis[j]) is basisfunction:
# ( ket | basisfunction )
metric[i,j] = self.basis[i].bra@ket([1.0], basis = [other.basis[j]])
else:
# ( ket | ket )
                                    metric[i,j] = self.basis[i].bra@other.basis[j]
if np.linalg.norm(metric.imag)<=1e-10:
metric = metric.real
return np.array(self.coefficients).T.dot(metric.dot(np.array(other.coefficients)))
else:
if type(other) is ket:
if other.bra_state:
return outerprod(self, other)
else:
new_coefficients = []
new_basis = []
variable_identities = [] #for potential two-body interactions
for i in range(len(self.basis)):
for j in range(len(other.basis)):
#bij, sep = split_variables(self.basis[i].ket_sympy_expression, other.basis[j].ket_sympy_expression)
bij, sep = relabel_direct(self.basis[i].ket_sympy_expression, other.basis[j].ket_sympy_expression)
#bij = ket(bij)
bij = basisfunction(bij) #, position = other.basis[j].position)
bij.position = np.append(self.basis[i].position, other.basis[j].position)
new_basis.append(bij)
new_coefficients.append(self.coefficients[i]*other.coefficients[j])
variable_identities.append(sep)
ret = ket(new_coefficients, basis = new_basis)
ret.flatten()
ret.__name__ = self.__name__ + other.__name__
ret.variable_identities = variable_identities
return ret
def set_position(self, position):
for i in range(len(self.basis)):
pass
def flatten(self):
"""
Remove redundancies in the expansion of self
"""
new_coefficients = []
new_basis = []
new_energies = []
found = []
for i in range(len(self.basis)):
if i not in found:
new_coefficients.append(self.coefficients[i])
new_basis.append(self.basis[i])
new_energies.append(self.energy[i])
for j in range(i+1, len(self.basis)):
if type(self.basis[i]) is np.ndarray:
if type(self.basis[j]) is np.ndarray:
if np.all(self.basis[i]==self.basis[j]):
new_coefficients[i] += self.coefficients[j]
found.append(j)
else:
if self.basis[i].ket_sympy_expression == self.basis[j].ket_sympy_expression:
if np.all(self.basis[i].position == self.basis[j].position):
new_coefficients[i] += self.coefficients[j]
found.append(j)
self.basis = new_basis
self.coefficients = new_coefficients
self.energy = new_energies
def get_ket_sympy_expression(self):
ret = 0
for i in range(len(self.coefficients)):
if type(self.basis[i]) in [basisfunction, ket]:
ret += self.coefficients[i]*self.basis[i].ket_sympy_expression
else:
ret += self.coefficients[i]*self.basis[i]
return ret
def get_bra_sympy_expression(self):
ret = 0
for i in range(len(self.coefficients)):
if type(self.basis[i]) in [basisfunction, ket]:
ret += np.conjugate(self.coefficients[i])*self.basis[i].bra_sympy_expression
else:
ret += np.conjugate(self.coefficients[i]*self.basis[i])
return ret
def __call__(self, *R, t = None):
#Ri = *np.array([R[i] - self.position[i] for i in range(len(self.position))])
#Ri = np.array([R[i] - self.position[i] for i in range(len(self.position))], dtype = object)
if t is None:
result = 0
if self.bra_state:
for i in range(len(self.basis)):
result += np.conjugate(self.coefficients[i]*self.basis[i](*R))
else:
for i in range(len(self.basis)):
result += self.coefficients[i]*self.basis[i](*R)
return result
else:
result = 0
if self.bra_state:
for i in range(len(self.basis)):
                    result += np.conjugate(self.coefficients[i]*self.basis[i](*R)*np.exp(-1j*self.energy[i]*t))
else:
for i in range(len(self.basis)):
                    result += self.coefficients[i]*self.basis[i](*R)*np.exp(-1j*self.energy[i]*t)
return result
@property
def bra(self):
return self.__a
@bra.setter
def a(self, var):
self.__a = copy.copy(self)
self.__a.bra_state = True
def _repr_html_(self):
if self.bra_state:
return "$\\langle %s \\vert$" % self.__name__
else:
return "$\\vert %s \\rangle$" % self.__name__
def run(self, x = 8*np.linspace(-1,1,100), t = 0, dt = 0.001):
anim_s = anim.system(self, x, t, dt)
anim_s.run()
"""
Measurement
"""
def measure(self, observable = None, repetitions = 1):
"""
        Make a measurement of the observable (hermitian operator)
Measures by default the continuous distribution as defined by self.bra*self
"""
if observable is None:
# Measure position
P = self.get_bra_sympy_expression()*self.get_ket_sympy_expression()
symbols = get_ordered_symbols(P)
P = sp.lambdify(symbols, P, "numpy")
nd = len(symbols)
sig = .1 #variance of initial distribution
r = np.random.multivariate_normal(np.zeros(nd), sig*np.eye(nd), repetitions )
# Metropolis-Hastings
for i in range(1000):
dr = np.random.multivariate_normal(np.zeros(nd), 0.01*sig*np.eye(nd), repetitions)
                accept = P(*(r+dr).T)/P(*r.T) > np.random.uniform(0,1,repetitions)
r[accept] += dr[accept]
return r
else:
#assert(False), "Arbitrary measurements not yet implemented"
# get coefficients
P = np.zeros(len(observable.eigenstates), dtype = float)
for i in range(len(observable.eigenstates)):
P[i] = (observable.eigenstates[i].bra@self)**2
distribution = discrete_metropolis_hastings(P, n_samples = repetitions)
return observable.eigenvalues[distribution]
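# Usage sketch (illustrative): building and combining kets defined above.
#
#   x = sp.Symbol("x")
#   a = ket(sp.exp(-x**2), name="a")
#   b = ket(x*sp.exp(-x**2), name="b")
#   overlap = a.bra @ b      # numerical estimate of <a|b>, ~0 here by odd symmetry
#   c = 2*a + b              # linear combination, still a ket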
def discrete_metropolis_hastings(P, n_samples = 10000, n_iterations = 10000, stepsize = None):
"""
Perform a random walk in the discrete distribution P (array)
"""
#ensure normality
n = np.sum(P)
Px = interp1d(np.linspace(0,1,len(P)), P/n)
x = np.random.uniform(0,1,n_samples)
if stepsize is None:
#set stepsize proportional to discretization
stepsize = .5*len(P)**-1
print("stepsize:", stepsize)
for i in range(n_iterations):
dx = np.random.normal(0,stepsize, n_samples)
xdx = x + dx
# periodic boundaries
xdx[xdx<0] += 1
xdx[xdx>1] -= 1
accept = Px(xdx)/Px(x) > np.random.uniform(0,1,n_samples)
x[accept] = xdx[accept]
return np.array(x*len(P), dtype = int)
def metropolis_hastings(f, N, x0, a):
"""
Metropolis-Hastings random walk in the function f
"""
x = np.random.multivariate_normal(x0, a, N)
for i in range(1000):
dx = np.random.multivariate_normal(x0, a*0.01, N)
accept = f(x+dx)/f(x) > np.random.uniform(0,1,N)
x[accept] += dx[accept]
return x
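# Usage sketch (illustrative): drawing samples whose density is proportional to f.
#
#   f = lambda x: np.exp(-0.5*np.sum(x**2, axis=1))   # unnormalized standard normal
#   samples = metropolis_hastings(f, 10000, np.zeros(2), np.eye(2))
#   # samples has shape (10000, 2) and is approximately standard-normal distributed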
def get_particles_in_expression(s):
symbols = get_ordered_symbols(s)
particles = []
for i in symbols:
particles.append( int(i.name.split("{")[1].split(";")[0] ) )
particles = np.array(particles)
return np.unique(particles)
def get_ordered_symbols(sympy_expression):
symbols = np.array(list(sympy_expression.free_symbols))
l_symbols = np.argsort([i.name for i in symbols])
return symbols[l_symbols]
def substitute_sequence(s, var_i, var_f):
for i in range(len(var_i)):
s = s.subs(var_i[i], var_f[i])
return s
def relabel_direct(s1,s2):
p1 = get_particles_in_expression(s1)
p_max = p1.max() + 1
p2 = get_particles_in_expression(s2)
for i in p2:
if i in p1:
s2 = substitute_sequence(s2, get_default_variables(i), get_default_variables(p_max))
p_max += 1
return s1*s2, get_ordered_symbols(s1*s2)
def split_variables(s1,s2):
"""
make a product where
"""
# gather particles in first symbols
s1s = get_ordered_symbols(s1)
for i in range(len(s1s)):
s1 = s1.subs(s1s[i], sp.Symbol("x_{0; %i}" % i))
s2s = get_ordered_symbols(s2)
for i in range(len(s2s)):
s2 = s2.subs(s2s[i], sp.Symbol("x_{1; %i}" % i))
return s1*s2, get_ordered_symbols(s1*s2)
def parse_symbol(x):
"""
Parse a symbol of the form
x_{i;j}
Return a list
[i,j]
"""
strspl = str(x).split("{")[1].split("}")[0].split(";")
return [int(i) for i in strspl]
def map_expression(sympy_expression, x1=0, x2=1):
"""
Map out the free symbols of sympy_expressions
in order to determine
z[p, x]
where p = [0,1] is particle x1 and x2, while
x is their cartesian component
"""
map_ = {x1:0, x2:1}
s = sympy_expression.free_symbols
n = int(len(s)/2)
z = np.zeros((2, n), dtype = object)
for i in s:
j,k = parse_symbol(i) #particle, coordinate
z[map_[j], k] = i
return z, n
def get_twobody_denominator(sympy_expression, p1, p2):
"""
For a sympy_expression of arbitrary dimensionality,
generate the coulomb operator
1/sqrt( r_{p1, p2} )
assuming that the symbols are of the form "x_{pn, x_i}"
where x_i is the cartesian vector component
"""
mex, n = map_expression(sympy_expression, p1, p2)
denom = 0
for i in range(n):
denom += (mex[0,i] - mex[1,i])**2
return sp.sqrt(denom)
def apply_twobody_operator(sympy_expression, p1, p2):
"""
Generate the sympy expression
sympy_expression / | x_p1 - x_p2 |
"""
return sympy_expression/get_twobody_denominator(sympy_expression, p1, p2)
def trace(outer):
assert(len(outer.ket.basis)==len(outer.bra.basis)), "Trace ill-defined."
return projector(outer.ket, outer.bra)
class outerprod(object):
def __init__(self, ket, bra):
self.ket = ket
self.bra = bra
def _repr_html_(self):
return "$$\\sum_{ij} \\vert %s_i \\rangle \\langle %s_j \\vert$$" % (self.ket.__name__, self.bra.__name__)
def __mul__(self, other):
if type(other) is ket:
coefficients = []
for i in range(len(self.ket.basis)):
coeff_i = 0
for j in range(len(self.bra.basis)):
if type(self.bra.basis[j]) is ket:
                        coeff_i += self.bra.basis[j].bra@other
if type(self.bra.basis[j]) is basisfunction:
coeff_i += ket([1], basis = [self.bra.basis[j]]).bra@other
coefficients.append(coeff_i*self.ket.coefficients[i])
return ket(coefficients, basis = copy.copy(self.ket.basis), energy = copy.copy(self.ket.energy))
class projector(object):
def __init__(self, ket, bra):
self.ket = ket
self.bra = bra
def _repr_html_(self):
return "$$\\sum_{i} \\vert %s_i \\rangle \\langle %s_i \\vert$$" % (self.ket.__name__, self.bra.__name__)
def __mul__(self, other):
if type(other) is ket:
coefficients = []
for i in range(len(self.ket.basis)):
coeff_i = 0
if type(self.bra.basis[i]) is ket:
coeff_i = self.bra.basis[i].bra@other
if type(self.bra.basis[i]) is basisfunction:
coeff_i = ket([1], basis = [self.bra.basis[i]]).bra@other
coefficients.append(coeff_i*self.ket.coefficients[i])
return ket(coefficients, basis = copy.copy(self.ket.basis), energy = copy.copy(self.ket.energy))
@lru_cache(maxsize=100)
def inner_product(b1, b2, operator = None, n_samples = int(1e6), grid = 101, sigma = None):
"""
Computes the inner product < b1 | b2 >, where bn are instances of basisfunction
Keyword arguments:
b1, b2 -- basisfunction objects
operator -- obsolete
n_samples -- number of Monte Carlo samples
grid -- number of grid-points in every direction for the
spline control variate
Returns:
The inner product as a float
"""
ri = b1.position*0
rj = b2.position*0
integrand = lambda *R, \
f1 = b1.bra_numeric_expression, \
f2 = b2.ket_numeric_expression, \
ri = ri, rj = rj: \
f1(*np.array([R[i] - ri[i] for i in range(len(ri))]))*f2(*np.array([R[i] - rj[i] for i in range(len(rj))]))
#f1(*np.array([R[i] - ri[i] for i in range(len(ri))]))*f2(*np.array([R[i] - rj[i] for i in range(len(rj))]))
variables_b1 = b1.bra_sympy_expression.free_symbols
variables_b2 = b2.ket_sympy_expression.free_symbols
if len(variables_b1) == 1 and len(variables_b2) == 1:
return integrate.quad(integrand, -10,10)[0]
else:
ai,aj = b1.decay, b2.decay
ri,rj = b1.position, b2.position
R = (ai*ri + aj*rj)/(ai+aj)
if sigma is None:
sigma = .5*(ai + aj)
#print("R, sigma:", R, sigma)
return onebody(integrand, np.ones(len(R))*sigma, R, n_samples) #, control_variate = "spline", grid = grid)
"""
else:
R, sigma = locate(b1.bra_sympy_expression*b2.ket_sympy_expression)
return onebody(integrand, np.ones(len(R))*sigma, R, n_samples) #, control_variate = "spline", grid = grid)
"""
def compose_basis(p):
"""
generate a list of basis functions
corresponding to the AO-basis
(same ordering and so on)
"""
basis = []
for charge in np.arange(p.charges.shape[0]):
atomic_number = p.charges[charge]
atom = np.argwhere(p.atomic_numbers==atomic_number)[0,0] #index of basis function
pos = p.atoms[charge]
for shell in np.arange(len(p.basis_set[atom])):
for contracted in np.arange(len(p.basis_set[atom][shell])):
W = np.array(p.basis_set[atom][shell][contracted])
w = W[:,1]
a = W[:,0]
if shell == 1:
for m in np.array([1,-1,0]):
basis.append(basis_function([shell,m,a,w], basis_type = "cgto",domain = [[-8,8],[-8,8],[-8,8]], position = pos))
else:
for m in np.arange(-shell, shell+1):
basis.append(basis_function([shell,m,a,w], basis_type = "cgto",domain = [[-8,8],[-8,8],[-8,8]], position = pos))
return basis
def get_control_variate(integrand, loc, a = .6, tmin = 1e-5, extent = 6, grid = 101):
"""
Generate an nd interpolated control variate
returns RegularGridInterpolator, definite integral on mesh, mesh points
Keyword arguments
integrand -- an evaluateable function
loc -- position offset for integrand
a -- grid density decay,
tmin --
extent --
grid -- number of grid points
"""
t = np.linspace(tmin,extent**a,grid)**(a**-1)
t = np.append(-t[::-1],t)
R_ = np.ones((loc.shape[0],t.shape[0]))*t[None,:]
R = np.meshgrid(*(R_ - loc[:, None]), indexing='ij', sparse=True)
data = integrand(*R)
# Integrate
I0 = rgrid_integrate_nd(t, data)
#return RegularGridInterpolator(R_, data, bounds_error = False, fill_value = 0), I0, t
return RegularGridInterpolator(R_-loc[:, None], data, bounds_error = False, fill_value = 0), I0, t
def rgrid_integrate_3d(points, values):
"""
regular grid integration, 3D
"""
# volume per cell
v = np.diff(points)
v = v[:,None,None]*v[None,:,None]*v[None,None,:]
# weight per cell
w = values[:-1] + values[1:]
w = w[:, :-1] + w[:, 1:]
w = w[:, :, :-1] + w[:, :, 1:]
w = w/8
return np.sum(w*v)
def rgrid_integrate_nd(points, values):
"""
Integrate over n dimensions as linear polynomials on a grid
Keyword arguments:
points -- cartesian coordinates of gridpoints
values -- values of integrand at gridpoints
Returns:
Integral of linearly interpolated integrand
"""
points = np.diff(points)
w = ""
for i in range(len(values.shape)):
cycle = ""
for j in range(len(values.shape)):
if j==i:
cycle+=":,"
else:
cycle+="None,"
w +="points[%s] * " % cycle[:-1]
v = eval(w[:-2])
w = values
wd= 1
for i in range(len(values.shape)):
w = eval("w[%s:-1] + w[%s1:]" % (i*":,", i*":,"))
wd *= 2
return np.sum(v*w/wd)
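# Quick check sketch (illustrative): on a uniform grid over the unit square, the
# trapezoid-style weights recover the area of a constant integrand.
#
#   t = np.linspace(0, 1, 11)
#   rgrid_integrate_nd(t, np.ones((11, 11)))   # -> 1.0 (up to rounding)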
def sphere_distribution(N_samples, scale = 1):
theta = np.random.uniform(0,2*np.pi, N_samples)
phi = np.arccos(np.random.uniform(-1,1, N_samples))
r = np.random.exponential(scale, N_samples)
x = r*np.sin(phi)*np.cos(theta)
y = r*np.sin(phi)*np.sin(theta)
z = r*np.cos(phi)
return np.array([x,y,z])
def sphere_pdf(x, scale =1):
r = np.sqrt(np.sum(x**2, axis= 0))
    return np.exp(-r/scale)/scale
def onebody(integrand, sigma, loc, n_samples, control_variate = lambda *r : 0, grid = 101, I0 = 0):
"""
Monte Carlo (MC) estimate of integral
Keyword arguments:
integrand -- evaluatable function
sigma -- standard deviation of normal distribution used
for importance sampling
loc -- centre used for control variate and importance sampling
    n_samples -- number of MC samples
control_variate -- evaluatable function
grid -- sampling density of spline control variate
I0 -- analytical integral of control variate
returns:
Estimated integral (float)
"""
if control_variate == "spline":
control_variate, I0, t = get_control_variate(integrand, loc, a = .6, tmin = 1e-5, extent = 6, grid = grid)
#R = np.random.multivariate_normal(loc, np.eye(len(loc))*sigma, n_samples)
#R = np.random.Generator.multivariate_normal(loc, np.eye(len(loc))*sigma, size=n_samples)
#sig = np.eye(len(loc))*sigma
sig = np.diag(sigma)
R = np.random.default_rng().multivariate_normal(loc, sig, n_samples)
P = multivariate_normal(mean=loc, cov=sig).pdf(R)
return I0+np.mean((integrand(*R.T)-control_variate(R)) * P**-1)
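# Usage sketch (illustrative): estimating a 3d Gaussian integral by importance sampling.
#
#   integrand = lambda x, y, z: np.exp(-(x**2 + y**2 + z**2))
#   estimate = onebody(integrand, np.ones(3), np.zeros(3), int(1e5))
#   # estimate fluctuates around pi**1.5 ~ 5.57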
def eri_mci(phi_p, phi_q, phi_r, phi_s,
pp = np.array([0,0,0]),
pq = np.array([0,0,0]),
pr = np.array([0,0,0]),
ps = np.array([0,0,0]),
N_samples = 1000000, sigma = .5,
Pr = np.array([0,0,0]),
Qr = np.array([0,0,0]),
zeta = 1,
eta = 1,
auto = False,
control_variate = lambda x1,x2,x3,x4,x5,x6 : 0):
"""
Electron repulsion integral estimate using zero-variance Monte Carlo
"""
x = np.random.multivariate_normal([0,0,0,0,0,0], np.eye(6)*sigma, N_samples)
P = multivariate_normal(mean=[0,0,0,0,0,0], cov=np.eye(6)*sigma).pdf
if auto:
# estimate mean and variance of orbitals
X,Y,Z = np.random.uniform(-5,5,(3, 10000))
P_1 = phi_p(X,Y,Z)*phi_q(X,Y,Z)
P_2 = phi_r(X,Y,Z)*phi_s(X,Y,Z)
Pr[0] = np.mean(P_1**2*X)
Pr[1] = np.mean(P_1**2*Y)
        Pr[2] = np.mean(P_1**2*Z)
import argparse
import math
import os
import numpy as np
import torch
from torch.nn import Module, Dropout, Embedding, Linear, Transformer
from torch import optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils import WordVocab, Seq2seqDataset, split
class PositionalEncoding(Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
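# Shape sketch (illustrative): the module expects (sequence, batch, d_model) tensors and
# adds the first x.size(0) rows of the precomputed table, e.g.
#   pe = PositionalEncoding(d_model=64)
#   y = pe(torch.zeros(35, 8, 64))   # -> torch.Size([35, 8, 64])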
class TrfmSmiles(Module):
def __init__(self, in_size, hidden_size, out_size, n_layers, nhead=4, dropout=0.1):
super(TrfmSmiles, self).__init__()
self.in_size = in_size
self.hidden_size = hidden_size
self.embed = Embedding(in_size, hidden_size)
self.pe = PositionalEncoding(hidden_size, dropout)
self.trfm = Transformer(d_model=hidden_size, nhead=nhead, num_encoder_layers=n_layers,
num_decoder_layers=n_layers, dim_feedforward=hidden_size)
self.out = Linear(hidden_size, out_size)
def forward(self, src):
# src: (T,B)
embedded = self.embed(src) # (T,B,H)
embedded = self.pe(embedded) # (T,B,H)
hidden = self.trfm(embedded, embedded) # (T,B,H)
out = self.out(hidden) # (T,B,V)
out = F.log_softmax(out, dim=2) # (T,B,V)
return out # (T,B,V)
def _encode(self, src):
# src: (T,B)
embedded = self.embed(src) # (T,B,H)
embedded = self.pe(embedded) # (T,B,H)
output = embedded
for i in range(self.trfm.encoder.num_layers - 1):
output = self.trfm.encoder.layers[i](output, None) # (T,B,H)
penul = output.detach().numpy()
output = self.trfm.encoder.layers[-1](output, None) # (T,B,H)
if self.trfm.encoder.norm:
output = self.trfm.encoder.norm(output) # (T,B,H)
output = output.detach().numpy()
# mean, max, first*2
        return np.hstack([np.mean(output, axis=0), np.max(output, axis=0), output[0], penul[0]])
# 2021-03 : Initial code [<NAME>, IGE-CNRS]
#============================================================================================
import numpy as np
from scipy import interpolate
#============================================================================================
def vertical_interp(original_depth,interpolated_depth):
""" Find upper and lower bound indices for simple vertical interpolation
"""
if ( original_depth[1] < original_depth[0] ):
ll_kupward = True
else:
ll_kupward = False
nn = np.size(interpolated_depth)
kinf=np.zeros(nn,dtype='int')
ksup=np.zeros(nn,dtype='int')
for k in np.arange(nn):
knear = np.argmin( np.abs( original_depth - interpolated_depth[k] ) )
if (original_depth[knear] > interpolated_depth[k]):
ksup[k] = knear
if ll_kupward:
kinf[k] = np.min([ np.size(original_depth)-1, knear+1 ])
else:
kinf[k] = np.max([ 0, knear-1 ])
else:
kinf[k] = knear
if ll_kupward:
ksup[k] = np.max([ 0, knear-1 ])
else:
ksup[k] = np.min([ np.size(original_depth)-1, knear+1 ])
return (kinf,ksup)
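# Example sketch (illustrative): for original_depth = np.array([0, 10, 20, 30]) (increasing,
# so ll_kupward is False) and interpolated_depth = np.array([5]), the nearest level is
# index 0, which lies above the target depth, so kinf = [0] and ksup = [1].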
#============================================================================================
def horizontal_interp( lon_in_1d, lat_in_1d, mlat_misomip, mlon_misomip, lon_out_1d, lat_out_1d, var_in_1d ):
""" Interpolates one-dimension data horizontally to a 2d numpy array reshaped to the misomip standard (lon,lat) format.
Method: triangular linear barycentryc interpolation, using nans (i.e. gives nan if any nan in the triangle)
lon\_in\_1d, lat\_in\_1d: 1d longitude and latitude of data to interpolate
var\_in\_1d: 1d input data (same dimension as lon\_in\_1d and lat\_in\_1d)
mlat\_misomip, mlon\_misomip: misomip grid size (nb points) alond latitude and longitude dimensions
lon\_out\_1d, lat\_out\_1d: 1d longitude and latitude of the target misomip grid
"""
txxxx = interpolate.griddata( (lon_in_1d,lat_in_1d), var_in_1d, (lon_out_1d,lat_out_1d), method='linear', fill_value=np.nan )
out = np.reshape( txxxx, (mlat_misomip, mlon_misomip) )
return out
#============================================================================================
def horizontal_interp_nonan( lon_in_1d, lat_in_1d, mlat_misomip, mlon_misomip, lon_out_1d, lat_out_1d, var_in_1d ):
""" Interpolates one-dimension data horizontally to a 2d numpy array reshaped to the misomip standard (lon,lat) format.
Method: triangular linear barycentryc interpolation, NOT using nans (i.e. find triangle with non-nan values)
and nearest-neighbor interpolations for points not surrounded by 3 data points.
lon\_in\_1d, lat\_in\_1d: 1d longitude and latitude of data to interpolate
var\_in\_1d: 1d input data (same dimension as lon\_in\_1d and lat\_in\_1d)
mlat\_misomip, mlon\_misomip: misomip grid size (nb points) alond latitude and longitude dimensions
lon\_out\_1d, lat\_out\_1d: 1d longitude and latitude of the target misomip grid
"""
var1d_nonan = var_in_1d[ (~np.isnan(var_in_1d)) & (~np.isinf(var_in_1d)) ]
if ( np.size(var1d_nonan)==0 ):
        out = np.zeros((mlat_misomip, mlon_misomip))
# coding: utf-8
###
# @file attacker.py
# @author <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
#
# @section LICENSE
#
# MIT License
#
# Copyright (c) 2020 Distributed Computing Laboratory, EPFL
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###
#!/usr/bin/env python
import numpy as np
class Attacker:
""" Class defining the attack used. """
def __init__(self, attack):
_possible_attacks = {
'Random': self.random_attack,
'Reverse': self.reverse_attack,
'PartialDrop': self.partial_drop_attack,
'LittleIsEnough': self.little_is_enough_attack,
'FallEmpires': self.fall_empires_attack
}
self.attack_strategy = _possible_attacks[attack]
def attack(self, **kwargs):
""" Compute the attack.
Args:
- gradients: numpy array
Returns:
gradients: numpy array
"""
return self.attack_strategy(**kwargs)
def random_attack(self, gradient):
""" Return a random gradient with the same size of the submitted one.
Args:
- gradients numpy array
Returns:
Random gradient
"""
return np.random.random(gradient.shape).astype(np.float32)
def reverse_attack(self, gradient, coeff=100):
""" Return the gradient, yet in the opposite direction and amplified.
Args:
- gradients numpy array
- coeff float number representing the amplification
Returns:
numpy array
"""
return gradient*coeff*(-1.)
def partial_drop_attack(self, gradient, probability):
""" return the gradient but with some missing coordinates (replaced by zeros)
Args
- gradient numpy array
- probability float number representing the percent of the values that should be replaced by zeros
Returns:
numpy array
"""
        mask = np.random.random(gradient.shape) > 1-probability
return np.ma.array(gradient, mask=mask).filled(fill_value=0)
### Should be available only for Byzantine Workers:
def little_is_enough_attack(self, gradient, byz_gradients):
""" return a Byzantine gradient based on the little is enough attack
Args:
- gradient numpy array
- byz_gradients list of numpy array
"""
#First, calculate fw true gradients; this simulates the cooperation of fw Byzantine workers
grad = gradient
est_grads = byz_gradients
est_grads.append(grad)
        #Stack these gradients together and calculate their mean and standard deviation
est_grads = np.stack(est_grads)
mu = np.mean(est_grads,axis=0)
sigma = np.std(est_grads,axis=0)
#Now, apply the rule of the attack to generate the Byzantine gradient
z = 1.035 #Pre-calculated value for z_{max} from z-table, based on n=20, f=8 (and hence, s=3)
grad = mu + z*sigma
return grad
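    # Numeric illustration (hypothetical gradients): with scalar est_grads [1., 2., 3.],
    # mu = 2.0 and sigma ~ 0.816, so the returned Byzantine gradient is ~ 2.0 + 1.035*0.816 ~ 2.84.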
def fall_empires_attack(self, gradient, byz_gradients):
""" return a Byzantine gradient based on the fall of empires attack
Args:
- gradient numpy array
- byz_gradients list of numpy array
"""
#First, calculate fw true gradients; this simulates the cooperation of fw Byzantine workers
grad = gradient
est_grads = byz_gradients
est_grads.append(grad)
        #Stack these gradients together and calculate their mean and standard deviation
est_grads = np.stack(est_grads)
        mu = np.mean(est_grads,axis=0)
import datetime as dt
import functools
import networkx as nx
from numba import njit
import numpy as np
import scipy.sparse as sp
from .fem_attribute import FEMAttribute
from . import functions
class GraphProcessorMixin:
def separate(self):
"""Separate the FEMData object into parts in terms of connected
subgraphs.
Returns
-------
list_fem_data: List[femio.FEMData]
Connected subgraphs of the FEMData object. The components are
in the order of the smallest node ids.
"""
original_graph = nx.from_scipy_sparse_matrix(
self.calculate_adjacency_matrix_element())
list_element_indices = list(nx.connected_components(original_graph))
unsorted_list_fem_data = [
self.extract_with_element_indices(list(element_indices))
for element_indices in list_element_indices]
return sorted(
unsorted_list_fem_data, key=lambda fd: np.min(fd.nodes.ids))
def convert_id_elements_to_index_elements(self, element_ids=None):
if element_ids is None:
return self.nodes.ids2indices(self.elements.data)
else:
return self.nodes.ids2indices(
self.elements.filter_with_ids[element_ids].data)
@functools.lru_cache(maxsize=1)
def extract_surface(self, elements=None, element_type=None):
"""Extract surface from solid mesh.
Returns
-------
surface_indices:
indices of nodes (not IDs).
surface_positions:
Positions of each nodes on the surface.
"""
dict_facets = self.extract_facets()
dict_facet_shapes = {'tri': [], 'quad': [], 'polygon': []}
for facet in dict_facets.values():
for f in facet:
n_node_per_element = f.shape[-1]
if n_node_per_element == 3:
dict_facet_shapes['tri'].append(f)
elif n_node_per_element == 4:
dict_facet_shapes['quad'].append(f)
else:
n_nodes = np.array([len(f_) for f_ in f])
unique_n_nodes = np.unique(n_nodes)
if 3 in unique_n_nodes:
dict_facet_shapes['tri'].append(
np.stack(f[n_nodes == 3]))
if 4 in unique_n_nodes:
dict_facet_shapes['quad'].append(
np.stack(f[n_nodes == 4]))
dict_facet_shapes['polygon'].append(f[n_nodes > 4])
extracted_surface_info = {
k: self._extract_surface(np.concatenate(v, axis=0), facet_type=k)
for k, v in dict_facet_shapes.items() if len(v) > 0}
if len(extracted_surface_info) == 1:
s = list(extracted_surface_info.values())[0]
return s[0], s[1]
else:
return {k: v[0] for k, v in extracted_surface_info.items()}, \
{k: v[1] for k, v in extracted_surface_info.items()}
def _extract_surface(self, facets, facet_type):
sorted_facets = np.array([np.sort(f) for f in facets])
if facet_type == 'polygon':
surface_indices, surface_positions \
= self._extract_surface_polygon(facets, sorted_facets)
else:
unique_sorted_facets, unique_indices, unique_counts = np.unique(
sorted_facets, return_index=True, return_counts=True, axis=0)
surface_ids = facets[unique_indices[np.where(unique_counts == 1)]]
surface_indices = self.nodes.ids2indices(surface_ids)
surface_positions = self.nodes.data[surface_indices]
return surface_indices, surface_positions
def _extract_surface_polygon(self, facets, sorted_facets):
n_nodes = np.array([len(f) for f in sorted_facets])
unique_n_nodes = np.unique(n_nodes)
list_surface_indices = []
list_surface_positions = []
n_surface = 0
for n_node in unique_n_nodes:
focus_sorted_facets = sorted_facets[n_nodes == n_node]
unique_sorted_facets, unique_indices, unique_counts = np.unique(
np.stack(focus_sorted_facets),
return_index=True, return_counts=True, axis=0)
focus_facets = facets[n_nodes == n_node]
surface_ids = focus_facets[
unique_indices[np.where(unique_counts == 1)]]
surface_indices = self.nodes.ids2indices(surface_ids)
surface_positions = np.array([
self.nodes.data[si] for si in surface_indices], dtype=object)
n_surface += len(surface_ids)
list_surface_indices.append(surface_indices)
list_surface_positions.append(surface_positions)
ret_surface_indices = np.empty(n_surface, object)
ret_surface_indices[:] = [
f_ for f in list_surface_indices for f_ in f]
ret_surface_positions = np.empty(n_surface, object)
ret_surface_positions[:] = [
f_ for f in list_surface_positions for f_ in f]
return ret_surface_indices, ret_surface_positions
def extract_surface_fistr(self):
"""Extract surface from solid mesh.
Returns
-------
        surface_data: 2D array of int.
            Each row corresponds to the (element_id, surface_id) of a surface facet.
"""
data = self.elements.data
N = len(data)
# node_0, node_1, node_2, elem_id, surf_id
surfs = np.empty((4 * N, 5), np.int32)
surfs[0 * N:1 * N, :3] = data[:, [0, 1, 2]]
surfs[1 * N:2 * N, :3] = data[:, [0, 1, 3]]
surfs[2 * N:3 * N, :3] = data[:, [1, 2, 3]]
surfs[3 * N:4 * N, :3] = data[:, [2, 0, 3]]
surfs[0 * N:1 * N, 3] = self.elements.ids
surfs[1 * N:2 * N, 3] = self.elements.ids
surfs[2 * N:3 * N, 3] = self.elements.ids
surfs[3 * N:4 * N, 3] = self.elements.ids
surfs[0 * N:1 * N, 4] = 1
surfs[1 * N:2 * N, 4] = 2
surfs[2 * N:3 * N, 4] = 3
surfs[3 * N:4 * N, 4] = 4
surfs[:, :3].sort(axis=1)
ind = np.lexsort(
(surfs[:, 4], surfs[:, 3], surfs[:, 2], surfs[:, 1], surfs[:, 0]))
surfs = surfs[ind]
        # select surface faces, i.e. faces that appear exactly once
unique = np.ones(4 * N, np.bool_)
distinct = (surfs[:-1, 0] != surfs[1:, 0])
distinct |= (surfs[:-1, 1] != surfs[1:, 1])
distinct |= (surfs[:-1, 2] != surfs[1:, 2])
unique[:-1] &= distinct
unique[1:] &= distinct
surfs = surfs[unique]
return surfs[:, 3:]
def extract_facets(
self, elements=None, element_type=None, remove_duplicates=False,
method=None):
"""Extract facets.
Parameters
----------
elements: femio.FEMAttribute, optional
If fed, extract facets of the specified elements.
        element_type: str, optional
            If not fed, the element type is inferred from the number of nodes
            per element.
method: callable
A method to aggregate facet features. If not fed, numpy.concatenate
is used.
Returns
-------
facets: dict[tuple(numpy.ndarray)]
"""
if elements is None:
elements = self.elements
if element_type is None:
if hasattr(elements, 'element_type'):
element_type = elements.element_type
else:
nodes_per_element = elements.data.shape[1]
if nodes_per_element == 3:
element_type = 'tri'
elif nodes_per_element == 4:
element_type = 'tet'
elif nodes_per_element == 10:
element_type = 'tet2'
elif nodes_per_element == 8:
element_type = 'hex'
elif nodes_per_element == 12:
element_type = 'hexprism'
else:
raise ValueError(
f"Unknown nodes_per_element: {nodes_per_element}")
if hasattr(elements, 'element_type'):
if elements.element_type == 'mix':
return {
element_type:
self.extract_facets(
element, element_type=element_type,
remove_duplicates=remove_duplicates, method=method)[
element_type]
for element_type, element in self.elements.items()}
else:
elements = list(elements.values())[0]
if element_type == 'tri' or element_type == 'quad':
facets = (elements.data,)
else:
facets = self._generate_all_faces(
elements, element_type, method=method)
if remove_duplicates:
facets = tuple(functions.remove_duplicates(f) for f in facets)
return {element_type: facets}
def _generate_all_faces(
self, elements=None, element_type=None, method=None):
if elements is None:
elements = self.elements
if element_type is None:
if hasattr(elements, 'element_type'):
element_type = elements.element_type
else:
nodes_per_element = elements.data.shape[1]
if nodes_per_element == 3:
element_type = 'tri'
elif nodes_per_element == 4:
element_type = 'tet'
elif nodes_per_element == 10:
element_type = 'tet2'
elif nodes_per_element == 8:
element_type = 'hex'
else:
raise ValueError(
f"Unknown nodes_per_element: {nodes_per_element}")
if hasattr(elements, 'element_type'):
root_data = {
element_type:
self.extract_surface(element, element_type=element_type)
for element_type, element in self.elements.items()}
return {
e: d[0] for e, d in root_data.items()}, {
e: d[1] for e, d in root_data.items()}
if method is None:
method = np.concatenate
if isinstance(elements, np.ndarray):
elements_data = elements
else:
elements_data = elements.data
if element_type in ['tri', 'quad', 'polygon']:
            face_ids = elements_data
elif element_type == 'tet':
face_ids = method([
np.stack([
[element[0], element[2], element[1]],
[element[0], element[1], element[3]],
[element[1], element[2], element[3]],
[element[0], element[3], element[2]],
]) for element in elements_data])
elif element_type == 'tet2':
tet1_elements = elements_data[:, :4]
face_ids = self._generate_all_faces(
tet1_elements, 'tet', method=method)
elif element_type == 'hex':
face_ids = method([[
[e[0], e[1], e[5], e[4]],
[e[0], e[3], e[2], e[1]],
[e[1], e[2], e[6], e[5]],
[e[2], e[3], e[7], e[6]],
[e[3], e[0], e[4], e[7]],
[e[4], e[5], e[6], e[7]]]
for e in elements_data])
elif element_type == 'pyr':
face_ids = (
method([
[
[e[0], e[1], e[4]],
[e[1], e[2], e[4]],
[e[2], e[3], e[4]],
[e[3], e[0], e[4]],
]
for e in elements_data]),
method([
[
[e[0], e[3], e[2], e[1]],
]
for e in elements_data]))
elif element_type == 'prism':
face_ids = (
method([
[
[e[0], e[2], e[1]],
[e[3], e[4], e[5]],
]
for e in elements_data]),
method([
[
[e[0], e[1], e[4], e[3]],
[e[1], e[2], e[5], e[4]],
[e[0], e[3], e[5], e[2]],
]
for e in elements_data]))
elif element_type == 'hexprism':
face_ids = method([[
[e[0], e[5], e[4], e[1]],
[e[1], e[4], e[3], e[2]],
[e[5], e[11], e[10], e[4]],
[e[4], e[10], e[9], e[3]],
[e[3], e[9], e[8], e[2]],
[e[0], e[6], e[11], e[5]],
[e[6], e[7], e[10], e[11]],
[e[7], e[8], e[9], e[10]],
[e[1], e[2], e[8], e[7]],
[e[0], e[1], e[7], e[6]]]
for e in elements_data])
elif element_type == 'polyhedron':
assert 'face' in self.elemental_data, \
'No face definition found for polyhedron: ' \
f"{self.elemental_data.keys()}"
face_ids = method([
self._parse_polyhedron_faces(f)
for f in self.elemental_data.get_attribute_data('face')])
else:
raise NotImplementedError(
f"Unexpected element type: {element_type}")
if isinstance(face_ids, tuple):
return face_ids
else:
return (face_ids,)
def _parse_polyhedron_faces(self, faces):
def split(n, f):
return f[:n], f[n:]
d = faces[1:]
parsed_faces = []
for i in range(faces[0]):
face, d = split(d[0], d[1:])
parsed_faces.append(
|
np.array(face)
|
numpy.array
|
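As a standalone illustration of the surface-extraction idea used in `_extract_surface` above (a facet that appears exactly once among all element facets lies on the boundary, while shared facets appear twice), here is a minimal NumPy sketch. The function name and the example facet array are hypothetical, and femio's node-id bookkeeping is omitted.
import numpy as np

def boundary_facets(facets):
    # facets: (n_facets, nodes_per_facet) integer array of node indices.
    # Sort node indices within each facet so that orientation does not matter.
    sorted_facets = np.sort(facets, axis=1)
    # A facet shared by two elements appears twice; a boundary facet appears once.
    _, unique_indices, unique_counts = np.unique(
        sorted_facets, return_index=True, return_counts=True, axis=0)
    return facets[unique_indices[unique_counts == 1]]

# Two tetrahedra sharing the facet (1, 2, 3): only that shared facet is interior.
tet_facets = np.array([
    [0, 2, 1], [0, 1, 3], [1, 2, 3], [0, 3, 2],   # element 0
    [4, 1, 2], [4, 3, 1], [1, 3, 2], [4, 2, 3],   # element 1
])
print(boundary_facets(tet_facets))  # six boundary facets remain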
from .interval import IntervalGoalEnv
from abc import ABC, abstractmethod
import numpy as np
import copy
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math
#todo first run just the algorithm with the collision minimizer alongside to see what Q values it creates
#A state-space visualizer for the V value will be needed (heat map)
env_dt = 0.02
#takes obstacles from both steps and returns the most current ones as a 2-dim array
def extract_most_current_obstacles(obstacles_array):
    splitted = np.array(np.split(obstacles_array, len(obstacles_array) // 4))
most_recent = splitted[0:len(splitted):2, :]
return most_recent
#b_bboxes is expected to be 2 dim array
def check_collisions(a_bbox, b_bboxes):
# b_min_x - a_max_x
d1x = (b_bboxes[:, 0] - b_bboxes[:, 2]) - (a_bbox[0]+a_bbox[2])
d1y = (b_bboxes[:, 1] - b_bboxes[:, 3]) - (a_bbox[1]+a_bbox[3])
d2x = (a_bbox[0] - a_bbox[2]) - (b_bboxes[:, 0] + b_bboxes[:, 2])
d2y = (a_bbox[1] - a_bbox[3]) - (b_bboxes[:, 1] + b_bboxes[:, 3])
d1_bools = np.logical_or(d1x>0., d1y>0.)
d2_bools = np.logical_or(d2x>0., d2y>0.)
d_bools = np.logical_or(d1_bools, d2_bools)
return np.logical_not(d_bools)
def aabbs_max_distances(a_bbox, b_bboxes):
dcxs = np.abs(b_bboxes[:, 0] - a_bbox[0])
    extra_x_dist = b_bboxes[:, 2] + a_bbox[2]
x_max_dists = dcxs + extra_x_dist
dcys = np.abs(b_bboxes[:, 1] - a_bbox[1])
extra_y_dist = b_bboxes[:, 3] + a_bbox[3]
y_max_dists = dcys + extra_y_dist
d_maxs = np.sqrt(x_max_dists**2 + y_max_dists**2)
return d_maxs
def aabbs_min_distances(a_bbox, b_bboxes):
dcxs = np.abs(b_bboxes[:, 0] - a_bbox[0])
extra_x_dist = b_bboxes[:, 2] + a_bbox[2]
zeros_array = np.zeros(shape=extra_x_dist.shape)
x_min_dists = np.maximum(dcxs - extra_x_dist, zeros_array)
dcys = np.abs(b_bboxes[:, 1] - a_bbox[1])
extra_y_dist = b_bboxes[:, 3] + a_bbox[3]
y_min_dists = np.maximum(dcys - extra_y_dist, zeros_array)
d_mins = np.sqrt(x_min_dists**2 + y_min_dists**2)
return d_mins
def calc_vels(bboxes, bboxes_prev, dt):
pos_dif = bboxes[:, 0:2] - bboxes_prev[:, 0:2]
vel = pos_dif / dt
return vel
def calc_angles(a_bbox, b_bboxes):
if a_bbox[0] == 100. and a_bbox[1] == 100.:
# use a negative angle so it is different in this case
angles = np.repeat(-1., repeats=b_bboxes.shape[0])
else:
angles = np.arctan2(b_bboxes[:, 1] - a_bbox[1], b_bboxes[:, 0] - a_bbox[0]) * 180 / np.pi # to degree
angles = angles % 360.
angles = np.expand_dims(angles, axis=1)
return angles
class ObsExtender(ABC):
def __init__(self, args):
self.args = args
self.counter = 0
@abstractmethod
def extend_obs(self, obs, env):
pass
    def step(self):  # just incrementing the step counter here makes sense
self.counter += 1
def reset_ep(self):
self.counter = 0
#leaves everything as it is, used for test of HGG
class DummyExtender(ObsExtender):
def __init__(self, args):
super(DummyExtender, self).__init__(args)
def extend_obs(self, obs, env):
return obs
class ObsExtenderBbox(ObsExtender):
def __init__(self, args):
super(ObsExtenderBbox, self).__init__(args)
def extend_obs(self, obs, env):
if self.args.vae_dist_help:
extra_goal_state = np.concatenate([obs['achieved_goal_latent'],
obs['achieved_goal_size_latent']])
obstacle_l = obs['obstacle_latent']
obstacle_s_l = obs['obstacle_size_latent']
obstacle_len_shape = len(obstacle_l.shape)
if len(obstacle_l.shape) > 1:
extra_obstacle_state = np.concatenate([obstacle_l, obstacle_s_l], axis=1)
else:
extra_obstacle_state = np.concatenate([obstacle_l, obstacle_s_l])
else:
extra_goal_state = np.concatenate([obs['achieved_goal'][:2],
obs['real_size_goal'][:2]])
obstacle_info = obs['real_obstacle_info']
obstacle_len_shape = len(obstacle_info.shape)
if len(obstacle_info.shape) > 1:
extra_obstacle_state = np.concatenate([obstacle_info[:, :2], obstacle_info[:, -3:-1]], axis=1)
else:
extra_obstacle_state = np.concatenate([obstacle_info[:2], obstacle_info[-3:-1]])
if self.counter == 0:
extra_goal_state = np.concatenate([extra_goal_state, extra_goal_state])
if obstacle_len_shape > 1:
extra_obstacle_state = np.ravel(np.concatenate([extra_obstacle_state, extra_obstacle_state],
axis=1)
)
else:
extra_obstacle_state = np.concatenate([extra_obstacle_state, extra_obstacle_state])
self.single_step_extra_goal_state_size = len(extra_goal_state) // 2
self.single_step_extra_obstacle_state_size = len(extra_obstacle_state) // 2
self.start_index_extra_observation = len(obs['observation'])
else:
# the first entries will always have the more recent representations
prev_obs = env.last_obs.copy()
begin_index = self.start_index_extra_observation
end_index = begin_index + self.single_step_extra_goal_state_size
prev_goal_state = prev_obs['observation'][begin_index: end_index]
begin_index = self.start_index_extra_observation + 2 * self.single_step_extra_goal_state_size
            # This one extracts until the end since there might be more than one obstacle
end_index = begin_index + self.single_step_extra_obstacle_state_size * 2
prev_obstacle_state = prev_obs['observation'][begin_index: end_index]
prev_obstacle_state = extract_most_current_obstacles(prev_obstacle_state)
# the previous ones are pushed to the back
extra_goal_state = np.concatenate([extra_goal_state, prev_goal_state])
if obstacle_len_shape > 1:
extra_obstacle_state = np.ravel(np.concatenate([extra_obstacle_state, prev_obstacle_state], axis=1))
else:
extra_obstacle_state = np.concatenate([extra_obstacle_state, prev_obstacle_state[0]])
new_state = np.concatenate([obs['observation'], extra_goal_state, extra_obstacle_state])
obs['observation'] = new_state
return obs
#basically the same but does not extend the state that is passed to agent. This class will be inherited to extend the
#class in other ways
class ObsExtBboxInfo(ObsExtender):
def __init__(self, args):
super(ObsExtBboxInfo, self).__init__(args)
def extend_obs(self, obs, env):
if self.args.vae_dist_help:
extra_goal_state = np.concatenate([obs['achieved_goal_latent'],
obs['achieved_goal_size_latent']])
obstacle_l = obs['obstacle_latent']
obstacle_s_l = obs['obstacle_size_latent']
obstacle_len_shape = len(obstacle_l.shape)
if len(obstacle_l.shape) > 1:
extra_obstacle_state = np.concatenate([obstacle_l, obstacle_s_l], axis=1)
else:
extra_obstacle_state = np.concatenate([obstacle_l, obstacle_s_l])
extra_obstacle_state = np.expand_dims(extra_obstacle_state, axis=0)
else:
extra_goal_state = np.concatenate([obs['achieved_goal'][:2],
obs['real_size_goal'][:2]])
obstacle_info = obs['real_obstacle_info']
if len(obstacle_info.shape) > 1:
extra_obstacle_state = np.concatenate([obstacle_info[:, :2], obstacle_info[:, -3:-1]], axis=1)
else:
extra_obstacle_state = np.concatenate([obstacle_info[:2], obstacle_info[-3:-1]])
extra_obstacle_state = np.expand_dims(extra_obstacle_state, axis=0)
if self.counter == 0:
            #It is the first observation. We cannot assume anything from previous steps and therefore init every
            #field with the same values
#might not need to store all goal state since they will not be used
obs['goal_st_t'] = extra_goal_state.copy()
obs['goal_st_t_minus1'] = extra_goal_state.copy()
obs['goal_st_t_minus2'] = extra_goal_state.copy()
obs['obstacle_st_t'] = extra_obstacle_state.copy()
obs['obstacle_st_t_minus1'] = extra_obstacle_state.copy()
obs['obstacle_st_t_minus2'] = extra_obstacle_state.copy()
else:
# the previous ones are pushed to the back
prev_obs = env.last_obs.copy()
obs['goal_st_t'] = extra_goal_state.copy()
obs['goal_st_t_minus1'] = prev_obs['goal_st_t'].copy()
obs['goal_st_t_minus2'] = prev_obs['goal_st_t_minus1'].copy()
obs['obstacle_st_t'] = extra_obstacle_state.copy()
obs['obstacle_st_t_minus1'] = prev_obs['obstacle_st_t'].copy()
            obs['obstacle_st_t_minus2'] = prev_obs['obstacle_st_t_minus1'].copy()
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = copy.deepcopy(obs)
new_obs['obstacle_st_t'] = new_obstacle_list.copy()
        new_obs['obstacle_st_t_minus1'] = None
        new_obs['obstacle_st_t_minus2'] = None
return new_obs
class ObsExtBboxColl(ObsExtBboxInfo):
def __init__(self, args):
super(ObsExtBboxColl, self).__init__(args)
def extend_obs(self, obs, env):
obs = ObsExtBboxInfo.extend_obs(self, obs, env)
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
# goal object is not in visible range
if goal_bbox[0] == 100. and goal_bbox[1] == 100.:
obs['coll'] = 0.
obs['coll_bool_ar'] = np.repeat(False, len(obstacle_bboxes))
else:
cols = check_collisions(goal_bbox, obstacle_bboxes)
obs['coll_bool_ar'] = cols.copy()
            ncols = np.sum(cols.astype(float))
obs['coll'] = ncols
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = super(ObsExtBboxColl, self)._modify_obs(obs, new_obstacle_list, extra_info, index)
goal_bbox = new_obs['goal_st_t']
obstacle_bboxes = new_obs['obstacle_st_t']
# goal object is not in visible range
if goal_bbox[0] == 100. and goal_bbox[1] == 100.:
new_obs['coll'] = 0.
new_obs['coll_bool_ar'] = np.repeat(False, len(obstacle_bboxes))
else:
cols = check_collisions(goal_bbox, obstacle_bboxes)
new_obs['coll_bool_ar'] = cols.copy()
            ncols = np.sum(cols.astype(float))
new_obs['coll'] = ncols
return new_obs
class ObsExtMinDist(ObsExtBboxColl):
def __init__(self, args):
super(ObsExtMinDist, self).__init__(args)
def extend_obs(self, obs, env):
obs = super(ObsExtMinDist, self).extend_obs(obs, env)
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
# goal object is not in visible range therefore distance really far away
if goal_bbox[0] == 100. and goal_bbox[1] == 100.:
dists = np.repeat(1000., repeats=obstacle_bboxes.shape[0])
else:
dists = aabbs_min_distances(goal_bbox, obstacle_bboxes)
obs['dists'] = dists.copy()
new_state = np.concatenate([obs['observation'], dists.copy()])
obs['observation'] = new_state
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = super(ObsExtMinDist, self)._modify_obs(obs, new_obstacle_list, extra_info, index)
goal_bbox = new_obs['goal_st_t']
obstacle_bboxes = new_obs['obstacle_st_t']
# goal object is not in visible range therefore distance really far away
if goal_bbox[0] == 100. and goal_bbox[1] == 100.:
dists = np.repeat(1000., repeats=obstacle_bboxes.shape[0])
else:
dists = aabbs_min_distances(goal_bbox, obstacle_bboxes)
new_obs['dists'] = dists.copy()
len_dists = len(dists)
new_obs['observation'][-len_dists:] = dists
return new_obs
class ObsExtP(ObsExtMinDist):
def __init__(self, args):
super(ObsExtP, self).__init__(args)
def extend_obs(self, obs, env):
obs = super(ObsExtP, self).extend_obs(obs, env)
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
dists = obs['dists'].copy()
len_dists = len(dists)
dists = np.expand_dims(dists, axis=1)
pos = obstacle_bboxes[:, 0:2]
observation_without_dist = obs['observation'][:-len_dists]
extension = np.concatenate([dists, pos], axis=1)
new_state = np.concatenate([observation_without_dist, np.ravel(extension)])
obs['observation'] = new_state
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = super(ObsExtP, self)._modify_obs(obs, new_obstacle_list, extra_info, index)
goal_bbox = new_obs['goal_st_t']
obstacle_bboxes = new_obs['obstacle_st_t']
dists = new_obs['dists'].copy()
len_dists = len(dists)
dists = np.expand_dims(dists, axis=1)
pos = obstacle_bboxes[:, 0:2]
extension = np.ravel(np.concatenate([dists, pos], axis=1))
len_extension = len(extension)
new_obs['observation'][-len_extension:] = extension
return new_obs
class ObsExtPAV(ObsExtMinDist):
def __init__(self, args):
super(ObsExtPAV, self).__init__(args)
self.length_extension = None
self.env_dt = None
def extend_obs(self, obs, env):
if self.env_dt is None:
self.env_dt = env.env.env.dt
obs = super(ObsExtPAV, self).extend_obs(obs, env)
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
previous_obstacle_bboxes = obs['obstacle_st_t_minus1']
dists = obs['dists'].copy()
len_dists = len(dists)
dists = np.expand_dims(dists, axis=1)
pos = obstacle_bboxes[:, 0:2]
dt = env.env.dt
vel = calc_vels(obstacle_bboxes, previous_obstacle_bboxes, dt)
angles = calc_angles(goal_bbox, obstacle_bboxes)
observation_without_dist = obs['observation'][:-len_dists]
extension = np.ravel(np.concatenate([dists, pos, angles, vel], axis=1))
if self.length_extension is None:
self.length_extension = len(extension)
new_state = np.concatenate([observation_without_dist, extension])
obs['observation'] = new_state
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = super(ObsExtPAV, self)._modify_obs(obs, new_obstacle_list, extra_info, index)
goal_bbox = new_obs['goal_st_t']
obstacle_bboxes = new_obs['obstacle_st_t']
dists = new_obs['dists'].copy()
len_dists = len(dists)
dists = np.expand_dims(dists, axis=1)
pos = obstacle_bboxes[:, 0:2]
len_pos_el = len(pos[0])
#vel = calc_vels(obstacle_bboxes, previous_obstacle_bboxes, dt)
angles = calc_angles(goal_bbox, obstacle_bboxes)
mock_extension = np.ravel(np.concatenate([dists, pos, angles, pos], axis=1))
len_extension = len(mock_extension)
unmodified_extension = obs['observation'][-len_extension:].copy()
        unmodified_extension = np.reshape(unmodified_extension, newshape=(-1, 1 + 2 * len_pos_el + 1))
vel = unmodified_extension[:, -2:]
dir_not_scaled = extra_info['dir_not_scaled']
if self.env_dt is None:
raise Exception('this was called before modification of obs')
vel[index] = dir_not_scaled / self.env_dt
extension = np.ravel(np.concatenate([dists, pos, angles, vel], axis=1))
len_extension = len(extension)
new_obs['observation'][-len_extension:] = extension
return new_obs
def visualize(self, obs, file_name):
extension = obs['observation'][- self.length_extension:].copy()
extension = np.reshape(extension, (-1, 6))
dists = extension[:, 0:1]
pos = extension[:, 1:3]
angles = extension[:, 3:4]
vel = extension[:, 4:6]
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
fig, ax = plt.subplots()
if True:
c_x, c_y = self.args.field_center[0], self.args.field_center[1]
d_x, d_y = self.args.field_size[0], self.args.field_size[1]
support_points = np.array([[c_x - d_x, c_y - d_y],
[c_x - d_x, c_y + d_y],
[c_x + d_x, c_y - d_y],
[c_x + d_x, c_y + d_y]])
ax.scatter(support_points[:, 0], support_points[:, 1], c='black')
ax.scatter(pos[:, 0], pos[:, 1], c='blue')
ax.scatter(pos[:, 0], pos[:, 1], s=dists[:, 0], c='blue', alpha=0.8)
ax.scatter([goal_bbox[0]], [goal_bbox[1]], c='red')
ax.quiver(pos[:, 0], pos[:, 1], vel[:, 0], vel[:, 1])
def plot_point_angle(x, y, a, length, color):
# find the end point
endy = y + length * math.sin(math.radians(a))
endx = x + length * math.cos(math.radians(a))
# plot the points
ax.plot([x, endx], [y, endy], '--', c=color)
colors_extended_area = ['yellow', 'green', 'orange']
if True:
for i in range(len(obstacle_bboxes)):
bb = obstacle_bboxes[i]
#half of extended area
ax.add_patch(
patches.Rectangle(
(bb[0] - bb[2], bb[1] - bb[3]),
bb[2] * 2,
bb[3] * 2,
edgecolor='blue',
facecolor='blue',
fill=True,
alpha = 0.6
))
#extended area
e_h = dists[i]
ax.add_patch(
patches.Rectangle(
(goal_bbox[0] - goal_bbox[2] - e_h, goal_bbox[1] - goal_bbox[3] - e_h),
(goal_bbox[2]+e_h) * 2,
(goal_bbox[3]+e_h) * 2,
edgecolor=colors_extended_area[i],
facecolor=colors_extended_area[i],
fill=True,
alpha=0.2
))
#todo visualization for corner cases
l = np.linalg.norm(pos[i] - goal_bbox[:2])
plot_point_angle(goal_bbox[0], goal_bbox[1], angles[i], l, colors_extended_area[i])
#goal
ax.add_patch(
patches.Rectangle(
(goal_bbox[0] - goal_bbox[2], goal_bbox[1] - goal_bbox[3]),
goal_bbox[2] * 2,goal_bbox[3] * 2,
edgecolor='red',facecolor='red',
fill=True,
alpha=0.5
))
plt.savefig(file_name)
plt.close()
#calculates position relative to the goal object
class ObsExtPRel(ObsExtMinDist):
def __init__(self, args):
super(ObsExtPRel, self).__init__(args)
def extend_obs(self, obs, env):
obs = super(ObsExtPRel, self).extend_obs(obs, env)
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
dists = obs['dists'].copy()
len_dists = len(dists)
dists = np.expand_dims(dists, axis=1)
pos = obstacle_bboxes[:, 0:2]
# here transformation to relative
pos = pos - goal_bbox[0:2]
observation_without_dist = obs['observation'][:-len_dists]
extension = np.concatenate([dists, pos], axis=1)
new_state = np.concatenate([observation_without_dist,
|
np.ravel(extension)
|
numpy.ravel
|
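As a small usage illustration of the AABB helpers defined at the top of the file above (each bbox is treated as [center_x, center_y, half_width, half_height]), here is a hedged, self-contained sketch of the separating-axis overlap test; the example boxes are made up.
import numpy as np

def check_collisions(a_bbox, b_bboxes):
    # Same idea as in the file above: two axis-aligned boxes do not overlap
    # if there is a positive gap along x or along y.
    d1x = (b_bboxes[:, 0] - b_bboxes[:, 2]) - (a_bbox[0] + a_bbox[2])
    d1y = (b_bboxes[:, 1] - b_bboxes[:, 3]) - (a_bbox[1] + a_bbox[3])
    d2x = (a_bbox[0] - a_bbox[2]) - (b_bboxes[:, 0] + b_bboxes[:, 2])
    d2y = (a_bbox[1] - a_bbox[3]) - (b_bboxes[:, 1] + b_bboxes[:, 3])
    separated = np.logical_or.reduce([d1x > 0., d1y > 0., d2x > 0., d2y > 0.])
    return ~separated

# Goal box at the origin, one overlapping obstacle and one far away.
goal = np.array([0.0, 0.0, 0.5, 0.5])
obstacles = np.array([[0.6, 0.0, 0.3, 0.3],   # overlaps the goal box
                      [3.0, 3.0, 0.2, 0.2]])  # clearly separated
print(check_collisions(goal, obstacles))  # -> [ True False]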
from collections import defaultdict
import csv
import json
from logging import Logger
import os
import sys
from typing import Callable, Dict, List, Tuple
import subprocess
import numpy as np
import pandas as pd
from .run_training import run_training
from chemprop.args import TrainArgs
from chemprop.constants import TEST_SCORES_FILE_NAME, TRAIN_LOGGER_NAME
from chemprop.data import get_data, get_task_names, MoleculeDataset, validate_dataset_type
from chemprop.utils import create_logger, makedirs, timeit
from chemprop.features import set_extra_atom_fdim, set_extra_bond_fdim, set_explicit_h, set_reaction, reset_featurization_parameters
@timeit(logger_name=TRAIN_LOGGER_NAME)
def cross_validate(args: TrainArgs,
train_func: Callable[[TrainArgs, MoleculeDataset, Logger], Dict[str, List[float]]]
) -> Tuple[float, float]:
"""
Runs k-fold cross-validation.
For each of k splits (folds) of the data, trains and tests a model on that split
and aggregates the performance across folds.
:param args: A :class:`~chemprop.args.TrainArgs` object containing arguments for
loading data and training the Chemprop model.
:param train_func: Function which runs training.
:return: A tuple containing the mean and standard deviation performance across folds.
"""
logger = create_logger(name=TRAIN_LOGGER_NAME, save_dir=args.save_dir, quiet=args.quiet)
if logger is not None:
debug, info = logger.debug, logger.info
else:
debug = info = print
# Initialize relevant variables
init_seed = args.seed
save_dir = args.save_dir
args.task_names = get_task_names(path=args.data_path, smiles_columns=args.smiles_columns,
target_columns=args.target_columns, ignore_columns=args.ignore_columns)
# Print command line
debug('Command line')
debug(f'python {" ".join(sys.argv)}')
# Print args
debug('Args')
debug(args)
# Save args
makedirs(args.save_dir)
try:
args.save(os.path.join(args.save_dir, 'args.json'))
except subprocess.CalledProcessError:
debug('Could not write the reproducibility section of the arguments to file, thus omitting this section.')
args.save(os.path.join(args.save_dir, 'args.json'), with_reproducibility=False)
#set explicit H option and reaction option
reset_featurization_parameters(logger=logger)
set_explicit_h(args.explicit_h)
set_reaction(args.reaction, args.reaction_mode)
# Get data
debug('Loading data')
data = get_data(
path=args.data_path,
args=args,
logger=logger,
skip_none_targets=True,
data_weights_path=args.data_weights_path
)
validate_dataset_type(data, dataset_type=args.dataset_type)
args.features_size = data.features_size()
if args.atom_descriptors == 'descriptor':
args.atom_descriptors_size = data.atom_descriptors_size()
args.ffn_hidden_size += args.atom_descriptors_size
elif args.atom_descriptors == 'feature':
args.atom_features_size = data.atom_features_size()
set_extra_atom_fdim(args.atom_features_size)
if args.bond_features_path is not None:
args.bond_features_size = data.bond_features_size()
set_extra_bond_fdim(args.bond_features_size)
debug(f'Number of tasks = {args.num_tasks}')
if args.target_weights is not None and len(args.target_weights) != args.num_tasks:
raise ValueError('The number of provided target weights must match the number and order of the prediction tasks')
# Run training on different random seeds for each fold
all_scores = defaultdict(list)
for fold_num in range(args.num_folds):
info(f'Fold {fold_num}')
args.seed = init_seed + fold_num
args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
makedirs(args.save_dir)
data.reset_features_and_targets()
# If resuming experiment, load results from trained models
test_scores_path = os.path.join(args.save_dir, 'test_scores.json')
if args.resume_experiment and os.path.exists(test_scores_path):
print('Loading scores')
with open(test_scores_path) as f:
model_scores = json.load(f)
# Otherwise, train the models
else:
model_scores = train_func(args, data, logger)
for metric, scores in model_scores.items():
all_scores[metric].append(scores)
all_scores = dict(all_scores)
# Convert scores to numpy arrays
for metric, scores in all_scores.items():
all_scores[metric] = np.array(scores)
# Report results
info(f'{args.num_folds}-fold cross validation')
# Report scores for each fold
for fold_num in range(args.num_folds):
for metric, scores in all_scores.items():
info(f'\tSeed {init_seed + fold_num} ==> test {metric} = {np.nanmean(scores[fold_num]):.6f}')
if args.show_individual_scores:
for task_name, score in zip(args.task_names, scores[fold_num]):
info(f'\t\tSeed {init_seed + fold_num} ==> test {task_name} {metric} = {score:.6f}')
# Report scores across folds
for metric, scores in all_scores.items():
avg_scores = np.nanmean(scores, axis=1) # average score for each model across tasks
mean_score, std_score = np.nanmean(avg_scores),
|
np.nanstd(avg_scores)
|
numpy.nanstd
|
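The tail of `cross_validate` above averages per-task scores within each fold and then reports the mean and standard deviation across folds. A minimal sketch of that aggregation on made-up numbers (the metric name and the values are hypothetical):
import numpy as np

# scores[fold, task]: hypothetical test RMSE for 3 folds and 2 tasks.
scores = np.array([[0.82, 0.91],
                   [0.78, np.nan],   # a task can be missing in a fold
                   [0.85, 0.88]])
avg_scores = np.nanmean(scores, axis=1)          # average over tasks per fold
mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
print(f"Overall test rmse = {mean_score:.6f} +/- {std_score:.6f}")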
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
y = [2,4,6,8,10,12,14,16,18,20]
x =
|
np.arange(10)
|
numpy.arange
|
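The tiny matplotlib row above stops right after building `x`; a hedged completion that simply plots `y` against `x` might look like the following (the axis labels are assumptions):
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

y = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
x = np.arange(10)
plt.plot(x, y)   # y is a linear function of x, so this draws a straight line
plt.xlabel("x")
plt.ylabel("y")
plt.show()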
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import numpy as np
class MCCube:
"""A cube used for drawing points and running a Monte Carlo integration."""
def __init__(self, intervals: [(float, float)] = None, volume = .0):
"""Constructor.
Args:
intervals: A list of tuples of two floats, defining the interval boundaries of each parameter.
volume: The total integration volume (separately calculated from the intervals).
"""
self.intervals = intervals
self.volume = volume
self.values = []
self.ordered_values = []
self.ordered_indices = []
self.zeros = None
self.ones = None
# self.probabilities = {}
self.log_p = {}
self.log_1_p = {}
self.p_non_positive = {}
self.p_non_ones = {}
def draw(self, n_draws):
"""Draw points uniformly within the cube.
Args:
n_draws: The number of draws of points from the cube.
"""
self.values = []
for interval in self.intervals:
values = np.random.uniform(interval[0], interval[1], n_draws)
self.values.append(values)
self.zeros = np.zeros(n_draws)
self.ones =
|
np.ones(n_draws)
|
numpy.ones
|
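To make the intent of `MCCube.draw` concrete, here is a minimal sketch of how uniformly drawn points can be turned into a Monte Carlo estimate of an integral. The integrand and intervals are made up; the original class stores one array of draws per parameter in `self.values`, which is mirrored here by a plain list.
import numpy as np

# Integrate f(x, y) = x * y over [0, 1] x [0, 2]; the exact value is 1.0.
intervals = [(0.0, 1.0), (0.0, 2.0)]
volume = (1.0 - 0.0) * (2.0 - 0.0)
n_draws = 100_000

rng = np.random.default_rng(0)
values = [rng.uniform(lo, hi, n_draws) for lo, hi in intervals]  # one array per parameter
f = values[0] * values[1]
estimate = volume * np.mean(f)   # MC estimate: volume times the mean of the integrand
print(estimate)                  # close to 1.0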
import sys
import yaml
from itertools import groupby
from matplotlib import pyplot as plt
import numpy as np
folder = sys.argv[1]
data_list = [] # Initialize array of datasets. Each element is a time step.
with open(folder + "/E") as f:
# Split datafile into sections separated by empty lines
for k, g in groupby(f, lambda x: x=="\n"):
if not k:
            # Split line of data after comma, stripping whitespace.
data_list.append(np.array([[float(x) for x in d.split(',')]
for d in g if len(d.strip()) ]))
with open(folder + "/params") as f:
    p = yaml.safe_load(f)
for key, val in p.items():
p[key] = float(val)
matlablines = open("%s/inte0" % sys.argv[2]).read().splitlines()
for line in matlablines:
line = float(line.strip())
I_init = 1e-4*np.asarray(matlablines,dtype='double')
matlablines = open("%s/inte" % sys.argv[2]).read().splitlines()
for line in matlablines:
line = float(line.strip())
I_finl = 1e-4*np.asarray(matlablines,dtype='double')
matlablines = open("%s/spec0" % sys.argv[2]).read().splitlines()
for line in matlablines:
line = float(line.strip())
If_init = 1e-4*np.asarray(matlablines,dtype='double')
matlablines = open("%s/spec" % sys.argv[2]).read().splitlines()
for line in matlablines:
line = float(line.strip())
If_finl = 1e-4*np.asarray(matlablines,dtype='double')
# x axes
dt = p["tmax"]/(p["Nt"]-1) # Time step
points = np.arange(-p["Nt"]/2,p["Nt"]/2,1)
t = points*dt # Time grid iterator
f = points/p["tmax"] # Frequency grid 0 centered
f = f + 299792458/p["lambda"]
# frequency axis
# Initialize figure
fig = plt.figure()
fig.suptitle(r"Simulation of: $\lambda = %.0f$ nm, $E = %.0f$ $\mu$J, $\Delta t_{fwhm} = %.0f$ fs, $P=%.0f$ Bar" % (p["lambda"]*1E9, p["Energy"]*1E6, p["Tfwhm"]*1E15, p["Pout"]))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
# Only make sys.argv[2] # of plots in between the initial and final.
# If set to only 1, prints the final plot
#data_list = [data_list[i] for i in map(int, np.linspace(len(data_list)-1,0,2))]
data_list = [data_list[i] for i in map(int, np.linspace(len(data_list)-1,0,2))]
# Position in Color Space
cpos = np.linspace(0.8, 0.2, len(data_list))
# Plot each timestep
re, im = data_list[0].T
ax3.plot(t*1e15,1E-4*(np.power(re,2) + np.power(im,2))[::-1],
#color=plt.cm.viridis(c))
)
re, im = data_list[1].T
ax1.plot(t*1e15,1E-4*(np.power(re,2) + np.power(im,2)),
#color=plt.cm.viridis(c))
)
ax1.plot(t*1e15, I_init)
ax3.plot(t*1e15, I_finl)
ax1.set_title("Pulse Shape")
ax1.set_xlabel("t (fs)")
ax1.set_ylabel(r"Intensity (W$\cdot$cm$^{-2}$)")
ax3.set_title("Pulse Shape")
ax3.set_xlabel("t (fs)")
ax3.set_ylabel(r"Intensity (W$\cdot$cm$^{-2}$)")
re, im = data_list[0].T
Ef = np.fft.fftshift(np.fft.fft(np.fft.fftshift(re+im*1j)))
re = np.real(Ef)
im = np.imag(Ef)
If = (np.power(re,2) + np.power(im,2))
ax4.plot(f*1e-12,(If/max(If))[::-1],
#color=plt.cm.viridis(c))
)
re, im = data_list[1].T
Ef = np.fft.fftshift(np.fft.fft(np.fft.ifftshift(re+im*1j)))
re = np.real(Ef)
im = np.imag(Ef)
If = (np.power(re,2) + np.power(im,2))
print(len(If))
print(len(If_init))
ax2.plot(f*1e-12,If/max(If),
#color=plt.cm.viridis(c))
)
# THIS IS DIRTY, FIX THIS WHEN YOU'RE LESS LAZY
ax4.plot(f*1e-12, np.roll(If_finl/max(If_finl),0))
ax2.plot(f*1e-12, If_init/max(If_init))
ax2.set_xlim(
|
np.array([-60,60])
|
numpy.array
|
import os
import json
import time
import torch
import argparse
import numpy as np
import datetime
import torch.nn as nn
from tensorboardX import SummaryWriter
from loss import XE_LOSS, BPR_LOSS, SIG_LOSS
from metric import get_example_recall_precision, compute_bleu, get_bleu, get_sentence_bleu
from model import GraphX
import random
import torch.nn.functional as F
from rouge import Rouge
from torch_geometric.utils import to_dense_batch
class TRAINER(object):
def __init__(self, vocab_obj, args, device):
super().__init__()
self.m_device = device
self.m_save_mode = True
self.m_mean_train_loss = 0
self.m_mean_train_precision = 0
self.m_mean_train_recall = 0
self.m_mean_val_loss = 0
self.m_mean_eval_precision = 0
self.m_mean_eval_recall = 0
self.m_mean_eval_bleu = 0
self.m_epochs = args.epoch_num
self.m_batch_size = args.batch_size
# self.m_rec_loss = XE_LOSS(vocab_obj.item_num, self.m_device)
# self.m_rec_loss = BPR_LOSS(self.m_device)
self.m_rec_loss = SIG_LOSS(self.m_device)
self.m_rec_soft_loss = BPR_LOSS(self.m_device)
# self.m_criterion = nn.BCEWithLogitsLoss(reduction="none")
self.m_train_step = 0
self.m_valid_step = 0
self.m_model_path = args.model_path
self.m_model_file = args.model_file
self.m_data_dir = args.data_dir
self.m_dataset = args.data_set
self.m_dataset_name = args.data_name
self.m_grad_clip = args.grad_clip
self.m_weight_decay = args.weight_decay
# self.m_l2_reg = args.l2_reg
self.m_feature_loss_lambda = args.feature_lambda # the weight for the feature loss
self.m_soft_train = args.soft_label # use soft label for sentence prediction
self.m_multi_task = args.multi_task # use multi-task loss (sent + feat)
self.m_valid_trigram = args.valid_trigram # use trigram blocking for valid
self.m_valid_trigram_feat = args.valid_trigram_feat # use trigram + feature unigram for valid
self.m_select_topk_s = args.select_topk_s # select topk sentence for valid
self.m_select_topk_f = args.select_topk_f # select topk feature for valid
self.m_train_iteration = 0
self.m_valid_iteration = 0
self.m_eval_iteration = 0
self.m_print_interval = args.print_interval
self.m_sid2swords = vocab_obj.m_sid2swords
self.m_item2iid = vocab_obj.m_item2iid
self.m_user2uid = vocab_obj.m_user2uid
self.m_iid2item = {self.m_item2iid[k]: k for k in self.m_item2iid}
self.m_uid2user = {self.m_user2uid[k]: k for k in self.m_user2uid}
feature2id_file = os.path.join(self.m_data_dir, 'train/feature/feature2id.json')
testset_combined_file = os.path.join(self.m_data_dir, 'test_combined.json')
with open(feature2id_file, 'r') as f:
self.d_feature2id = json.load(f)
self.d_testset_combined = dict()
with open(testset_combined_file, 'r') as f:
for line in f:
line_data = json.loads(line)
userid = line_data['user']
itemid = line_data['item']
review_text = line_data['review']
if userid not in self.d_testset_combined:
self.d_testset_combined[userid] = dict()
self.d_testset_combined[userid][itemid] = review_text
else:
assert itemid not in self.d_testset_combined[userid]
self.d_testset_combined[userid][itemid] = review_text
print("--"*10+"train params"+"--"*10)
print("print_interval", self.m_print_interval)
print("number of topk selected sentences: {}".format(self.m_select_topk_s))
if self.m_valid_trigram:
print("use trigram blocking for validation")
elif self.m_valid_trigram_feat:
print("use trigram + feature unigram for validation")
else:
print("use the original topk scores for validation")
self.m_overfit_epoch_threshold = 3
def f_save_model(self, checkpoint):
# checkpoint = {'model':network.state_dict(),
# 'epoch': epoch,
# 'en_optimizer': en_optimizer,
# 'de_optimizer': de_optimizer
# }
torch.save(checkpoint, self.m_model_file)
def f_train(self, train_data, valid_data, network, optimizer, logger_obj):
last_train_loss = 0
last_eval_loss = 0
self.m_mean_eval_loss = 0
overfit_indicator = 0
# best_eval_precision = 0
best_eval_recall = 0
best_eval_bleu = 0
# self.f_init_word_embed(pretrain_word_embed, network)
try:
for epoch in range(self.m_epochs):
print("++"*10, epoch, "++"*10)
s_time = datetime.datetime.now()
self.f_eval_epoch(valid_data, network, optimizer, logger_obj)
e_time = datetime.datetime.now()
print("validation epoch duration", e_time-s_time)
if last_eval_loss == 0:
last_eval_loss = self.m_mean_eval_loss
elif last_eval_loss < self.m_mean_eval_loss:
print(
"!"*10, "error val loss increase", "!"*10,
"last val loss %.4f" % last_eval_loss,
"cur val loss %.4f" % self.m_mean_eval_loss
)
overfit_indicator += 1
# if overfit_indicator > self.m_overfit_epoch_threshold:
# break
else:
print(
"last val loss %.4f" % last_eval_loss,
"cur val loss %.4f" % self.m_mean_eval_loss
)
last_eval_loss = self.m_mean_eval_loss
if best_eval_bleu < self.m_mean_eval_bleu:
print("... saving model ...")
checkpoint = {'model': network.state_dict()}
self.f_save_model(checkpoint)
best_eval_bleu = self.m_mean_eval_bleu
print("--"*10, epoch, "--"*10)
s_time = datetime.datetime.now()
# train_data.sampler.set_epoch(epoch)
self.f_train_epoch(train_data, network, optimizer, logger_obj)
# self.f_eval_train_epoch(train_data, network, optimizer, logger_obj)
e_time = datetime.datetime.now()
print("epoch duration", e_time-s_time)
if last_train_loss == 0:
last_train_loss = self.m_mean_train_loss
elif last_train_loss < self.m_mean_train_loss:
print(
"!"*10, "error training loss increase", "!"*10,
"last train loss %.4f" % last_train_loss,
"cur train loss %.4f" % self.m_mean_train_loss
)
# break
else:
print(
"last train loss %.4f" % last_train_loss,
"cur train loss %.4f" % self.m_mean_train_loss
)
last_train_loss = self.m_mean_train_loss
# if best_eval_bleu < self.m_mean_eval_bleu:
# print("... saving model ...")
# checkpoint = {'model': network.state_dict()}
# self.f_save_model(checkpoint)
# best_eval_bleu = self.m_mean_eval_bleu
s_time = datetime.datetime.now()
self.f_eval_epoch(valid_data, network, optimizer, logger_obj)
e_time = datetime.datetime.now()
print("test epoch duration", e_time-s_time)
if best_eval_bleu < self.m_mean_eval_bleu:
print("... saving model ...")
checkpoint = {'model': network.state_dict()}
self.f_save_model(checkpoint)
best_eval_bleu = self.m_mean_eval_bleu
except KeyboardInterrupt:
print("--"*20)
print("... exiting from training early")
if best_eval_bleu < self.m_mean_eval_bleu:
print("... final save ...")
checkpoint = {'model': network.state_dict()}
self.f_save_model(checkpoint)
best_eval_bleu = self.m_mean_eval_bleu
s_time = datetime.datetime.now()
self.f_eval_epoch(valid_data, network, optimizer, logger_obj)
e_time = datetime.datetime.now()
print("test epoch duration", e_time-s_time)
print(" done !!!")
def f_train_epoch(self, train_data, network, optimizer, logger_obj):
loss_s_list, loss_f_list, loss_list = [], [], []
tmp_loss_s_list, tmp_loss_f_list, tmp_loss_list = [], [], []
iteration = 0
logger_obj.f_add_output2IO(" "*10+"training the user and item encoder"+" "*10)
start_time = time.time()
# Start one epoch train of the network
network.train()
feat_loss_weight = self.m_feature_loss_lambda
for g_batch in train_data:
# print("graph_batch", g_batch)
# if i % self.m_print_interval == 0:
# print("... eval ... ", i)
graph_batch = g_batch.to(self.m_device)
logits_s, logits_f = network(graph_batch)
labels_s = graph_batch.s_label
loss = None
loss_s = None
if not self.m_soft_train:
# If not using soft label, only the gt sentences are labeled as 1
labels_s = (labels_s == 3)
loss_s = self.m_rec_loss(logits_s, labels_s.float())
else:
loss_s = self.m_rec_soft_loss(graph_batch, logits_s, labels_s)
# 1. Loss from feature prediction
labels_f = graph_batch.f_label
loss_f = self.m_rec_loss(logits_f, labels_f.float())
# 2. multi-task loss, sum of sentence loss and feature loss
loss = loss_s + feat_loss_weight*loss_f
# add current sentence prediction loss
loss_s_list.append(loss_s.item())
tmp_loss_s_list.append(loss_s.item())
# add current feature prediction loss
loss_f_list.append(loss_f.item())
tmp_loss_f_list.append(loss_f.item())
# add current loss
loss_list.append(loss.item())
tmp_loss_list.append(loss.item())
optimizer.zero_grad()
loss.backward()
# perform gradient clip
# if self.m_grad_clip:
# max_norm = 5.0
# torch.nn.utils.clip_grad_norm_(network.parameters(), max_norm)
optimizer.step()
self.m_train_iteration += 1
iteration += 1
if iteration % self.m_print_interval == 0:
logger_obj.f_add_output2IO(
"%d, loss:%.4f, sent loss:%.4f, weighted feat loss:%.4f, feat loss:%.4f" % (
iteration, np.mean(tmp_loss_list), np.mean(tmp_loss_s_list),
feat_loss_weight*np.mean(tmp_loss_f_list), np.mean(tmp_loss_f_list)
)
)
tmp_loss_s_list, tmp_loss_f_list, tmp_loss_list = [], [], []
logger_obj.f_add_output2IO(
"%d, loss:%.4f, sent loss:%.4f, weighted feat loss:%.4f, feat loss:%.4f" % (
self.m_train_iteration, np.mean(loss_list), np.mean(loss_s_list),
feat_loss_weight*np.mean(loss_f_list), np.mean(loss_f_list)
)
)
logger_obj.f_add_scalar2tensorboard("train/loss", np.mean(loss_list), self.m_train_iteration)
logger_obj.f_add_scalar2tensorboard("train/sent_loss", np.mean(loss_s_list), self.m_train_iteration)
logger_obj.f_add_scalar2tensorboard("train/feat_loss", np.mean(loss_f_list), self.m_train_iteration)
end_time = time.time()
print("+++ duration +++", end_time-start_time)
self.m_mean_train_loss = np.mean(loss_list)
def f_eval_train_epoch(self, eval_data, network, optimizer, logger_obj):
loss_list = []
recall_list, precision_list, F1_list = [], [], []
rouge_1_f_list, rouge_1_p_list, rouge_1_r_list = [], [], []
rouge_2_f_list, rouge_2_p_list, rouge_2_r_list = [], [], []
rouge_l_f_list, rouge_l_p_list, rouge_l_r_list = [], [], []
bleu_list, bleu_1_list, bleu_2_list, bleu_3_list, bleu_4_list = [], [], [], [], []
self.m_eval_iteration = self.m_train_iteration
logger_obj.f_add_output2IO(" "*10+" eval for train data"+" "*10)
rouge = Rouge()
network.eval()
topk = 3
start_time = time.time()
with torch.no_grad():
for i, (G, index) in enumerate(eval_data):
eval_flag = random.randint(1, 100)
if eval_flag != 2:
continue
G = G.to(self.m_device)
logits = network(G)
snode_id = G.filter_nodes(lambda nodes: nodes.data["dtype"] == 1)
G.nodes[snode_id].data["p"] = logits
                glist = dgl.unbatch(G)  # NOTE: this legacy path assumes DGL graphs (dgl is not imported at the top of this file)
loss = self.m_rec_loss(glist)
for j in range(len(glist)):
hyps_j = []
refs_j = []
idx = index[j]
example_j = eval_data.dataset.get_example(idx)
label_sid_list_j = example_j["label_sid"]
gt_sent_num = len(label_sid_list_j)
# print("gt_sent_num", gt_sent_num)
g_j = glist[j]
snode_id_j = g_j.filter_nodes(lambda nodes: nodes.data["dtype"]==1)
N = len(snode_id_j)
p_sent_j = g_j.ndata["p"][snode_id_j]
p_sent_j = p_sent_j.view(-1)
# p_sent_j = p_sent_j.view(-1, 2)
# topk_j, pred_idx_j = torch.topk(p_sent_j[:, 1], min(topk, N))
# topk_j, topk_pred_idx_j = torch.topk(p_sent_j, min(topk, N))
topk_j, topk_pred_idx_j = torch.topk(p_sent_j, gt_sent_num)
topk_pred_snode_id_j = snode_id_j[topk_pred_idx_j]
topk_pred_sid_list_j = g_j.nodes[topk_pred_snode_id_j].data["raw_id"]
topk_pred_logits_list_j = g_j.nodes[topk_pred_snode_id_j].data["p"]
# recall_j, precision_j = get_example_recall_precision(pred_sid_list_j.cpu(), label_sid_list_j, min(topk, N))
print("topk_j", topk_j)
print("label_sid_list_j", label_sid_list_j)
print("topk_pred_idx_j", topk_pred_sid_list_j)
recall_j, precision_j = get_example_recall_precision(topk_pred_sid_list_j.cpu(), label_sid_list_j, gt_sent_num)
recall_list.append(recall_j)
precision_list.append(precision_j)
for sid_k in label_sid_list_j:
refs_j.append(self.m_sid2swords[sid_k])
for sid_k in topk_pred_sid_list_j:
hyps_j.append(self.m_sid2swords[sid_k.item()])
hyps_j = " ".join(hyps_j)
refs_j = " ".join(refs_j)
scores_j = rouge.get_scores(hyps_j, refs_j, avg=True)
rouge_1_f_list.append(scores_j["rouge-1"]["f"])
rouge_1_r_list.append(scores_j["rouge-1"]["r"])
rouge_1_p_list.append(scores_j["rouge-1"]["p"])
rouge_2_f_list.append(scores_j["rouge-2"]["f"])
rouge_2_r_list.append(scores_j["rouge-2"]["r"])
rouge_2_p_list.append(scores_j["rouge-2"]["p"])
rouge_l_f_list.append(scores_j["rouge-l"]["f"])
rouge_l_r_list.append(scores_j["rouge-l"]["r"])
rouge_l_p_list.append(scores_j["rouge-l"]["p"])
# bleu_scores_j = compute_bleu([hyps_j], [refs_j])
bleu_scores_j = compute_bleu([[refs_j.split()]], [hyps_j.split()])
bleu_list.append(bleu_scores_j)
# bleu_1_scores_j, bleu_2_scores_j, bleu_3_scores_j, bleu_4_scores_j = get_bleu([refs_j], [hyps_j])
bleu_1_scores_j, bleu_2_scores_j, bleu_3_scores_j, bleu_4_scores_j = get_sentence_bleu([refs_j.split()], hyps_j.split())
bleu_1_list.append(bleu_1_scores_j)
bleu_2_list.append(bleu_2_scores_j)
bleu_3_list.append(bleu_3_scores_j)
bleu_4_list.append(bleu_4_scores_j)
loss_list.append(loss.item())
end_time = time.time()
duration = end_time - start_time
print("... one epoch", duration)
logger_obj.f_add_scalar2tensorboard("eval/loss", np.mean(loss_list), self.m_eval_iteration)
# logger_obj.f_add_scalar2tensorboard("eval/recall", np.mean(recall_list), self.m_eval_iteration)
self.m_mean_eval_loss = np.mean(loss_list)
self.m_mean_eval_recall = np.mean(recall_list)
self.m_mean_eval_precision = np.mean(precision_list)
self.m_mean_eval_rouge_1_f = np.mean(rouge_1_f_list)
self.m_mean_eval_rouge_1_r = np.mean(rouge_1_r_list)
self.m_mean_eval_rouge_1_p = np.mean(rouge_1_p_list)
self.m_mean_eval_rouge_2_f = np.mean(rouge_2_f_list)
self.m_mean_eval_rouge_2_r = np.mean(rouge_2_r_list)
self.m_mean_eval_rouge_2_p = np.mean(rouge_2_p_list)
self.m_mean_eval_rouge_l_f = np.mean(rouge_l_f_list)
self.m_mean_eval_rouge_l_r = np.mean(rouge_l_r_list)
self.m_mean_eval_rouge_l_p = np.mean(rouge_l_p_list)
self.m_mean_eval_bleu = np.mean(bleu_list)
self.m_mean_eval_bleu_1 = np.mean(bleu_1_list)
self.m_mean_eval_bleu_2 = np.mean(bleu_2_list)
self.m_mean_eval_bleu_3 = np.mean(bleu_3_list)
self.m_mean_eval_bleu_4 = np.mean(bleu_4_list)
logger_obj.f_add_output2IO("%d, NLL_loss:%.4f" % (self.m_eval_iteration, self.m_mean_eval_loss))
logger_obj.f_add_output2IO("recall@%d:%.4f" % (topk, self.m_mean_eval_recall))
logger_obj.f_add_output2IO("precision@%d:%.4f" % (topk, self.m_mean_eval_precision))
logger_obj.f_add_output2IO(
"rouge-1:|f:%.4f |p:%.4f |r:%.4f, rouge-2:|f:%.4f |p:%.4f |r:%.4f, rouge-l:|f:%.4f |p:%.4f |r:%.4f" % (
self.m_mean_eval_rouge_1_f, self.m_mean_eval_rouge_1_p, self.m_mean_eval_rouge_1_r,
self.m_mean_eval_rouge_2_f, self.m_mean_eval_rouge_2_p, self.m_mean_eval_rouge_2_r,
self.m_mean_eval_rouge_l_f, self.m_mean_eval_rouge_l_p, self.m_mean_eval_rouge_l_r))
logger_obj.f_add_output2IO("bleu:%.4f" % (self.m_mean_eval_bleu))
logger_obj.f_add_output2IO("bleu-1:%.4f" % (self.m_mean_eval_bleu_1))
logger_obj.f_add_output2IO("bleu-2:%.4f" % (self.m_mean_eval_bleu_2))
logger_obj.f_add_output2IO("bleu-3:%.4f" % (self.m_mean_eval_bleu_3))
logger_obj.f_add_output2IO("bleu-4:%.4f" % (self.m_mean_eval_bleu_4))
network.train()
def f_eval_epoch(self, eval_data, network, optimizer, logger_obj):
# loss_list = []
# recall_list, precision_list, F1_list = [], [], []
rouge_1_f_list, rouge_1_p_list, rouge_1_r_list = [], [], []
rouge_2_f_list, rouge_2_p_list, rouge_2_r_list = [], [], []
rouge_l_f_list, rouge_l_p_list, rouge_l_r_list = [], [], []
bleu_list, bleu_1_list, bleu_2_list, bleu_3_list, bleu_4_list = [], [], [], [], []
self.m_eval_iteration = self.m_train_iteration
logger_obj.f_add_output2IO(" "*10+" eval the user and item encoder"+" "*10)
rouge = Rouge()
# topk = 3
# start one epoch validation
network.eval()
start_time = time.time()
i = 0 # count batch
with torch.no_grad():
for graph_batch in eval_data:
# eval_flag = random.randint(1,5)
# if eval_flag != 2:
# continue
# start_time = time.time()
# print("... eval ", i)
if i % 100 == 0:
print("... eval ... ", i)
i += 1
graph_batch = graph_batch.to(self.m_device)
# #### logits: batch_size*max_sen_num ####
s_logits, sids, s_masks, target_sids, _, _, _, _, _ = network.eval_forward(graph_batch)
batch_size = s_logits.size(0)
# get batch userid and itemid
uid_batch = graph_batch.u_rawid
iid_batch = graph_batch.i_rawid
# map uid to userid and iid to itemid
userid_batch = [self.m_uid2user[uid_batch[j].item()] for j in range(batch_size)]
itemid_batch = [self.m_iid2item[iid_batch[j].item()] for j in range(batch_size)]
# loss = self.m_rec_loss(glist)
# loss_list.append(loss.item())
# #### topk sentence ####
# logits: batch_size*topk_sent
# #### topk sentence index ####
# pred_sids: batch_size*topk_sent
if self.m_valid_trigram:
# apply trigram blocking for validation
s_topk_logits, s_pred_sids = self.trigram_blocking_sent_prediction(
s_logits, sids, s_masks, batch_size, topk=self.m_select_topk_s, pool_size=None
)
elif self.m_valid_trigram_feat:
# apply trigram + feature unigram blocking for validation
s_topk_logits, s_pred_sids = self.trigram_unigram_blocking_sent_prediction(
s_logits, sids, s_masks, n_win=3, topk=self.m_select_topk_s, pool_size=None
)
else:
# apply original topk selection for validation
s_topk_logits, s_pred_sids = self.origin_blocking_sent_prediction(
s_logits, sids, s_masks, topk=self.m_select_topk_s
)
# topk_logits, topk_pred_snids = torch.topk(s_logits, topk, dim=1)
# pred_sids = sids.gather(dim=1, index=topk_pred_snids)
for j in range(batch_size):
refs_j = []
hyps_j = []
true_userid_j = userid_batch[j]
true_itemid_j = itemid_batch[j]
# for sid_k in target_sids[j]:
# refs_j.append(self.m_sid2swords[sid_k.item()])
# refs_j = " ".join(refs_j)
for sid_k in s_pred_sids[j]:
hyps_j.append(self.m_sid2swords[sid_k.item()])
hyps_j = " ".join(hyps_j)
true_combined_ref = self.d_testset_combined[true_userid_j][true_itemid_j]
# scores_j = rouge.get_scores(hyps_j, refs_j, avg=True)
scores_j = rouge.get_scores(hyps_j, true_combined_ref, avg=True)
rouge_1_f_list.append(scores_j["rouge-1"]["f"])
rouge_1_r_list.append(scores_j["rouge-1"]["r"])
rouge_1_p_list.append(scores_j["rouge-1"]["p"])
rouge_2_f_list.append(scores_j["rouge-2"]["f"])
rouge_2_r_list.append(scores_j["rouge-2"]["r"])
rouge_2_p_list.append(scores_j["rouge-2"]["p"])
rouge_l_f_list.append(scores_j["rouge-l"]["f"])
rouge_l_r_list.append(scores_j["rouge-l"]["r"])
rouge_l_p_list.append(scores_j["rouge-l"]["p"])
# bleu_scores_j = compute_bleu([[refs_j.split()]], [hyps_j.split()])
bleu_scores_j = compute_bleu([[true_combined_ref.split()]], [hyps_j.split()])
bleu_list.append(bleu_scores_j)
# bleu_1_scores_j, bleu_2_scores_j, bleu_3_scores_j, bleu_4_scores_j = get_sentence_bleu(
# [refs_j.split()], hyps_j.split())
bleu_1_scores_j, bleu_2_scores_j, bleu_3_scores_j, bleu_4_scores_j = get_sentence_bleu(
[true_combined_ref.split()], hyps_j.split())
bleu_1_list.append(bleu_1_scores_j)
bleu_2_list.append(bleu_2_scores_j)
bleu_3_list.append(bleu_3_scores_j)
bleu_4_list.append(bleu_4_scores_j)
end_time = time.time()
duration = end_time - start_time
print("... one epoch", duration)
# logger_obj.f_add_scalar2tensorboard("eval/loss", np.mean(loss_list), self.m_eval_iteration)
# logger_obj.f_add_scalar2tensorboard("eval/recall", np.mean(recall_list), self.m_eval_iteration)
# self.m_mean_eval_loss = np.mean(loss_list)
# self.m_mean_eval_recall = np.mean(recall_list)
# self.m_mean_eval_precision = np.mean(precision_list)
self.m_mean_eval_rouge_1_f = np.mean(rouge_1_f_list)
self.m_mean_eval_rouge_1_r = np.mean(rouge_1_r_list)
self.m_mean_eval_rouge_1_p = np.mean(rouge_1_p_list)
self.m_mean_eval_rouge_2_f = np.mean(rouge_2_f_list)
self.m_mean_eval_rouge_2_r = np.mean(rouge_2_r_list)
self.m_mean_eval_rouge_2_p =
|
np.mean(rouge_2_p_list)
|
numpy.mean
|
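The validation path above calls `trigram_blocking_sent_prediction`, which is not shown in this excerpt. Below is a minimal, self-contained sketch of the trigram-blocking idea (greedily pick high-scoring sentences, skipping any candidate whose trigrams overlap the summary built so far). The function name and inputs here are hypothetical and operate on plain strings rather than the graph batch used by the trainer.
import numpy as np

def trigrams(tokens):
    return {tuple(tokens[i:i + 3]) for i in range(len(tokens) - 2)}

def trigram_blocking_select(sentences, scores, topk=3):
    # Greedily pick the highest-scoring sentences, skipping any sentence
    # that repeats a trigram already present in the selection.
    order = np.argsort(scores)[::-1]
    selected, seen = [], set()
    for idx in order:
        tri = trigrams(sentences[idx].split())
        if tri & seen:
            continue  # blocked: would repeat an existing trigram
        selected.append(idx)
        seen |= tri
        if len(selected) == topk:
            break
    return selected

sents = ["the battery life is great",
         "the battery life is great for the price",   # redundant with the first
         "the screen is sharp and bright"]
print(trigram_blocking_select(sents, np.array([0.9, 0.8, 0.7]), topk=2))  # -> [0, 2]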
import numpy as np
from scipy.signal import find_peaks, stft, lfilter, butter, welch
from plotly.subplots import make_subplots
from plotly.colors import n_colors
import plotly.graph_objects as go
from scipy.interpolate import interp1d
class BVPsignal:
"""
Manage (multi-channel, row-wise) BVP signals
"""
nFFT = 2048 # freq. resolution for STFTs
step = 1 # step in seconds
def __init__(self, data, fs, startTime=0, minHz=0.75, maxHz=4., verb=False):
if len(data.shape) == 1:
self.data = data.reshape(1,-1) # 2D array raw-wise
else:
self.data = data
self.numChls = self.data.shape[0] # num channels
self.fs = fs # sample rate
self.startTime = startTime
self.verb = verb
self.minHz = minHz
self.maxHz = maxHz
    def getChunk(self, startTime, winsize=None, numSample=None):
assert startTime >= self.startTime, "Start time error!"
N = self.data.shape[1]
fs = self.fs
        Nstart = int(fs*startTime)
        Nstop = N  # default: read to the end of the signal
        # -- winsize > 0
if winsize:
stopTime = startTime + winsize
Nstop = np.min([int(fs*stopTime),N])
# -- numSample > 0
if numSample:
Nstop = np.min([numSample,N])
return self.data[0,Nstart:Nstop]
def hps(self, spect, d=3):
if spect.ndim == 2:
n_win = spect.shape[1]
new_spect = np.zeros_like(spect)
for w in range(n_win):
curr_w = spect[:,w]
w_down_z = np.zeros_like(curr_w)
w_down = curr_w[::d]
w_down_z[0:len(w_down)] = w_down
w_hps = np.multiply(curr_w, w_down_z)
new_spect[:, w] = w_hps
return new_spect
elif spect.ndim == 1:
s_down_z = np.zeros_like(spect)
s_down = spect[::d]
s_down_z[0:len(s_down)] = s_down
w_hps = np.multiply(spect, s_down_z)
return w_hps
else:
raise ValueError("Wrong Dimensionality of the Spectrogram for the HPS")
def spectrogram(self, winsize=5, use_hps=False):
"""
        Compute the BVP signal spectrogram restricted to the
        45-240 BPM band (0.75-4 Hz) using winsize (in sec) samples.
"""
# -- spect. Z is 3-dim: Z[#chnls, #freqs, #times]
F, T, Z = stft(self.data,
self.fs,
nperseg=self.fs*winsize,
noverlap=self.fs*(winsize-self.step),
boundary='even',
nfft=self.nFFT)
Z = np.squeeze(Z, axis=0)
# -- freq subband (0.75 Hz - 4.0 Hz)
minHz = 0.75
maxHz = 4.0
band = np.argwhere((F > minHz) & (F < maxHz)).flatten()
self.spect = np.abs(Z[band,:]) # spectrum magnitude
self.freqs = 60*F[band] # spectrum freq in bpm
self.times = T # spectrum times
if use_hps:
spect_hps = self.hps(self.spect)
# -- BPM estimate by spectrum
self.bpm = self.freqs[np.argmax(spect_hps,axis=0)]
else:
# -- BPM estimate by spectrum
self.bpm = self.freqs[np.argmax(self.spect,axis=0)]
def getBPM(self, winsize=5):
self.spectrogram(winsize, use_hps=False)
return self.bpm, self.times
def PSD2BPM(self, chooseBest=True, use_hps=False):
"""
Compute power spectral density using Welch’s method and estimate
BPMs from video frames
"""
        # -- choose the Welch segment length; use the whole signal when shorter than 256 samples
c,n = self.data.shape
if n < 256:
seglength = n
overlap = int(0.8*n) # fixed overlapping
else:
seglength = 256
overlap = 200
# -- periodogram by Welch
F, P = welch(self.data, nperseg=seglength, noverlap=overlap, window='hamming',fs=self.fs, nfft=self.nFFT)
# -- freq subband (0.75 Hz - 4.0 Hz)
band = np.argwhere((F > self.minHz) & (F < self.maxHz)).flatten()
self.Pfreqs = 60*F[band]
self.Power = P[:,band]
        # -- if c == 3, choose the channel with the best SNR
if chooseBest:
winner = 0
lobes = self.PDSrippleAnalysis(ch=0)
SNR = lobes[-1]/lobes[-2]
if c == 3:
lobes = self.PDSrippleAnalysis(ch=1)
SNR1 = lobes[-1]/lobes[-2]
if SNR1 > SNR:
SNR = SNR1
winner = 1
lobes = self.PDSrippleAnalysis(ch=2)
SNR1 = lobes[-1]/lobes[-2]
if SNR1 > SNR:
SNR = SNR1
winner = 2
self.Power = self.Power[winner].reshape(1,-1)
        # TODO: remove this?
if use_hps:
p = self.Power[0]
phps = self.hps(p)
'''import matplotlib.pyplot as plt
plt.plot(p)
plt.figure()
plt.plot(phps)
plt.show()'''
Pmax = np.argmax(phps) # power max
self.bpm = np.array([self.Pfreqs[Pmax]]) # freq max
else:
# -- BPM estimate by PSD
Pmax = np.argmax(self.Power, axis=1) # power max
self.bpm = self.Pfreqs[Pmax] # freq max
if '3' in str(self.verb):
lobes = self.PDSrippleAnalysis()
self.displayPSD(lobe1=lobes[-1], lobe2=lobes[-2])
def autocorr(self):
        from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
        import matplotlib.pyplot as plt  # needed for plt.show() below
        # TODO: handle all channels
x = self.data[0,:]
plot_acf(x)
plt.show()
plot_pacf(x)
plt.show()
def displaySpectrum(self, display=False, dims=3):
"""Show the spectrogram of the BVP signal"""
# -- check if bpm exists
try:
bpm = self.bpm
except AttributeError:
self.spectrogram()
bpm = self.bpm
t = self.times
f = self.freqs
S = self.spect
fig = go.Figure()
fig.add_trace(go.Heatmap(z=S, x=t, y=f, colorscale="viridis"))
fig.add_trace(go.Scatter(x=t, y=bpm, name='Frequency Domain', line=dict(color='red', width=2)))
fig.update_layout(autosize=False, height=420, showlegend=True,
title='Spectrogram of the BVP signal',
xaxis_title='Time (sec)',
yaxis_title='BPM (60*Hz)',
legend=dict(
x=0,
y=1,
traceorder="normal",
font=dict(
family="sans-serif",
size=12,
color="black"),
bgcolor="LightSteelBlue",
bordercolor="Black",
borderwidth=2)
)
fig.show()
def findPeaks(self, distance=None, height=None):
# -- take the first channel
x = self.data[0].flatten()
if distance is None:
distance = self.fs/2
if height is None:
height = np.mean(x)
# -- find peaks with the specified params
self.peaks, _ = find_peaks(x, distance=distance, height=height)
self.peaksTimes = self.peaks/self.fs
self.bpmPEAKS = 60.0/np.diff(self.peaksTimes)
def plotBPMPeaks(self, height=None, width=None):
"""
Plot the BVP signal and peak marks
"""
# -- find peaks
try:
peaks = self.peaks
except AttributeError:
self.findPeaks()
peaks = self.peaks
#-- signals
y = self.data[0]
n = y.shape[0]
startTime = self.startTime
stopTime = startTime+n/self.fs
x = np.linspace(startTime, stopTime, num=n, endpoint=False)
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=y, name="BVP"))
fig.add_trace(go.Scatter(x=x[peaks], y=y[peaks], mode='markers', name="Peaks"))
if not height:
height=400
if not width:
width=800
fig.update_layout(height=height, width=width, title="BVP signal + peaks",
font=dict(
family="Courier New, monospace",
size=14,
color="#7f7f7f"))
fig.show()
def plot(self, title="BVP signal", height=400, width=800):
"""
Plot the BVP signal (multiple channels)
"""
#-- signals
y = self.data
c,n = y.shape
startTime = self.startTime
stopTime = startTime+n/self.fs
x =
|
np.linspace(startTime, stopTime, num=n, endpoint=False)
|
numpy.linspace
|
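As a quick, hedged illustration of the completed call above (fs and n are made-up values, not taken from the original class): np.linspace with endpoint=False produces one time stamp per sample at the signal's sampling rate.

import numpy as np

fs = 30.0   # assumed sampling rate in Hz, for illustration only
n = 150     # assumed number of samples
startTime = 0.0
stopTime = startTime + n / fs
# endpoint=False gives n evenly spaced instants covering [startTime, stopTime)
x = np.linspace(startTime, stopTime, num=n, endpoint=False)
assert x.shape == (n,) and np.isclose(x[1] - x[0], 1.0 / fs)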
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing COVID-19 cases in Ontario
# By <NAME>
#
# ### How to run this code
# <font color='red'>In the above ribbon, click **cell** and then click **Run All**</font>
# +
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.widgets import CheckButtons
import requests
from io import StringIO
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# %matplotlib inline
# -
# ## Load the data from the data.ontario.ca website
# Read it into a pandas dataframe (which is like a table)
data_url = 'https://data.ontario.ca/dataset/f4112442-bdc8-45d2-be3c-12efae72fb27/resource/455fd63b-603d-4608-8216-7d8647f43350/download/conposcovidloc.csv'
response = requests.get(data_url)
csv_string = response.content.decode('utf-8')
cases = pd.read_csv(StringIO(csv_string))
cases.head(4) # see the first few columns
# ## Define the population of each PHU
phu_populations = {'Algoma Public Health Unit': 114434,
'Brant County Health Unit': 155203,
'Chatham-Kent Health Unit': 106317,
'Durham Region Health Department': 712402,
'Eastern Ontario Health Unit': 208711,
'Grey Bruce Health Unit': 169884,
'Haldimand-Norfolk Health Unit': 114081,
'Haliburton, Kawartha, Pine Ridge District Health Unit': 188937,
'Halton Region Health Department': 619087,
'Hamilton Public Health Services': 592163,
'Hastings and Prince Edward Counties Health Unit': 168493,
'Huron Perth District Health Unit': 139757,
'Kingston, Frontenac and Lennox & Addington Public Health': 212719,
'Lambton Public Health': 130964,
'Leeds, Grenville and Lanark District Health Unit': 173170,
'Middlesex-London Health Unit': 507524,
'Niagara Region Public Health Department': 472485,
'North Bay Parry Sound District Health Unit': 129752,
'Northwestern Health Unit': 87675,
'Ottawa Public Health': 1054656,
'Peel Public Health': 1605952,
'Peterborough Public Health': 147977,
'Porcupine Health Unit': 83441,
'Region of Waterloo, Public Health': 584361,
'Renfrew County and District Health Unit': 108631,
'Simcoe Muskoka District Health Unit': 599589,
'Southwestern Public Health': 211498,
'Sudbury & District Health Unit': 199023,
'Thunder Bay District Health Unit': 149960,
'Timiskaming Health Unit': 32689,
'Toronto Public Health': 3120358,
'Wellington-Dufferin-Guelph Public Health': 311908,
'Windsor-Essex County Health Unit': 424830,
'York Region Public Health Services': 1225797}
# ## Timeline
# ### turn the dates into integers, which are easier to deal with
#
cases['Date'] = cases['Accurate_Episode_Date']
cases = cases.dropna(subset=['Date'])
cases['Date'] = [int(str(s).replace('-', '')) for s in cases['Date']]
# ### get a list of all dates until today
# We want to begin at Feb 15, 2020, but we're looking at cases in 14-day periods, so we'll start the timeline 14 days before (inclusive of) Feb 15.
d1 = datetime.date(2020,2,15)
d2 = datetime.date.today()
datetimes = [(d1 + datetime.timedelta(days=x)) for x in range((d2-d1).days + 1)]
dates = [int(str(s).replace('-', '')) for s in datetimes]
# ## Get the relevant data
# I.e., get the data for each PHU and day from the *cases* dataframe, and store it in a new dataframe called *data*. *data* has a row for each day, and a column for each PHU
#
# +
# initialize dataframe
data = pd.DataFrame()
data['Date'] = dates
days_in_range = 14
overall_begin_date = 20200215 # february 15 2020
phus =
|
np.unique(cases['Reporting_PHU'])
|
numpy.unique
|
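A minimal sketch of what the completed line does, using a toy stand-in for the Ontario case table rather than the real download:

import numpy as np
import pandas as pd

cases = pd.DataFrame({'Reporting_PHU': ['Toronto Public Health',
                                        'Ottawa Public Health',
                                        'Toronto Public Health']})
phus = np.unique(cases['Reporting_PHU'])
# np.unique returns the sorted, de-duplicated PHU names:
# ['Ottawa Public Health' 'Toronto Public Health']
print(phus)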
import os
import base64
import requests
import glob
import time
import multiprocessing
import numpy as np
from itertools import chain, islice
import ujson
import logging
import shutil
import cv2
from tqdm import tqdm
# cos_sim
from sklearn.metrics.pairwise import cosine_similarity
dir_path = os.path.dirname(os.path.realpath(__file__))
test_cat = os.path.join(dir_path, 'images')
session = requests.Session()
session.trust_env = False
logging.basicConfig(
level='INFO',
format='%(asctime)s %(levelname)s - %(message)s',
datefmt='[%H:%M:%S]',
)
def file2base64(path):
with open(path, mode='rb') as fl:
encoded = base64.b64encode(fl.read()).decode('ascii')
return encoded
def save_crop(data, name):
img = base64.b64decode(data)
with open(name, mode="wb") as fl:
fl.write(img)
fl.close()
def extract_vecs(task):
target = task[0]
server = task[1]
images = dict(data=target)
req = dict(images=images,
threshold=0.6,
extract_ga=True,
extract_embedding=True,
return_face_data=True,
embed_only=False, # If set to true API expects each image to be 112x112 face crop
limit_faces=0, # Limit maximum number of processed faces, 0 = no limit
api_ver='2'
)
resp = session.post(server, json=req, timeout=120)
content = ujson.loads(resp.content)
took = content.get('took')
status = content.get('status')
images = content.get('data')
counts = [len(e.get('faces', [])) for e in images]
a_recode=[]
for im in images:
faces = im.get('faces', [])
return faces
def to_chunks(iterable, size=10):
iterator = iter(iterable)
for first in iterator:
yield chain([first], islice(iterator, size - 1))
if __name__ == "__main__":
ims = 'src/api_trt/test_images'
server = 'http://localhost:18081/extract'
if os.path.exists('crops'):
shutil.rmtree('crops')
os.mkdir('crops')
speeds = []
# Test cam
cap = cv2.VideoCapture(4)
while True:
ret, image = cap.read()
target = [base64.b64encode(cv2.imencode('.jpg', image)[1]).decode()]
print("done pre-prosscess")
target_chunks = to_chunks(target, 1)
task_set = [[list(chunk), server] for i, chunk in enumerate(target_chunks)]
task_set = list(task_set)
print('Encoding images.... Finished')
pool = multiprocessing.Pool(2)
t0 = time.time()
r = pool.map(extract_vecs, task_set)
if r != None:
for i, face in enumerate(r[0]):
bbox = face.get('bbox')
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
t1 = time.time()
took = t1 - t0
speed = 1 / took
speeds.append(speed)
print("Took: {} ({} im/sec)".format(took, speed))
pool.close()
mean = np.mean(speeds)
median = np.median(speeds)
print(f'mean: {mean} im/sec\n'
f'median: {median}\n'
f'min: {np.min(speeds)}\n'
f'max: {
|
np.max(speeds)
|
numpy.max
|
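For reference, a self-contained sketch (with made-up timings) of the summary statistics printed at the end of the benchmark above:

import numpy as np

speeds = np.array([24.1, 26.7, 25.3, 23.8])  # hypothetical im/sec measurements
print(f'mean: {np.mean(speeds)} im/sec\n'
      f'median: {np.median(speeds)}\n'
      f'min: {np.min(speeds)}\n'
      f'max: {np.max(speeds)}')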
""" Random Interval Spectral Forest (RISE).
Implementation of Deng's Time Series Forest, with minor changes
"""
__author__ = "<NAME>"
__all__ = ["RandomIntervalSpectralForest","acf","matrix_acf","ps"]
import numpy as np
import pandas as pd
import math
from numpy import random
from copy import deepcopy
from sklearn.ensemble.forest import ForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.multiclass import class_distribution
class RandomIntervalSpectralForest(ForestClassifier):
"""Random Interval Spectral Forest (RISE).
Random Interval Spectral Forest: stripped down implementation of RISE from Lines 2018:
@article
{lines17hive-cote,
author = {<NAME>, <NAME> and <NAME>},
title = {Time Series Classification with HIVE-COTE: The Hierarchical Vote Collective of Transformation-Based Ensembles},
journal = {ACM Transactions on Knowledge Discovery from Data},
volume = {12},
number = {5},
year = {2018}
}
Overview: Input n series length m
for each tree
sample a random interval
take the ACF and PS over this interval, and concatenate features
build tree on new features
ensemble the trees through averaging probabilities.
Need to have a minimum interval for each tree
This is from the python github.
Parameters
----------
n_trees : int, ensemble size, optional (default = 200)
random_state : int, seed for random, integer, optional (default to no seed)
min_interval : int, minimum width of an interval, optional (default = 16)
acf_lag : int, maximum number of autocorrelation terms to use (default = 100)
acf_min_values : int, never use fewer than this number of terms to find a correlation (default = 4)
Attributes
----------
n_classes : int, extracted from the data
classifiers : array of shape = [n_trees] of DecisionTree classifiers
intervals : array of shape = [n_trees][2] stores indexes of start and end points for all classifiers
TO DO: handle missing values, unequal length series and multivariate problems
"""
def __init__(self,
n_trees=200,
random_state=None,
min_interval=16,
acf_lag=100,
acf_min_values=4
):
super(RandomIntervalSpectralForest, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_trees)
self.n_trees=n_trees
self.random_state = random_state
random.seed(random_state)
self.min_interval=min_interval
self.acf_lag=acf_lag
self.acf_min_values=acf_min_values
# These are all set in fit
self.n_classes = 0
self.series_length = 0
self.classifiers = []
self.intervals=[]
self.lags=[]
self.classes_ = []
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y) using random intervals and spectral features
Parameters
----------
X : array-like or sparse matrix of shape = [n_instances,series_length] or shape = [n_instances,n_columns]
The training input samples. If a Pandas data frame is passed it must have a single column (i.e. univariate
classification). RISE has no bespoke method for multivariate classification as yet.
y : array-like, shape = [n_instances] The class labels.
Returns
-------
self : object
"""
if isinstance(X, pd.DataFrame):
if X.shape[1] > 1:
raise TypeError("RISE cannot handle multivariate problems yet")
elif isinstance(X.iloc[0, 0], pd.Series):
X = np.asarray([a.values for a in X.iloc[:, 0]])
else:
raise TypeError(
"Input should either be a 2d numpy array, or a pandas dataframe with a single column of Series objects (TSF cannot yet handle multivariate problems")
n_instances, self.series_length = X.shape
self.n_classes = np.unique(y).shape[0]
self.classes_ = class_distribution(np.asarray(y).reshape(-1, 1))[0][0]
self.intervals=np.zeros((self.n_trees, 2), dtype=int)
self.intervals[0][0] = 0
self.intervals[0][1] = self.series_length
for i in range(1, self.n_trees):
self.intervals[i][0]=random.randint(self.series_length - self.min_interval)
self.intervals[i][1]=random.randint(self.intervals[i][0] + self.min_interval, self.series_length)
# Check lag against global properties
if self.acf_lag > self.series_length-self.acf_min_values:
self.acf_lag = self.series_length - self.acf_min_values
if self.acf_lag < 0:
self.acf_lag = 1
self.lags=np.zeros(self.n_trees, dtype=int)
for i in range(0, self.n_trees):
temp_lag=self.acf_lag
if temp_lag > self.intervals[i][1]-self.intervals[i][0]-self.acf_min_values:
temp_lag = self.intervals[i][1] - self.intervals[i][0] - self.acf_min_values
if temp_lag < 0:
temp_lag = 1
self.lags[i] = int(temp_lag)
acf_x = np.empty(shape=(n_instances, self.lags[i]))
ps_len = (self.intervals[i][1] - self.intervals[i][0]) / 2
ps_x = np.empty(shape=(n_instances, int(ps_len)))
for j in range(0, n_instances):
acf_x[j] = acf(X[j,self.intervals[i][0]:self.intervals[i][1]], temp_lag)
ps_x[j] = ps(X[j, self.intervals[i][0]:self.intervals[i][1]])
transformed_x = np.concatenate((acf_x,ps_x),axis=1)
# transformed_x=acf_x
tree = deepcopy(self.base_estimator)
tree.fit(transformed_x, y)
self.classifiers.append(tree)
return self
def predict(self, X):
"""
Find predictions for all cases in X. Built on top of predict_proba
Parameters
----------
X : array-like or sparse matrix of shape = [n_instances, n_columns] or a data frame.
If a Pandas data frame is passed, it must have a single column of Series objects.
Returns
-------
output : array of shape = [n_instances]
"""
probs=self.predict_proba(X)
return [self.classes_[np.argmax(prob)] for prob in probs]
def predict_proba(self, X):
"""
Find probability estimates for each class for all cases in X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_instances, n_columns]
The input samples. If a Pandas data frame is passed, it must have a single column of Series objects.
Local variables
----------
n_samps : int, number of cases to classify
n_columns : int, number of attributes in X, must match _num_atts determined in fit
Returns
-------
output : array of shape = [n_instances, n_classes] of probabilities
"""
if isinstance(X, pd.DataFrame):
if X.shape[1] > 1:
raise TypeError("RISE cannot handle multivariate problems yet")
elif isinstance(X.iloc[0, 0], pd.Series):
X = np.asarray([a.values for a in X.iloc[:, 0]])
else:
raise TypeError(
"Input should either be a 2d numpy array, or a pandas dataframe with a single column of Series objects (TSF cannot yet handle multivariate problems")
rows,cols=X.shape
#HERE Do transform again
n_cases, n_columns = X.shape
if n_columns != self.series_length:
raise TypeError(" ERROR number of attributes in the train does not match that in the test data")
sums = np.zeros((X.shape[0],self.n_classes), dtype=np.float64)
for i in range(0, self.n_trees):
acf_x = np.empty(shape=(n_cases, self.lags[i]))
ps_len=(self.intervals[i][1] - self.intervals[i][0]) / 2
ps_x = np.empty(shape=(n_cases,int(ps_len)))
for j in range(0, n_cases):
acf_x[j] = acf(X[j, self.intervals[i][0]:self.intervals[i][1]], self.lags[i])
ps_x[j] = ps(X[j, self.intervals[i][0]:self.intervals[i][1]])
transformed_x=np.concatenate((acf_x,ps_x),axis=1)
sums += self.classifiers[i].predict_proba(transformed_x)
output = sums / (np.ones(self.n_classes) * self.n_estimators)
return output
def acf(x, max_lag):
""" autocorrelation function transform, currently calculated using standard stats method.
We could use inverse of power spectrum, especially given we already have found it, worth testing for speed and correctness
HOWEVER, for long series, it may not give much benefit, as we do not use that many ACF terms
Parameters
----------
x : array-like shape = [interval_width]
max_lag: int, number of ACF terms to find
Return
----------
y : array-like shape = [max_lag]
"""
y = np.zeros(max_lag)
length=len(x)
for lag in range(1, max_lag + 1):
# Do it ourselves to avoid zero variance warnings
s1=np.sum(x[:-lag])
ss1=np.sum(np.square(x[:-lag]))
s2=np.sum(x[lag:])
ss2=np.sum(np.square(x[lag:]))
s1=s1/(length-lag)
s2 = s2 / (length - lag)
y[lag-1] = np.sum((x[:-lag]-s1)*(x[lag:]-s2))
y[lag - 1] = y[lag - 1]/ (length - lag)
v1 = ss1/(length - lag)-s1*s1
v2 = ss2/(length-lag)-s2*s2
if v1 <= 0.000000001 and v2 <= 0.000000001: # Both zero variance, so must be 100% correlated
y[lag - 1]=1
elif v1 <= 0.000000001 or v2 <= 0.000000001: # One zero variance the other not
y[lag - 1] = 0
else:
y[lag - 1] = y[lag - 1]/(math.sqrt(v1)*math.sqrt(v2))
return
|
np.array(y)
|
numpy.array
|
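A small usage sketch for the acf() helper above (assuming it is importable from the same module); the toy series and lag count are illustrative, and the hand-rolled values are checked against np.corrcoef:

import numpy as np

rng = np.random.default_rng(0)
x = np.sin(np.linspace(0, 8 * np.pi, 200)) + 0.1 * rng.normal(size=200)
r = acf(x, max_lag=10)  # correlations at lags 1..10
ref = np.array([np.corrcoef(x[:-k], x[k:])[0, 1] for k in range(1, 11)])
assert r.shape == (10,) and np.allclose(r, ref)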
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Removes Tensorflow debugging outputs
import tensorflow as tf
tf.get_logger().setLevel('INFO') # Removes Tensorflow debugging outputs
from auto_cnn.gan import AutoCNN
from sklearn.metrics import roc_auc_score as roc_auc
import statistics
import random
import numpy as np
import os
import cv2
import json
from os.path import isfile, join
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from tensorflow.keras.callbacks import Callback
import pandas as pd
# Sets the random seeds to make testing more consistent
random.seed(12)
tf.random.set_seed(12)
class RocCallback(Callback):
def __init__(self, training_data, validation_data):
self.x = training_data[0]
self.y = training_data[1]
self.x_val = validation_data[0]
self.y_val = validation_data[1]
def on_train_begin(self, logs={}):
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self, epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
y_pred_train = self.model.predict(self.x)
roc_train = roc_auc_score(self.y, y_pred_train)
y_pred_val = self.model.predict(self.x_val)
roc_val = roc_auc_score(self.y_val, y_pred_val)
print('\rroc-auc_train: %s - roc-auc_val: %s' % (str(round(roc_train, 4)), str(round(roc_val, 4))),
end=100 * ' ' + '\n')
return
def on_batch_begin(self, batch, logs={}):
return
def on_batch_end(self, batch, logs={}):
return
def auc_f(truth, prediction):
roc_auc_values = []
for predict, true in zip(prediction, truth):
y_true = [0 for _ in range(3)]
y_true[true[0]] = 1
roc_auc_score = roc_auc(y_true=y_true,
y_score=predict)
roc_auc_values.append(roc_auc_score)
roc_auc_value = statistics.mean(roc_auc_values)
return roc_auc_value
def from_json(file_path):
df_train = pd.read_json(file_path)
Xtrain = get_scaled_imgs(df_train)
Ytrain = np.array(df_train['is_iceberg'])
df_train.inc_angle = df_train.inc_angle.replace('na', 0)
idx_tr = np.where(df_train.inc_angle > 0)
Ytrain = Ytrain[idx_tr[0]]
Xtrain = Xtrain[idx_tr[0], ...]
Ytrain_new = []
for y in Ytrain:
new_Y = []
new_Y.append(y)
Ytrain_new.append(y)
Ytrain = np.array(Ytrain_new)
Xtrain, Xtest, Ytrain, Ytest = train_test_split(Xtrain, Ytrain, random_state=1, train_size=0.05)
Xtr_more = get_more_images(Xtrain)
Ytr_more = np.concatenate((Ytrain, Ytrain, Ytrain))
return (Xtr_more, Ytr_more), (Xtest, Ytest)
# return Xtrain, Ytrain, Xtest, Ytest
def get_scaled_imgs(df):
imgs = []
for i, row in df.iterrows():
# make 75x75 image
band_1 = np.array(row['band_1']).reshape(75, 75)
band_2 = np.array(row['band_2']).reshape(75, 75)
band_3 = band_1 + band_2 # plus since log(x*y) = log(x) + log(y)
# Rescale
a = (band_1 - band_1.mean()) / (band_1.max() - band_1.min())
b = (band_2 - band_2.mean()) / (band_2.max() - band_2.min())
c = (band_3 - band_3.mean()) / (band_3.max() - band_3.min())
im = np.dstack((a, b, c))
im = cv2.resize(im, (72, 72), interpolation=cv2.INTER_AREA)
imgs.append(im)
return np.array(imgs)
def get_more_images(imgs):
more_images = []
vert_flip_imgs = []
hori_flip_imgs = []
for i in range(0, imgs.shape[0]):
a = imgs[i, :, :, 0]
b = imgs[i, :, :, 1]
c = imgs[i, :, :, 2]
av = cv2.flip(a, 1)
ah = cv2.flip(a, 0)
bv = cv2.flip(b, 1)
bh = cv2.flip(b, 0)
cv = cv2.flip(c, 1)
ch = cv2.flip(c, 0)
vert_flip_imgs.append(
|
np.dstack((av, bv, cv))
|
numpy.dstack
|
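A hedged sketch of the completed call: np.dstack stacks three equally sized 2-D bands into a single H x W x 3 array (random toy bands stand in for the radar data):

import numpy as np

rng = np.random.default_rng(0)
band_1 = rng.random((75, 75))   # toy stand-ins for the two radar bands
band_2 = rng.random((75, 75))
band_3 = band_1 + band_2
img = np.dstack((band_1, band_2, band_3))
assert img.shape == (75, 75, 3)  # stacked along a new third axis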
#!usr/bin/python3
# -*- coding: utf-8 -*-
import random;
import numpy as np;
import scipy.sparse as sp;
from sklearn.model_selection import KFold;
from sklearn.preprocessing import StandardScaler, LabelEncoder;
def sparse_or_not(x, is_matrix=False):
"""
Convert sparse vector or sparse matrix to ndarray
If sparse, convert it to ndarray and return it
Else, return it
Parameters :
-----------
x : sparse or ndarray
Sparse vector or sparse matrix or ndarray
is_matrix : boolean, default=False
Tells whether x is a vector or a matrix
Return :
-------
y : ndarray
"""
if sp.issparse(x):
if is_matrix:
return x.toarray();
else:
return x.toarray().reshape((-1, ));
else:
return x;
def rand_initialisation(X, n_clusters, seed, cste):
"""
Initialize centroids from X randomly
Parameters :
-----------
X : ndarray of shape (n_samples, n_features)
Samples to be clustered
n_clusters : int
Number of clusters / Number of centroids
seed : int
For reproducibility
cste : int
To add to seed for reproducibility
Return :
-------
centroids : ndarray of shape (n_clusters, n_features)
Initial centroids
"""
index = [];
repeat = n_clusters;
# Take one index
if seed is None:
idx = np.random.RandomState().randint(X.shape[0]);
else:
idx = np.random.RandomState(seed+cste).randint(X.shape[0]);
while repeat != 0:
# Let's check that we haven't taken this index yet
if idx not in index:
index.append(idx);
repeat = repeat - 1;
if seed is not None:
idx = np.random.RandomState(seed+cste+repeat).randint(X.shape[0]);
return sparse_or_not(X[index], is_matrix=True);
def kmeans_plus_plus(X, n_clusters, seed, cste):
"""
Initialize centroids from X according to the kmeans++ heuristic
Parameters :
-----------
X : ndarray of shape (n_samples, n_features)
Samples to be clustered
n_clusters : int
Number of clusters / Number of centroids
seed : int
For reproducibility
cste : int
To add to seed for reproducibility
Return :
-------
centroids : ndarray of shape (n_clusters, n_features)
Initial centroids
"""
n_samples, n_features = X.shape;
centroids = [];
# First centroid is randomly selected from the data points X
if seed is None:
centroids.append( sparse_or_not(X[np.random.RandomState()
.randint(X.shape[0])]) );
else:
centroids.append( sparse_or_not(X[np.random.RandomState(seed+cste)
.randint(X.shape[0])]) );
# Let's select remaining "n_clusters - 1" centroids
for cluster_idx in range(1, n_clusters):
# Array that will store distances of data points from nearest centroid
distances = np.zeros((n_samples, ));
for sample_idx in range(n_samples):
minimum = np.inf;
# Let's compute distance of 'point' from each of the previously
# selected centroid and store the minimum distance
for j in range(0, len(centroids)):
dist = np.square( np.linalg.norm(sparse_or_not(X[sample_idx]) - centroids[j]) );
minimum = min(minimum, dist);
distances[sample_idx] = minimum;
centroids.append(sparse_or_not(X[np.argmax(distances)]));
return np.array(centroids);
def choose_byzantines(P, n_byzantines, seed):
"""
Generate machines that will become good and byzantines
Parameters :
-----------
P : int
Number of nodes (machines)
0 is the coordinator (server) ID
{1, 2,..., P-1} workers ID
n_byzantines : int
Number of byzantine nodes
seed : int
For reproducibility
Return :
-------
goods, byzantines : tuple of length 2
goods is the list of good workers
byzantines is the list of bad workers
"""
byzantines = [];
repeat = n_byzantines;
cste = 1;
while repeat != 0:
# Take one index
if seed is None:
x =
|
np.random.RandomState()
|
numpy.random.RandomState
|
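A minimal sketch of the seeding pattern used above: a seeded RandomState reproduces the same draw, while the unseeded variant pulls fresh OS entropy (n_samples is an arbitrary illustrative value):

import numpy as np

n_samples = 100
idx_a = np.random.RandomState(42).randint(n_samples)
idx_b = np.random.RandomState(42).randint(n_samples)
assert idx_a == idx_b  # same seed, same index
idx_c = np.random.RandomState().randint(n_samples)  # unseeded: not reproducible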
import os
import argparse
import matplotlib.pyplot as plt
import numpy as np
def main(args):
path=args.out_path
# losses in train
lossD = np.load(os.path.join(path, "lossD.npy"))
lossH = np.load(os.path.join(path, "lossH.npy"))
lossL = np.load(os.path.join(path, "lossL.npy"))
auc_all = np.load(os.path.join(path, "auc_all.npy"), allow_pickle=True).item()
acc_hm_all = np.load(os.path.join(path, "acc_hm_all.npy"), allow_pickle=True).item()
# rhd
auc_all_rhd = np.array(auc_all['rhd'])
acc_hm_rhd = np.array(acc_hm_all["rhd"])
# stb
auc_all_stb = np.array(auc_all['stb'])
acc_hm_stb = np.array(acc_hm_all["stb"])
# do
auc_all_do = np.array(auc_all['do'])
# eo
auc_all_eo = np.array(auc_all['eo'])
plt.figure(figsize=[50, 50])
plt.subplot(2, 4, 1)
plt.plot(lossH[:, :1], lossH[:, 1:], marker='o', label='lossH')
plt.plot(lossD[:, :1], lossD[:, 1:], marker='*', label='lossD')
plt.plot(lossL[:, :1], lossL[:, 1:], marker='h', label='lossL')
plt.title("LOSSES")
plt.legend(title='Losses Category:')
# rhd
plt.subplot(2, 4, 2)
plt.plot(auc_all_rhd[:, :1], auc_all_rhd[:, 1:], marker='d')
plt.title(
"{}_test || (EPOCH={} , AUC={:0.4f})".format("RHD", np.argmax(auc_all_rhd[:, 1:]) + 1,
np.max(auc_all_rhd[:, 1:])))
plt.subplot(2, 4, 3)
plt.plot(acc_hm_rhd[:, :1], acc_hm_rhd[:, 1:], marker='d')
plt.title(
"{}_test || (EPOCH={} , ACC_HM={:0.4f})".format("RHD", np.argmax(acc_hm_rhd[:, 1:]) + 1,
np.max(acc_hm_rhd[:, 1:])))
# stb
plt.subplot(2, 4, 4)
plt.plot(auc_all_stb[:, :1], auc_all_stb[:, 1:], marker='d')
plt.title(
"{}_test || (EPOCH={} , AUC={:0.4f})".format("STB", np.argmax(auc_all_stb[:, 1:]) + 1,
np.max(auc_all_stb[:, 1:])))
plt.subplot(2, 4, 5)
plt.plot(acc_hm_stb[:, :1], acc_hm_stb[:, 1:], marker='d')
plt.title(
"{}_test || (EPOCH={} , ACC_HM={:0.4f})".format("STB", np.argmax(acc_hm_stb[:, 1:]) + 1,
np.max(acc_hm_stb[:, 1:])))
# do
plt.subplot(2, 4, 6)
plt.plot(auc_all_do[:, :1], auc_all_do[:, 1:], marker='d')
plt.title(
"{}_test || (EPOCH={} , AUC={:0.4f})".format("DO",
|
np.argmax(auc_all_do[:, 1:] + 1)
|
numpy.argmax
|
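For clarity, a toy version of the "best epoch" lookup used in the plot titles above, with a hypothetical (epoch, AUC) array in the same column layout:

import numpy as np

auc_demo = np.array([[1, 0.80],
                     [2, 0.87],
                     [3, 0.84]])        # column 0: epoch, column 1: AUC
best_row = np.argmax(auc_demo[:, 1:])   # row index of the best AUC
print(best_row + 1, np.max(auc_demo[:, 1:]))  # -> 2 0.87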
import numpy as np
from scipy import signal
from scipy.interpolate import splev, splrep
class Record:
sr = 20000 # Sample Rate - 20 kHz
def __init__(self,array):
self.array = array
## Instance Variables
# Control System Features
self.dom_pp = []
self.rec_pp = []
self.dom_bp = []
self.rec_bp = []
self.dom_pt = []
self.rec_pt = []
self.dom_ssv = []
self.rec_ssv = []
self.dom_sse = []
self.rec_sse = []
self.dom_po = []
self.rec_po = []
self.dom_st_s = []
self.rec_st_s = []
self.dom_rt_s = []
self.rec_rt_s = []
self.dom_dt_s = []
self.rec_dt_s = []
# Spectral Analysis Features
self.dom_pulse_data = []
self.rec_pulse_data = []
self.dom_sd = []
self.rec_sd = []
self.dom_snr = []
self.rec_snr = []
self.dom_mdfr = []
self.rec_mdfr = []
self.dom_mnfr = []
self.rec_mnfr = []
# Outlier Check Results
self.outlier_count = []
## Instance Methods
# Control System Processing
self.PeakDetection()
self.PeakTime()
self.SteadyStateValErr()
self.PercentOvershoot()
self.SettlingTime()
self.RiseTime()
self.DelayTime()
# Spectral Analysis Processing
self.RunSpectralAnalysis()
self.total_rec = np.min((len(self.dom_sd), len(self.rec_sd)))
# # Outlier Detection and Removal
# self.OutlierCount()
# self.RemoveOutliers()
# # Build Feature Datastructure
self.features = self.GenerateFeatures()
self.headers = []
self.headers.append('Dom_Peak_Time')
self.headers.append('Dom_Steady_State_Value')
self.headers.append('Dom_Steady_State_Error')
self.headers.append('Dom_Percent_Overshoot')
self.headers.append('Dom_Settling_Time')
self.headers.append('Dom_Rise_Time')
self.headers.append('Dom_Delay_Time')
for i in np.arange(len(self.dom_sd[0])):
self.headers.append('Dom_Spectral_Bin_%d' % i)
self.headers.append('Dom_SNR')
self.headers.append('Dom_Mean_Freq')
self.headers.append('Dom_Median_Freq')
self.headers.append('Rec_Peak_Time')
self.headers.append('Rec_Steady_State_Value')
self.headers.append('Rec_Steady_State_Error')
self.headers.append('Rec_Percent_Overshoot')
self.headers.append('Rec_Settling_Time')
self.headers.append('Rec_Rise_Time')
self.headers.append('Rec_Delay_Time')
for i in np.arange(len(self.rec_sd[0])):
self.headers.append('Rec_Spectral_Bin_%d' % i)
self.headers.append('Rec_SNR')
self.headers.append('Rec_Mean_Freq')
self.headers.append('Rec_Median_Freq')
def PeakDetection(self):
##### PeakDetection
# Input: array - raw signal data for record
# Output: dom_pp - dominant Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_pp - recessive Pulse Peak index
# rec_bp - recessive Before Pulse peak index
### Pulse Peak Detection ###
# Calculate difference array
arr_diff = np.diff(self.array, prepend=self.array[0])
# Perform moving average filter, width=3, x2
w = 3
arr_diff = np.convolve(arr_diff, np.ones(w), 'valid') / w
arr_diff = np.convolve(arr_diff, np.ones(w), 'valid') / w
# Prepend zeros to offset processing delay
arr_diff = np.insert(arr_diff, 0, np.zeros((w-1)*2), axis=0)
# Crossing filter to detect dominant and recessive leading edge zones
dom_pp_ts = (arr_diff > 0.2).astype(float)
rec_pp_ts = (arr_diff < -0.2).astype(float)
# Find peak for each zone (dominant)
a = np.where(dom_pp_ts == 1)[0].astype(float)
b = np.diff(a, prepend=0)
c = np.where(b > 1)[0]
dom_pp = a[c].astype(int)
# Remove errant peaks (dominant)
corr_idx = np.concatenate((np.diff(dom_pp),[np.average(np.diff(dom_pp))]))
if np.min(np.diff(corr_idx)) < 100:
corr_idx = np.where(corr_idx > np.average(corr_idx/4))[0]
dom_pp = dom_pp[corr_idx]
# Find peak for each zone (recessive)
a = np.where(rec_pp_ts == 1)[0].astype(float)
b = np.diff(a, prepend=0)
c = np.where(b > 1)[0]
rec_pp = a[c].astype(int)
# Remove errant peaks (recessive)
corr_idx = np.concatenate((np.diff(rec_pp),[np.average(np.diff(rec_pp))]))
if np.min(np.diff(corr_idx)) < 15:
corr_idx = np.where(corr_idx > np.average(corr_idx/4))[0]
rec_pp = rec_pp[corr_idx]
# Pair dom and rec indices
dom_len = len(dom_pp)
rec_len = len(rec_pp)
dom_is_larger = []
if dom_len > rec_len + 1:
dom_is_larger = 1
elif rec_len > dom_len + 1:
dom_is_larger = 0
if not dom_is_larger == []:
len_min = np.min((dom_len, rec_len))
len_dif = np.abs(dom_len - rec_len) + 1
dif_amt = []
for i in np.arange(len_dif):
if dom_is_larger:
temp = dom_pp[0:dom_len] - rec_pp[i:dom_len+i]
else:
temp = dom_pp[0:dom_len] - rec_pp[i:dom_len+i]
temp = np.abs(temp)
temp = np.sum(temp)
dif_amt.append(temp)
dif_loc = np.where(np.min(dif_amt) == dif_amt)[0]
if dom_is_larger:
dom_pp = dom_pp[dif_loc[0]:rec_len+dif_loc[0]+1]
else:
rec_pp = rec_pp[dif_loc[0]:dom_len+dif_loc[0]+1]
# Create timestamps using indices
dom_pp_ts = np.zeros(dom_pp_ts.size)
dom_pp_ts[dom_pp] = 1
self.dom_pp = np.where(dom_pp_ts == 1)[0]
rec_pp_ts = np.zeros(rec_pp_ts.size)
rec_pp_ts[rec_pp] = 1
self.rec_pp = np.where(rec_pp_ts == 1)[0]
### Pre-Peak Detection ###
# Crossing filter to detect pre-dominant steady state (Before Leading-edge)
dom_bp_ts = np.abs(np.diff(self.array - 2.5, prepend = self.array[0]))
w = 5
dom_bp_ts = np.convolve(dom_bp_ts, np.ones(w), 'valid') / w
dom_bp_ts = np.insert(dom_bp_ts, 0, np.zeros(w-1), axis=0)
dom_bp_ts = 1-(dom_bp_ts > 0.05).astype(float)
# Crossing filter to detect pre-recessive steady state (Before Leading-edge)
rec_bp_ts = np.abs(np.diff(3.5 - self.array, prepend = self.array[0]))
w = 5
rec_bp_ts = np.convolve(rec_bp_ts, np.ones(w), 'valid') / w
rec_bp_ts = np.insert(rec_bp_ts, 0, np.zeros(w-1), axis=0)
rec_bp_ts = 1-(rec_bp_ts > 0.05).astype(float)
## Find the last instance of steady state prior to dominant peaks
jj = np.zeros(dom_pp.size).astype(int)
for k in np.arange(0,dom_pp.size):
# "Dominant-low steady state" indices before peak
j = np.where(dom_bp_ts[0:dom_pp[k]] == 1)
j = j[0]
# Find nearest index before dominant peak
min_idx = j-dom_pp[k]
min_idx = min_idx[np.where(np.min(np.abs(min_idx)) == np.abs(min_idx))[0]]
jj[k] = ((min_idx + dom_pp[k])[0])
# Dominant prior-to-peak steady-state indices
dom_bp_ts2 = np.zeros(dom_bp_ts.size, dtype=int)
dom_bp_ts2[jj] = 1
self.dom_bp = jj
## Find the last instance of steady state prior to recessive peaks
jj = np.zeros(rec_pp.size).astype(int)
for k in np.arange(0,rec_pp.size):
# "Recesive-low steady state" indices before peak
j = np.where(rec_bp_ts[0:rec_pp[k]] == 1)
j = j[0]
# Find nearest index before recessive peak
min_idx = j-rec_pp[k]
min_idx = min_idx[np.where(np.min(np.abs(min_idx)) == np.abs(min_idx))[0]]
jj[k] = ((min_idx + rec_pp[k])[0])
# Recessive prior-to-peak steady-state indices
rec_bp_ts2 = np.zeros(rec_bp_ts.size, dtype=int)
rec_bp_ts2[jj] = 1
self.rec_bp = jj
def PeakTime(self):
##### PeakTime
# Input: dom_pp - dominant Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_pp - recessive Pulse Peak index
# rec_bp - recessive Before Pulse peak index
# sr - sample rate of the raw data
# Output: dom_pt - dominant Peak Time
# rec_pt - recessive Peak Time
self.dom_pt = (self.dom_pp-self.dom_bp)/Record.sr
self.rec_pt = (self.rec_pp-self.rec_bp)/Record.sr
def SteadyStateValErr(self):
##### Steady State Value and Error
# Input: array - raw signal data for record
# dom_bp - dominant Before Pulse peak index
# rec_bp - recessive Before Pulse peak index
# Output: dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# dom_sse - dominant Steady State Error
# rec_sse - recessive Steady State Error
# Perform moving average filter, width=19
w = 19
arr_avg = np.convolve(self.array, np.ones(w), 'valid') / w
arr_avg = np.insert(arr_avg, 0, arr_avg[0]*np.ones(w-1), axis=0)
# Extract Steady State Value from previous Steady State Index
dom_ssv_idx = self.rec_bp
rec_ssv_idx = self.dom_bp
self.dom_ssv = arr_avg[dom_ssv_idx]
self.rec_ssv = arr_avg[rec_ssv_idx]
# Calculate Steady State Error
self.dom_sse = arr_avg[dom_ssv_idx] - 3.5
self.rec_sse = arr_avg[rec_ssv_idx] - 2.5
def PercentOvershoot(self):
##### Percent Overshoot
# Input: array - raw signal data for record
# dom_pp - dominant Before Pulse peak index
# rec_pp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# Output: dom_po - dominant Percent Overshoot
# rec_po - recessive Percent Overshoot
dom_pv = self.array[self.dom_pp]
rec_pv = self.array[self.rec_pp]
try:
self.dom_po = 100 * (dom_pv - self.dom_ssv) / self.dom_ssv
self.rec_po = 100 * (self.rec_ssv - rec_pv) / self.rec_ssv
except:
self.dom_po = 100 * (dom_pv - np.average(self.dom_ssv)) / np.average(self.dom_ssv)
self.rec_po = 100 * (np.average(self.rec_ssv) - rec_pv) / np.average(self.rec_ssv)
def SettlingTime(self):
##### Settling Time
# Input: array - raw signal data for record
# dom_pp - dominant Before Pulse peak index
# rec_pp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# sr - sample rate of the raw data
# Output: dom_st_s - dominant Settling Time (s)
# rec_st_s - recessive Settling Time (s)
ss_rng = 0.05 # 5% Steady State Range of 1V Vpp design
# Find index and time of settling point (dominant)
w = 3
arr_avg1 = np.convolve(np.abs(self.array-np.average(self.dom_ssv)), np.ones(w), 'valid') / w
arr_avg1 = np.insert(arr_avg1, 0, arr_avg1[0]*np.ones(w-1), axis=0)
arr_avg11 = np.abs(np.round(arr_avg1,decimals=2))
dom_st_idx = np.where(arr_avg11 <= ss_rng)[0]
dom_st = np.zeros(self.dom_pp.size)
if dom_st_idx.size != 0:
for i in np.arange(self.dom_pp.size):
dom_st_idx[dom_st_idx <= self.dom_pp[i]] = -self.array.size
j = np.where(
np.min(np.abs(dom_st_idx - self.dom_pp[i]))
== np.abs(dom_st_idx - self.dom_pp[i])
)[0][-1]
dom_st[i] = dom_st_idx[j]
dom_st = dom_st.astype(int)
else:
self.dom_st = np.concatenate((self.dom_pp[1:],[self.array.size]))
self.dom_st_s = (dom_st - self.dom_pp)/Record.sr
# Find index and time of settling point (recessive)
w = 3
arr_avg2 = np.convolve(np.average(self.dom_ssv)-self.array, np.ones(w), 'valid') / w
arr_avg2 = np.insert(arr_avg2, 0, arr_avg2[0]*np.ones(w-1), axis=0)
arr_avg22 = np.abs(np.round(arr_avg2,decimals=2))
rec_st_idx = np.where(arr_avg22 <= ss_rng)[0]
rec_st = np.zeros(self.rec_pp.size)
for i in np.arange(self.rec_pp.size):
rec_st_idx[rec_st_idx <= self.rec_pp[i]] = -self.array.size
j = np.where(
np.min(np.abs(rec_st_idx - self.rec_pp[i]))
== np.abs(rec_st_idx - self.rec_pp[i])
)[0][-1]
rec_st[i] = rec_st_idx[j]
rec_st = rec_st.astype(int)
self.rec_st_s = (rec_st - self.rec_pp)/Record.sr
def RiseTime(self):
##### Rise Time
# Input: array - raw signal data for record
# dom_pp - dominant Pulse Peak index
# rec_pp - recessive Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_bp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# sr - sample rate of the raw data
# Output: dom_rt_s - dominant Rise Time (s)
# rec_rt_s - recessive Rise Time (s)
# Find index and time of rise point (dominant)
dom_rt_ts = (self.array.copy() - np.average(self.rec_ssv) <= 1).astype(int)
dom_rt_idx = np.where(dom_rt_ts == 1)[0]
dom_rt = np.zeros(self.dom_pp.size)
for i in np.arange(self.dom_pp.size):
j = np.where(np.min(np.abs(dom_rt_idx - self.dom_pp[i]))
== np.abs(dom_rt_idx - self.dom_pp[i]))[0][-1]
dom_rt[i] = dom_rt_idx[j]
dom_rt = dom_rt.astype(int)
self.dom_rt_s = (dom_rt - self.dom_bp)/Record.sr
# Find index and time of rise point (recessive)
rec_rt_ts = (-self.array.copy() + np.average(self.dom_ssv) <= 1).astype(int)
rec_rt_idx = np.where(rec_rt_ts == 1)[0]
rec_rt = np.zeros(self.rec_pp.size)
for i in np.arange(self.rec_pp.size):
j = np.where(np.min(np.abs(rec_rt_idx - self.rec_pp[i]))
== np.abs(rec_rt_idx - self.rec_pp[i]))[0][-1]
rec_rt[i] = rec_rt_idx[j]
rec_rt = rec_rt.astype(int)
self.rec_rt_s = (rec_rt - self.rec_bp)/Record.sr
def DelayTime(self):
##### Delay Time
# Input: array - raw signal data for record
# dom_pp - dominant Pulse Peak index
# rec_pp - recessive Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_bp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# sr - sample rate of the raw data
# Output: dom_dt_s - dominant Delay Time (s)
# rec_dt_s - recessive Delay Time (s)
# Find index and time of delay point (dominant)
dom_dt_ts = (self.array.copy() - np.average(self.rec_ssv) <= 0.5).astype(int)
dom_dt_idx = np.where(dom_dt_ts == 1)[0]
dom_dt = np.zeros(self.dom_pp.size)
for i in np.arange(self.dom_pp.size):
j = np.where(np.min(np.abs(dom_dt_idx - self.dom_pp[i]))
== np.abs(dom_dt_idx - self.dom_pp[i]))[0][-1]
dom_dt[i] = dom_dt_idx[j]
dom_dt = dom_dt.astype(int)
self.dom_dt_s = (dom_dt - self.dom_bp)/Record.sr
# Find index and time of delay point (recessive)
rec_dt_ts = (-self.array.copy() + np.average(self.dom_ssv) <= 0.5).astype(int)
rec_dt_idx = np.where(rec_dt_ts == 1)[0]
rec_dt = np.zeros(self.rec_pp.size)
for i in np.arange(self.rec_pp.size):
j = np.where(np.min(np.abs(rec_dt_idx - self.rec_pp[i]))
== np.abs(rec_dt_idx - self.rec_pp[i]))[0][-1]
rec_dt[i] = rec_dt_idx[j]
rec_dt = rec_dt.astype(int)
self.rec_dt_s = (rec_dt - self.rec_bp)/Record.sr
def RunSpectralAnalysis(self):
##### Spectral Analysis
# Run the following methods:
#
# + Spectral Density Binning
# + Signal-to-Noise Ratio
# + Median Frequency
# + Mean Frequency
#
# Features will be processed for both
# Dominant and Recessive CAN High bits
self.SpectralDensityBinning()
self.SignalToNoiseRatio()
self.MeanMedianFrequency()
def SpectralDensityBinning(self):
##### Bin Spectral Density
index_shift = -5 # Include some steady state info from prev pulse
dom_pp_sd = self.dom_pp.copy() + index_shift
rec_pp_sd = self.rec_pp.copy() + index_shift
# Find the start/end pulse indices
if self.dom_pp[0] <= self.rec_pp[0]:
if len(self.dom_pp) > len(self.rec_pp):
dom_pp_sd = dom_pp_sd[0:-1]
idx_dom_se = np.array([dom_pp_sd,rec_pp_sd])
idx_rec_se = np.array([rec_pp_sd[0:-1],dom_pp_sd[1:]])
else:
if len(self.rec_pp) > len(self.dom_pp):
rec_pp_sd = rec_pp_sd[0:-1]
idx_rec_se = np.array([rec_pp_sd,dom_pp_sd])
idx_dom_se = np.array([dom_pp_sd[0:-1],rec_pp_sd[1:]])
# Remove pulses that don't provide enough steady-state information from the prev pulse
if idx_dom_se[0][0] < -index_shift:
idx_dom_se = np.array([idx_dom_se[0][1:],idx_dom_se[1][1:]])
if idx_rec_se[0][0] < -index_shift:
idx_rec_se = np.array([idx_rec_se[0][1:],idx_rec_se[1][1:]])
# Check for out-of-order index error
if idx_dom_se[0][0] > idx_dom_se[1][0]:
temp1 = np.array([idx_dom_se[1],idx_dom_se[0]])
temp2 = np.array([idx_dom_se[0],idx_rec_se[1]])
idx_dom_se = temp2
idx_rec_se = temp1
# Save dom pulse info to parent method variable dom_pulse_data
for i in np.arange(idx_dom_se.shape[1]):
self.dom_pulse_data.append(self.array[idx_dom_se[0][i]:idx_dom_se[1][i]])
# Save dom pulse info to parent method variable rec_pulse_data
for i in np.arange(idx_rec_se.shape[1]):
self.rec_pulse_data.append(self.array[idx_rec_se[0][i]:idx_rec_se[1][i]])
# Reset indices
idx_dom_se = idx_dom_se - index_shift
idx_rec_se = idx_rec_se - index_shift
# Bin power densities
def binned_sd(Pxx_den, nbins):
bs = Pxx_den.size/nbins
bs = round(bs)
Pxx_hist = []
for i in np.arange(nbins):
idx_s = i*bs
idx_e = (i+1)*bs
if idx_e >= Pxx_den.size:
idx_e = Pxx_den.size - 1
Pxx_hist.append(np.average(Pxx_den[idx_s:idx_e]))
Pxx_hist = np.nan_to_num(Pxx_hist)
return Pxx_hist
# Select bin sizes
bin_sel = 2
dom_nbin = [15,13,10] # Bin size limited by pulse length
# Perform binning of spectral density
self.dom_sd = []
for i in np.arange(len(self.dom_pulse_data)):
f, pd = signal.welch(self.dom_pulse_data[i], Record.sr, nperseg=len(self.dom_pulse_data[i]));
self.dom_sd.append(binned_sd(pd, dom_nbin[bin_sel]))
rec_nbin = [10, 8, 5] # Bin size limited by pulse length
self.rec_sd = []
for i in np.arange(len(self.rec_pulse_data)):
f, pd = signal.welch(self.rec_pulse_data[i], Record.sr, nperseg=len(self.rec_pulse_data[i]));
self.rec_sd.append(binned_sd(pd, rec_nbin[bin_sel]))
def SignalToNoiseRatio(self):
index_shift = -5
self.dom_snr = []
for i in np.arange(len(self.dom_pulse_data)):
cur_array = self.dom_pulse_data[i]
signl = (np.arange(len(cur_array)) > -index_shift-1).astype(float)*np.average(self.dom_ssv) + \
(np.arange(len(cur_array)) <= -index_shift-1).astype(float)*np.average(self.rec_ssv)
noise = signl - cur_array
f, s_pd = signal.welch(signl, Record.sr, nperseg=len(signl));
f, n_pd = signal.welch(noise, Record.sr, nperseg=len(noise));
Ps = sum(s_pd)
Pn = sum(n_pd)
if Pn == 0:
self.dom_snr.append(np.nan)
continue
self.dom_snr.append(10*np.log10(Ps/Pn))
self.rec_snr = []
for i in np.arange(len(self.rec_pulse_data)):
cur_array = self.rec_pulse_data[i]
signl = (np.arange(len(cur_array)) > -index_shift-2).astype(float)*np.average(self.rec_ssv) + \
(np.arange(len(cur_array)) <= -index_shift-2).astype(float)*np.average(self.dom_ssv)
noise = signl - cur_array
f, s_pd = signal.welch(signl, Record.sr, nperseg=len(signl))
f, n_pd = signal.welch(noise, Record.sr, nperseg=len(noise))
Ps = sum(s_pd)
Pn = sum(n_pd)
if Pn == 0:
self.rec_snr.append(np.nan)
continue
self.rec_snr.append(10*np.log10(Ps/Pn))
def MeanMedianFrequency(self):
self.dom_mdfr = []
self.rec_mdfr = []
self.dom_mnfr = []
self.rec_mnfr = []
self.dom_mnfr = []
self.dom_mdfr = []
for i in np.arange(len(self.dom_pulse_data)):
cur_pulse = self.dom_pulse_data[i]
f, pd = signal.welch(cur_pulse, Record.sr, nperseg=len(cur_pulse))
spl = splrep(f, pd, k=1)
x2 = np.arange(f[0], f[-1],0.01)
y2 = splev(x2, spl)
y21 = y2/np.sum(y2) # Normalize spectra
y22 = np.cumsum(y21) # Cumulative sum (CDF for SPD)
y23 = y22-0.5 # Subtract 50% of energy
y24 = abs(y23) # Abs value to create a minimum
y25 = np.where(np.min(y24) == y24)[0][-1] # Locate minimum index
self.dom_mdfr.append(x2[y25]) # Retrieve frequency at the minimum (median frequency)
self.dom_mnfr.append(np.sum(pd*f)/np.sum(pd))
self.rec_mnfr = []
self.rec_mdfr = []
for i in np.arange(len(self.rec_pulse_data)):
cur_pulse = self.rec_pulse_data[i]
f, pd = signal.welch(cur_pulse, Record.sr, nperseg=len(cur_pulse))
spl = splrep(f, pd, k=1)
x2 = np.arange(f[0], f[-1],0.01)
y2 = splev(x2, spl)
y21 = y2/np.sum(y2) # Normalize spectra
y22 = np.cumsum(y21) # Cumulative sum (CDF for SPD)
y23 = y22-0.5 # Subtract 50% of energy
y24 = abs(y23) # Abs value to create a minimum
y25 = np.where(np.min(y24) == y24)[0][-1] # Locate minimum index
self.rec_mdfr.append(x2[y25]) # Retrieve frequency at the minimum (median frequency)
self.rec_mnfr.append(np.sum(pd*f)/np.sum(pd))
def OutlierCount(self):
##### Outlier Count
# Calculates the standard deviation for each feature and creates a binary
# mask of pulses that exceed the standard deviation threshold
# Binary masks are added to determine total number of deviations per pulse
# across all features
std = 1.5 # Threshold
def fix_size_disparity(in1, in2):
if in1.size > in2.size:
in2 = np.concatenate((in2,np.zeros(in1.size - in2.size))).astype(int)
elif in2.size > in1.size:
in1 = np.concatenate((in1,np.zeros(in2.size - in1.size))).astype(int)
return in1, in2
# Outlier check and size correction
self.dom_pp, self.rec_pp = fix_size_disparity(self.dom_pp, self.rec_pp)
self.dom_bp, self.rec_bp = fix_size_disparity(self.dom_bp, self.rec_bp)
self.dom_pt, self.rec_pt = fix_size_disparity(self.dom_pt, self.rec_pt)
dom_pt_out = (np.abs(self.dom_pt-np.average(self.dom_pt)) >
std*np.std(self.dom_pt)).astype(int)
rec_pt_out = (np.abs(self.rec_pt-np.average(self.rec_pt)) >
std*np.std(self.rec_pt)).astype(int)
pt_out = dom_pt_out + rec_pt_out
self.dom_ssv, self.rec_ssv = fix_size_disparity(self.dom_ssv, self.rec_ssv)
dom_ssv_out = (np.abs(self.dom_ssv-np.average(self.dom_ssv)) >
std*np.std(self.dom_ssv)).astype(int)
rec_ssv_out = (np.abs(self.rec_ssv-np.average(self.rec_ssv)) >
std*np.std(self.rec_ssv)).astype(int)
ssv_out = dom_ssv_out + rec_ssv_out
self.dom_sse, self.rec_sse = fix_size_disparity(self.dom_sse, self.rec_sse)
dom_sse_out = (np.abs(self.dom_sse-np.average(self.dom_sse)) >
std*np.std(self.dom_sse)).astype(int)
rec_sse_out = (np.abs(self.rec_sse-np.average(self.rec_sse)) >
std*np.std(self.rec_sse)).astype(int)
sse_out = dom_sse_out + rec_sse_out
self.dom_po, self.rec_po = fix_size_disparity(self.dom_po, self.rec_po)
dom_po_out = (np.abs(self.dom_po-np.average(self.dom_po)) >
std*np.std(self.dom_po)).astype(int)
rec_po_out = (np.abs(self.rec_po-np.average(self.rec_po)) >
std*np.std(self.rec_po)).astype(int)
po_out = dom_po_out + rec_po_out
self.dom_st_s, self.rec_st_s = fix_size_disparity(self.dom_st_s, self.rec_st_s)
dom_st_s_out = (np.abs(self.dom_st_s-np.average(self.dom_st_s)) >
std*np.std(self.dom_st_s)).astype(int)
rec_st_s_out = (np.abs(self.rec_st_s-np.average(self.rec_st_s)) >
std*np.std(self.rec_st_s)).astype(int)
st_s_out = dom_st_s_out + rec_st_s_out
self.dom_rt_s, self.rec_rt_s = fix_size_disparity(self.dom_rt_s, self.rec_rt_s)
dom_rt_s_out = (np.abs(self.dom_rt_s-np.average(self.dom_rt_s)) >
std*np.std(self.dom_rt_s)).astype(int)
rec_rt_s_out = (np.abs(self.rec_rt_s-np.average(self.rec_rt_s)) >
std*np.std(self.rec_rt_s)).astype(int)
rt_s_out = dom_rt_s_out + rec_rt_s_out
self.dom_dt_s, self.rec_dt_s = fix_size_disparity(self.dom_dt_s, self.rec_dt_s)
dom_dt_s_out = (np.abs(self.dom_dt_s-np.average(self.dom_dt_s)) >
std*np.std(self.dom_dt_s)).astype(int)
rec_dt_s_out = (np.abs(self.rec_dt_s-np.average(self.rec_dt_s)) >
std*np.std(self.rec_dt_s)).astype(int)
dt_s_out = dom_dt_s_out + rec_dt_s_out
self.outlier_count = pt_out + ssv_out + sse_out + \
po_out + st_s_out + rt_s_out + dt_s_out
return self.outlier_count
def RemoveOutliers(self):
##### Remove Outlier Pulses
# Checks outlier count for each pulse and removes pulses that exceed
# the deviation threshold
dev = 6
noutlier_idx = np.where(self.outlier_count < dev + 1)[0]
self.dom_pp = self.dom_pp[noutlier_idx]
self.rec_pp = self.rec_pp[noutlier_idx]
self.dom_bp = self.dom_bp[noutlier_idx]
self.rec_bp = self.rec_bp[noutlier_idx]
self.dom_pt = self.dom_pt[noutlier_idx]
self.rec_pt = self.rec_pt[noutlier_idx]
self.dom_ssv = self.dom_ssv[noutlier_idx]
self.rec_ssv = self.rec_ssv[noutlier_idx]
self.dom_sse = self.dom_sse[noutlier_idx]
self.rec_sse = self.rec_sse[noutlier_idx]
self.dom_po = self.dom_po[noutlier_idx]
self.rec_po = self.rec_po[noutlier_idx]
self.dom_st_s = self.dom_st_s[noutlier_idx]
self.rec_st_s = self.rec_st_s[noutlier_idx]
self.dom_rt_s = self.dom_rt_s[noutlier_idx]
self.rec_rt_s = self.rec_rt_s[noutlier_idx]
self.dom_dt_s = self.dom_dt_s[noutlier_idx]
self.rec_dt_s = self.rec_dt_s[noutlier_idx]
self.OutlierCount()
def summary(self):
print('Peak Time (s):')
print(' dom: ', self.dom_pt)
print(' avg: ', np.average(self.dom_pt))
print(' std: ', np.std(self.dom_pt))
print(' dev: ', np.abs(self.dom_pt-np.average(self.dom_pt)))
# print(' out: ', dom_pt_out)
print(' rec: ', self.rec_pt)
print(' avg: ', np.average(self.rec_pt))
print(' std: ', np.std(self.rec_pt))
print(' dev: ', np.abs(self.rec_pt-np.average(self.rec_pt)))
# print(' out: ', rec_pt_out)
print('')
print('Steady State Value (V):')
print(' dom: ', self.dom_ssv)
print(' avg: ', np.average(self.dom_ssv))
print(' std: ', np.std(self.dom_ssv))
print(' dev: ', np.abs(self.dom_ssv-np.average(self.dom_ssv)))
# print(' out: ', dom_ssv_out)
print(' rec: ', self.rec_ssv)
print(' avg: ', np.average(self.rec_ssv))
print(' std: ', np.std(self.rec_ssv))
print(' dev: ', np.abs(self.rec_ssv-np.average(self.rec_ssv)))
# print(' out: ', rec_ssv_out)
print('')
print('Steady State Error (V):')
print(' dom: ', self.dom_sse)
print(' avg: ', np.average(self.dom_sse))
print(' std: ', np.std(self.dom_sse))
print(' dev: ', np.abs(self.dom_sse-np.average(self.dom_sse)))
# print(' out: ', dom_sse_out)
print(' rec: ', self.rec_sse)
print(' avg: ', np.average(self.rec_sse))
print(' std: ', np.std(self.rec_sse))
print(' dev: ', np.abs(self.rec_sse-np.average(self.rec_sse)))
# print(' out: ', rec_sse_out)
print('')
print('Percent Overshoot')
print(' dom: ', self.dom_po)
print(' avg: ', np.average(self.dom_po))
print(' std: ',
|
np.std(self.dom_po)
|
numpy.std
|
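A self-contained sketch of the deviation check used in OutlierCount() above, applied to hypothetical per-pulse values with the same 1.5-standard-deviation threshold:

import numpy as np

vals = np.array([1.0, 1.1, 0.9, 1.05, 3.0])  # hypothetical per-pulse feature values
std = 1.5                                    # threshold, as in OutlierCount()
out = (np.abs(vals - np.average(vals)) > std * np.std(vals)).astype(int)
print(out)  # [0 0 0 0 1] - only the last pulse deviates beyond the threshold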
import pybullet
import numpy as np
import copy
from causal_world.utils.rotation_utils import rotate_points, cyl2cart, \
cart2cyl, euler_to_quaternion
from causal_world.configs.world_constants import WorldConstants
class SilhouetteObject(object):
def __init__(self, pybullet_client_ids, name, size, position, orientation,
color):
"""
This is the base object for a silhouette in the arena.
:param pybullet_client_ids: (list) list of pybullet client ids.
:param name: (str) specifies the name of the silhouette object
:param size: (list float) specifies the size of the object.
:param position: (list float) x, y, z position.
:param orientation: (list float) quaternion.
:param color: (list float) RGB values.
"""
self._pybullet_client_ids = pybullet_client_ids
self._name = name
self._type_id = None
self._size = size
self._color = color
self._alpha = 0.3
self._position = position
self._orientation = orientation
self._block_ids = []
self._shape_ids = []
self._define_type_id()
self._volume = None
self._set_volume()
self._init_object()
self._lower_bounds = dict()
self._upper_bounds = dict()
self._lower_bounds[self._name + "_type"] = \
np.array([self._type_id])
self._lower_bounds[self._name + "_cartesian_position"] = \
np.array([-0.5, -0.5, 0])
self._lower_bounds[self._name + "_cylindrical_position"] = \
np.array([0, 0, 0])
self._lower_bounds[self._name + "_orientation"] = \
np.array([-10] * 4)
self._lower_bounds[self._name + "_size"] = \
np.array([0.03, 0.03, 0.03])
self._lower_bounds[self._name + "_color"] = \
np.array([0] * 3)
#decision: type id is not normalized
self._upper_bounds[self._name + "_type"] = \
np.array([self._type_id])
self._upper_bounds[self._name + "_cartesian_position"] = \
np.array([0.5] * 3)
self._upper_bounds[self._name + "_cylindrical_position"] = \
np.array([0.20, np.pi, 0.5])
self._upper_bounds[self._name + "_orientation"] = \
np.array([10] * 4)
self._upper_bounds[self._name + "_size"] = \
np.array([0.1, 0.1, 0.1])
self._upper_bounds[self._name + "_color"] = \
np.array([1] * 3)
self._state_variable_names = []
self._state_variable_names = [
'type', 'cartesian_position', 'cylindrical_position', 'orientation',
'size', 'color'
]
self._state_variable_sizes = []
self._state_size = 0
for state_variable_name in self._state_variable_names:
self._state_variable_sizes.append(
self._upper_bounds[self._name + "_" +
state_variable_name].shape[0])
self._state_size += self._state_variable_sizes[-1]
self._add_state_variables()
return
def _set_volume(self):
"""
sets the volume of the goal using the size attribute.
:return:
"""
self._volume = self._size[0] * self._size[1] * self._size[2]
return
def _add_state_variables(self):
"""
used to add state variables to the silhouette object.
:return:
"""
return
def _create_object(self, pybullet_client_id,
**kwargs):
"""
:param pybullet_client_id: (int) pybullet client id to be used when
creating the goal itself.
:param kwargs: (params) parameters to be used when creating the goal
using the corresponding pybullet goal creation
parameters
:return:
"""
raise NotImplementedError("the creation function is not defined "
"yet")
def _define_type_id(self):
"""
Defines the type id of the goal itself.
:return:
"""
raise NotImplementedError("the define type id function "
"is not defined yet")
def _init_object(self):
"""
Used to initialize the goal, by creating it in the arena.
:return:
"""
for pybullet_client_id in self._pybullet_client_ids:
shape_id, block_id =\
self._create_object(pybullet_client_id)
self._block_ids.append(block_id)
self._shape_ids.append(shape_id)
self._set_color(self._color)
return
def reinit_object(self):
"""
Used to remove the goal from the arena and create it again.
:return:
"""
self.remove()
self._init_object()
return
def remove(self):
"""
Used to remove the goal from the arena.
:return:
"""
for i in range(0, len(self._pybullet_client_ids)):
pybullet.removeBody(self._block_ids[i],
physicsClientId=self._pybullet_client_ids[i])
self._block_ids = []
self._shape_ids = []
return
def _set_color(self, color):
"""
:param color: (list) color RGB normalized from 0 to 1.
:return:
"""
for i in range(len(self._pybullet_client_ids)):
pybullet.changeVisualShape(
self._block_ids[i],
-1,
rgbaColor=np.append(color, self._alpha),
physicsClientId=self._pybullet_client_ids[i])
return
def set_pose(self, position, orientation):
"""
:param position: (list) cartesian x, y, z position of the center of the
goal shape.
:param orientation: (list) quaternion x,y,z,w of the goal itself.
:return:
"""
position[-1] += WorldConstants.FLOOR_HEIGHT
for i in range(0, len(self._pybullet_client_ids)):
pybullet.resetBasePositionAndOrientation(
self._block_ids[i],
position,
orientation,
physicsClientId=self._pybullet_client_ids[i])
return
def get_state(self, state_type='dict'):
"""
:param state_type: (str) specifying 'dict' or 'list'
:return: (list) returns either a dict or a list specifying the state
variables of the goal shape.
"""
if state_type == 'dict':
state = dict()
position, orientation = \
pybullet.getBasePositionAndOrientation(
self._block_ids[0],
physicsClientId =
self._pybullet_client_ids[0])
position = np.array(position)
position[-1] -= WorldConstants.FLOOR_HEIGHT
state["type"] = self._type_id
state["cartesian_position"] =
|
np.array(position)
|
numpy.array
|
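A minimal sketch of the completed line: the pybullet position tuple is converted to a NumPy array so the floor-height offset can be applied in place (the FLOOR_HEIGHT value below is assumed for illustration):

import numpy as np

FLOOR_HEIGHT = 0.01            # assumed stand-in for WorldConstants.FLOOR_HEIGHT
position = (0.1, -0.2, 0.05)   # tuple as returned by pybullet for the base pose
position = np.array(position)  # tuples are immutable; the array copy is not
position[-1] -= FLOOR_HEIGHT
print(position)                # [ 0.1  -0.2   0.04]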
"""
This file contains experimental versions of downgridding xy-grids.
Conclusions:
- **Downgrid xy-grid**. It seems that using `UnivariateSpline` (combined
with custom greedy downgridding based on probability penalty logic to ensure
the exact number of elements in the output xy-grid) delivers the best compromise
between computation time and accuracy. This is implemented in `downgrid_spline()`.
Some benchmarks are done in 'downgrid_cont_benchmarks.py'.
- **Downgrid xp-grid**. Use iterative greedy removal of points based on
penalty. Implemented in `downgrid_xp()` (which should probably be simplified to
not use `downgrid_metricpenalty()`), as part of other approaches to
xy-downgridding.
"""
import numpy as np
from scipy.integrate import quad
from scipy.interpolate import UnivariateSpline, interp1d
from scipy.linalg import solve_banded
from scipy.optimize import root_scalar
import scipy.stats as ss
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.cm as cm
from randomvars import Cont
from randomvars.options import config
# %% Downgrid xy-grid
# Version 1. Downgrid by iteratively removing points. At each iteration a point is
# picked so that removing it leads to **the smallest absolute change in
# square** compared to the current (at the beginning of the iteration) xy-grid.
def downgrid_probpenalty(x, y, n_grid_out, plot_step=10):
x_orig, y_orig = x, y
cur_net_change = 0
# Iteratively remove one-by-one x-values with the smallest penalty
for i in range(len(x) - n_grid_out):
penalty = compute_penalty(x, y)
# Pick best index as the one which delivers the smallest change in
# total square compared to the current iteration
min_ind = np.argmin(penalty)
if (i + 1) % plot_step == 0:
plt.plot(x, penalty, "best index")
plt.title(f"Length of x = {len(x)}")
plt.show()
plt.plot(x_orig, y_orig, label="original")
plt.plot(x, y, "-o", label="current")
plt.plot(x[min_ind], y[min_ind], "o", label="best index")
plt.title(f"Density. Length of x = {len(x)}")
plt.legend()
plt.show()
# print(f"Delete x={x[min_ind]}")
x = np.delete(x, min_ind)
y = np.delete(y, min_ind)
y = y / trapez_integral(x, y)
return x, y / trapez_integral(x, y)
def compute_penalty(x, y):
# Compute the current neighboring square of every x-grid point: square of the
# left trapezoid (zero if there is none) plus square of the right trapezoid
# (zero if there is none).
trapezoids_ext = np.concatenate(([0], 0.5 * np.diff(x) * (y[:-1] + y[1:]), [0]))
square_cur = trapezoids_ext[:-1] + trapezoids_ext[1:]
# Compute new neighboring square after removing corresponding x-value and
# replacing two segments with one segment connecting neighboring xy-points.
# The leftmost and rightmost x-values are removed without any segment
# replacement.
square_new = np.concatenate(([0], 0.5 * (x[2:] - x[:-2]) * (y[:-2] + y[2:]), [0]))
# Compute penalty as the absolute change in total square if the
# corresponding x-point is removed.
return np.abs(square_cur - square_new)
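# Illustrative sketch (not part of the original file): on a toy xy-grid the
# penalty of a point equals the absolute change in the trapezoidal integral
# caused by removing that point, which is what the greedy loop above exploits.
def _check_penalty_on_toy_grid():
    x_demo = np.array([0.0, 0.4, 1.0, 1.7, 2.0])
    y_demo = np.array([0.1, 0.8, 1.0, 0.5, 0.1])
    y_demo = y_demo / np.trapz(y_demo, x_demo)  # normalize like a density
    pen = compute_penalty(x_demo, y_demo)
    i_best = np.argmin(pen)  # point whose removal changes the square the least
    sq_before = np.trapz(y_demo, x_demo)
    sq_after = np.trapz(np.delete(y_demo, i_best), np.delete(x_demo, i_best))
    assert np.isclose(pen[i_best], abs(sq_after - sq_before))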
# Version 1.5. Downgrid by iteratively removing points. At each iteration a
# point is picked so that removing it leads to **the best balancing of the
# removed square**.
def downgrid_probpenalty_1half(x, y, n_grid_out, plot_step=10):
x_orig, y_orig = x, y
cur_net_change = 0
# Iteratively remove one-by-one x-values with the smallest penalty
for i in range(len(x) - n_grid_out):
penalty = compute_penalty_1half(x, y)
# Pick best index as the one which best balances current net change
# from the input.
best_ind = np.argmin(np.abs(cur_net_change + penalty))
cur_net_change = cur_net_change + penalty[best_ind]
if (i + 1) % plot_step == 0:
            plt.plot(x, penalty, "-o")
            plt.plot(x[best_ind], penalty[best_ind], "or")
plt.title(f"Length of x = {len(x)}")
plt.show()
plt.plot(x_orig, y_orig, label="original")
plt.plot(x, y, "-o", label="current")
plt.plot(x[best_ind], y[best_ind], "o", label="best index")
plt.title(f"Density. Length of x = {len(x)}")
plt.legend()
plt.show()
# print(f"Delete x={x[best_ind]}")
x = np.delete(x, best_ind)
y = np.delete(y, best_ind)
y = y / trapez_integral(x, y)
return x, y / trapez_integral(x, y)
def compute_penalty_1half(x, y):
    # Compute the current neighboring square of every x-grid point: square of the
    # left trapezoid (zero if there is none) plus square of the right trapezoid
    # (zero if there is none).
trapezoids_ext = np.concatenate(([0], 0.5 * np.diff(x) * (y[:-1] + y[1:]), [0]))
square_cur = trapezoids_ext[:-1] + trapezoids_ext[1:]
# Compute new neighboring square after removing corresponding x-value and
# replacing two segments with one segment connecting neighboring xy-points.
# The leftmost and rightmost x-values are removed without any segment
# replacement.
square_new = np.concatenate(([0], 0.5 * (x[2:] - x[:-2]) * (y[:-2] + y[2:]), [0]))
    # Compute penalty as the amount by which the total square will change if the
    # corresponding x-point is removed.
return square_new - square_cur
# Version 2. Downgrid by iteratively removing points. At each iteration a point
# is picked so that removing it (after renormalization) results in **the
# smallest absolute difference between the input reference CDF and the new CDF
# at the removed point**.
def downgrid_probpenalty_2(x, y, n_grid_out, plot_step=10):
x_orig, y_orig = x, y
cump_ref = trapez_integral_cum(x, y)
# Iteratively remove one-by-one x-values with the smallest penalty
for i in range(len(x_orig) - n_grid_out):
penalty = compute_penalty_2(x, y, cump_ref)
min_ind = np.argmin(penalty)
# print(f"Delete x={x[min_ind]}")
if (i + 1) % plot_step == 0:
plt.plot(x, penalty, "-o")
plt.plot(x[min_ind], penalty[min_ind], "or")
plt.title(f"Penalty. Length of x = {len(x)}")
plt.show()
plt.plot(x_orig, y_orig, label="original")
plt.plot(x, y, "-o", label="current")
plt.plot(x[min_ind], y[min_ind], "o", label="min. penalty")
plt.title(f"Density. Length of x = {len(x)}")
plt.legend()
plt.show()
x, y, cump_ref = delete_index(x, y, cump_ref, min_ind)
return x, y / trapez_integral(x, y)
def compute_penalty_2(x, y, cump_ref):
integral_cum = trapez_integral_cum(x, y)
sq_total = integral_cum[-1]
sq_inter = np.diff(trapez_integral_cum(x, y))
# Nearest two-squares of inner x-points (sum of squares of two nearest
# intervals)
sq_twointer_before = sq_inter[:-1] + sq_inter[1:]
# Squares after removing x-point (for inner x-points only)
sq_twointer_after = 0.5 * (y[:-2] + y[2:]) * (x[2:] - x[:-2])
# Coefficient of stretching
alpha = sq_total / (sq_total + (sq_twointer_after - sq_twointer_before))
# Compute penalty as difference between input reference cump and cump after
# removing corresponding x-points
dx = np.diff(x)[:-1]
dx2 = x[2:] - x[:-2]
dy2 = y[2:] - y[:-2]
penalty_inner = np.abs(
cump_ref[1:-1]
- alpha * (0.5 * dy2 * dx ** 2 / dx2 + y[:-2] * dx + cump_ref[:-2])
)
return np.concatenate(([cump_ref[1]], penalty_inner, [1 - cump_ref[-2]]))
# Version 3 (**VERY SLOW**). Downgrid by iteratively removing points. At each
# iteration a point is picked so that removing it (after renormalization)
# results in the smallest functional distance between the input reference CDF
# and the new CDF.
def downgrid_probpenalty_3(x, y, n_grid_out, plot_step=10, method="L2"):
x_orig, y_orig = x, y
cdf_spline_ref = xy_to_cdf_spline(x, y)
# Iteratively remove one-by-one x-values with the smallest penalty
for i in range(len(x_orig) - n_grid_out):
penalty = compute_penalty_3(x, y, cdf_spline_ref, method=method)
min_ind = np.argmin(penalty)
print(f"Delete x={x[min_ind]}")
if (i + 1) % plot_step == 0:
plt.plot(x, penalty, "-o")
plt.plot(x[min_ind], penalty[min_ind], "or")
plt.title(f"Penalty. Length of x = {len(x)}")
plt.show()
plt.plot(x_orig, y_orig, label="original")
plt.plot(x, y, "-o", label="current")
plt.plot(x[min_ind], y[min_ind], "o", label="min. penalty")
plt.title(f"Density. Length of x = {len(x)}")
plt.legend()
plt.show()
x = np.delete(x, min_ind)
y = np.delete(y, min_ind)
y = y / trapez_integral(x, y)
return x, y
def compute_penalty_3(x, y, cdf_spline_ref, method="L2"):
sq_total = trapez_integral(x, y)
res = []
for i in range(len(x)):
x_cur = np.delete(x, i)
y_cur = np.delete(y, i)
y_cur = y_cur * sq_total / trapez_integral(x_cur, y_cur)
cdf_spline = xy_to_cdf_spline(x_cur, y_cur)
res.append(fun_distance(cdf_spline, cdf_spline_ref, method=method))
return np.array(res)
# Version from spline
def downgrid_spline(x, y, n_grid_out, s_big=2, s_small=1e-16):
cdf_vals = trapez_integral_cum(x, y)
cdf_spline = UnivariateSpline(x=x, y=cdf_vals, s=np.inf, k=2)
cur_s = s_big
scale_factor = 0.5
while cur_s > s_small:
n_knots = len(cdf_spline.get_knots())
if n_knots >= n_grid_out:
break
cdf_spline.set_smoothing_factor(cur_s)
cur_s *= scale_factor
x_res, y_res = cdf_spline_to_xy(cdf_spline)
n_excess_points = len(x_res) - n_grid_out
if n_excess_points < 0:
        # If the output xy-grid does not have enough points, use the whole input
        # grid and possibly remove points later
x_res, y_res = x, y
n_excess_points = len(x_res) - n_grid_out
if n_excess_points > 0:
# Remove excess points if `s` got too small
for _ in range(n_excess_points):
x_res, y_res = remove_point_from_xy(x_res, y_res)
return x_res, y_res
def remove_point_from_xy(x, y):
# This uses "version 1" removing approach
penalty = compute_penalty(x, y)
    # Pick best index (excluding edges) as the one which delivers the smallest
    # change in total square compared to the current iteration
min_ind = np.argmin(penalty[1:-1]) + 1
x = np.delete(x, min_ind)
y = np.delete(y, min_ind)
y = y / trapez_integral(x, y)
return x, y
def cdf_spline_to_xy(spline):
dens_spline = spline.derivative()
x = dens_spline.get_knots()
y = np.clip(dens_spline(x), 0, None)
y = y / trapez_integral(x, y)
return x, y
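# --- Hedged usage sketch (added for illustration; not part of the original
# experiments). Spline-based downgridding of a dense normal-density xy-grid via
# `downgrid_spline()` defined above; demo names are illustrative only.
def _demo_downgrid_spline():
    x_demo = np.linspace(-5, 5, 1001)
    y_demo = ss.norm.pdf(x_demo)
    x_down, y_down = downgrid_spline(x_demo, y_demo, n_grid_out=15)
    # Output has exactly the requested number of points and integrates to ~1
    assert len(x_down) == 15
    assert abs(trapez_integral(x_down, y_down) - 1) < 1e-8
    return x_down, y_down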
# Downgrid xp-grid
def downgrid_xp(x, p, n_grid_out, metric="L2", plot_step=10, remove_edges=True):
c = np.concatenate(([0], np.cumsum(p)))
x_down, c_down = downgrid_metricpenalty(
x=x,
c=c,
n_grid_out=n_grid_out,
metric=metric,
plot_step=plot_step,
remove_edges=remove_edges,
)
p_down = np.diff(c_down)
return x_down, p_down
def downgrid_metricpenalty(
x, c, n_grid_out, metric="L2", plot_step=10, remove_edges=True
):
"""
    Here `c` holds the constant values on the intervals (-inf, x[0]),
    [x[0], x[1]), ..., [x[-2], x[-1]), and [x[-1], +inf), which also means that
    `len(c) == len(x) + 1`.
"""
x_orig, c_orig = x, c
x_grid = np.linspace(x[0], x[-1], 1001)
for i in range(len(x) - n_grid_out):
penalty = compute_metricpenalty(x, c, metric)
# Pick best index as the one which delivers the smallest penalty
if remove_edges:
min_ind = np.argmin(penalty)
else:
min_ind = np.argmin(penalty[1:-1]) + 1
if (i + 1) % plot_step == 0:
plt.plot(x, penalty, "-o")
plt.plot(x[min_ind], penalty[min_ind], "or")
plt.title(f"Penalty. Length of x = {len(x)}")
plt.show()
plt.plot(
x_grid,
interp1d(x_orig, c_orig[1:], kind="previous")(x_grid),
label="original",
)
plt.plot(
x_grid,
interp1d(
x,
c[1:],
kind="previous",
bounds_error=False,
fill_value=(c[0], c[-1]),
)(x_grid),
label="current",
)
plt.plot(x[min_ind], c[min_ind], "o", label="best index")
plt.title(f"Piecewise constant. Length of x = {len(x)}")
plt.legend()
plt.show()
# print(f"Delete x={x[min_ind]}")
x, c = delete_xc_index(x, c, min_ind, metric=metric)
return x, c
def compute_metricpenalty(x, c, metric):
dc_abs = np.abs(np.diff(c))
dx = np.diff(x)
if metric == "L2":
inner_penalty = dx[:-1] * dx[1:] * dc_abs[1:-1] / (dx[:-1] + dx[1:])
if metric == "L1":
inner_penalty = dc_abs[1:-1] * np.minimum(dx[:-1], dx[1:])
res = np.concatenate(([dc_abs[0] * dx[0]], inner_penalty, [dc_abs[-1] * dx[-1]]))
return np.sqrt(2) * res if metric == "L2" else res
def delete_xc_index(x, c, ind, metric):
if ind == 0:
c = np.delete(c, 1)
elif ind == (len(x) - 1):
c = np.delete(c, ind)
else:
if metric == "L2":
alpha = (x[ind] - x[ind - 1]) / (x[ind + 1] - x[ind - 1])
elif metric == "L1":
mid = 0.5 * (x[ind - 1] + x[ind + 1])
# alpha = 1 if x < mid; alpha = 0.5 if x = mid; alpha = 0 if x > mid
alpha = 0.5 * (0.0 + (mid < x[ind]) + (mid <= x[ind]))
        # Avoid modifying the original array
c_right = c[ind + 1]
c = np.delete(c, ind + 1)
c[ind] = alpha * c[ind] + (1 - alpha) * c_right
x = np.delete(x, ind)
return x, c
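# --- Hedged usage sketch (added for illustration; not part of the original
# experiments). Downgrids the xp-grid of a Binomial(10, 0.5) distribution with
# `downgrid_xp()`. Total probability is preserved because the first and last
# cumulative values are never modified; demo names are made up for this example.
def _demo_downgrid_xp():
    x_demo = np.arange(11, dtype="float64")
    p_demo = ss.binom.pmf(np.arange(11), 10, 0.5)
    x_down, p_down = downgrid_xp(x_demo, p_demo, n_grid_out=5, plot_step=10 ** 6)
    assert len(x_down) == 5
    assert np.allclose(np.sum(p_down), np.sum(p_demo))
    return x_down, p_down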
# Version from xp. Retype xy-grid to xp-grid, downgrid xp-grid, retype back to
# xy-grid.
def downgrid_fromxp(x, y, n_grid_out, plot_step=10, metric="L2"):
p = p_from_xy(x, y)
x_down, p_down = downgrid_xp(
x,
p,
n_grid_out=n_grid_out,
metric=metric,
plot_step=plot_step,
remove_edges=False,
)
# Clip possible negative values
y_down = np.clip(y_from_xp(x_down, p_down), 0, None)
y_down = y_down / trapez_integral(x_down, y_down)
return x_down, y_down
def y_from_xp(x, p, metric="L2"):
metric_coeffs = {"L1": 0.75, "L2": 2 / 3, "Linf": 0.5}
coeff = metric_coeffs[metric]
dx = np.diff(x)
dx_lead = np.concatenate([dx, [0]])
dx_lag = np.concatenate([[0], dx])
banded_matrix = 0.5 * np.array(
[dx_lag * (1 - coeff), (dx_lag + dx_lead) * coeff, dx_lead * (1 - coeff)]
)
return solve_banded(l_and_u=(1, 1), ab=banded_matrix, b=p)
def p_from_xy(x, y, metric="L2"):
metric_coeffs = {"L1": 0.75, "L2": 2 / 3, "Linf": 0.5}
coeff = metric_coeffs[metric]
cump = trapez_integral_cum(x, y)
dx = np.diff(x)
# This is missing last value, which is 1
disc_cump = cump[:-1] + 0.5 * dx * (coeff * y[:-1] + (1 - coeff) * y[1:])
p = np.diff(disc_cump, prepend=0, append=1)
return p
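# --- Hedged sanity-check sketch (added for illustration; not part of the original
# experiments). For a normalized xy-grid, `y_from_xp()` solves the banded system
# whose forward application is `p_from_xy()`, so the round trip should
# approximately reproduce the original y-values.
def _demo_xy_xp_roundtrip():
    x_demo = np.linspace(-3, 3, 21)
    y_demo = ss.norm.pdf(x_demo)
    y_demo = y_demo / trapez_integral(x_demo, y_demo)
    p_demo = p_from_xy(x_demo, y_demo)
    y_back = y_from_xp(x_demo, p_demo)
    assert np.allclose(y_back, y_demo)
    return x_demo, y_back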
# Version from slopes. Convert xy-grid to xslope-grid, downgrid with discrete
# downgridding, convert back to xy-grid.
def downgrid_fromslopes(x, y, n_grid_out, metric="L2", plot_step=10):
slopes = np.diff(y) / np.diff(x)
c = np.concatenate(([0], slopes, [0]))
x_down, c_down = downgrid_metricpenalty(
x, c, n_grid_out=n_grid_out, metric=metric, plot_step=plot_step
)
y0 = np.interp(x_down[0], x, y)
y_down = np.concatenate(([y0], y0 + np.cumsum(c_down[1:-1] * np.diff(x_down))))
y_down = np.clip(y_down, 0, None)
y_down = y_down / trapez_integral(x_down, y_down)
return x_down, y_down
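# --- Hedged usage sketch (added for illustration; not part of the original
# experiments). Downgridding through the slope representation defined just above;
# the demo grid and names are made up for this example.
def _demo_downgrid_fromslopes():
    x_demo = np.linspace(-4, 4, 81)
    y_demo = ss.norm.pdf(x_demo)
    y_demo = y_demo / trapez_integral(x_demo, y_demo)
    x_down, y_down = downgrid_fromslopes(x_demo, y_demo, n_grid_out=9, plot_step=10 ** 6)
    assert len(x_down) == 9
    assert abs(trapez_integral(x_down, y_down) - 1) < 1e-8
    return x_down, y_down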
# Helper functions
def trapez_integral_cum(x, y):
"""Compute cumulative integral with trapezoidal formula.
    Each element of the output represents the cumulative probability accumulated
    **before** the corresponding "x" value.
"""
res = np.cumsum(0.5 * np.diff(x) * (y[:-1] + y[1:]))
return np.concatenate([[0], res])
def trapez_integral(x, y):
return np.sum(0.5 * np.diff(x) * (y[:-1] + y[1:]))
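# Hedged micro-example (illustrative): for the uniform density y = (1, 1, 1) on
# x = (0, 0.5, 1) the cumulative values are (0, 0.5, 1) - the probability
# accumulated before each x-value - and the total square is 1.
assert np.allclose(
    trapez_integral_cum(np.array([0.0, 0.5, 1.0]), np.array([1.0, 1.0, 1.0])),
    [0.0, 0.5, 1.0],
)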
def delete_index(x, y, cump_ref, ind):
x = np.delete(x, ind)
y =
|
np.delete(y, ind)
|
numpy.delete
|
# Copyright 2021 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
# and Applied Computer Vision Lab, Helmholtz Imaging Platform
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from batchgenerators.augmentations.spatial_transformations import augment_rot90, augment_resize, augment_transpose_axes
class AugmentTransposeAxes(unittest.TestCase):
def setUp(self):
np.random.seed(123)
self.data_3D = np.random.random((2, 4, 5, 6))
self.seg_3D = np.random.random(self.data_3D.shape)
def test_transpose_axes(self):
n_iter = 1000
tmp = 0
for i in range(n_iter):
data_out, seg_out = augment_transpose_axes(self.data_3D, self.seg_3D, axes=(1, 0))
if np.array_equal(data_out, np.swapaxes(self.data_3D, 1, 2)):
tmp += 1
self.assertAlmostEqual(tmp, n_iter/2., delta=10)
class AugmentResize(unittest.TestCase):
def setUp(self):
np.random.seed(123)
self.data_3D = np.random.random((2, 12, 14, 31))
self.seg_3D = np.random.random(self.data_3D.shape)
def test_resize(self):
data_resized, seg_resized = augment_resize(self.data_3D, self.seg_3D, target_size=15)
mean_resized = float(np.mean(data_resized))
mean_original = float(np.mean(self.data_3D))
self.assertAlmostEqual(mean_original, mean_resized, places=2)
self.assertTrue(all((data_resized.shape[i] == 15 and seg_resized.shape[i] == 15) for i in
range(1, len(data_resized.shape))))
def test_resize2(self):
data_resized, seg_resized = augment_resize(self.data_3D, self.seg_3D, target_size=(7, 5, 6))
mean_resized = float(np.mean(data_resized))
mean_original = float(np.mean(self.data_3D))
self.assertAlmostEqual(mean_original, mean_resized, places=2)
self.assertTrue(all([i == j for i, j in zip(data_resized.shape[1:], (7, 5, 6))]))
self.assertTrue(all([i == j for i, j in zip(seg_resized.shape[1:], (7, 5, 6))]))
class AugmentRot90(unittest.TestCase):
def setUp(self):
np.random.seed(123)
self.data_3D = np.random.random((2, 4, 5, 6))
self.seg_3D = np.random.random(self.data_3D.shape)
self.num_rot = [1]
def test_rotation_checkerboard(self):
data_2d_checkerboard = np.zeros((1, 2, 2))
data_2d_checkerboard[0, 0, 0] = 1
data_2d_checkerboard[0, 1, 1] = 1
data_rotated_list = []
n_iter = 1000
for i in range(n_iter):
d_r, _ = augment_rot90(np.copy(data_2d_checkerboard), None, num_rot=[4,1], axes=[0, 1])
data_rotated_list.append(d_r)
data_rotated_np = np.array(data_rotated_list)
sum_data_list = np.sum(data_rotated_np, axis=0)
a = np.unique(sum_data_list)
self.assertAlmostEqual(a[0], n_iter/2, delta=20)
self.assertTrue(len(a) == 2)
def test_rotation(self):
data_rotated, seg_rotated = augment_rot90(np.copy(self.data_3D), np.copy(self.seg_3D), num_rot=self.num_rot,
axes=[0, 1])
for i in range(self.data_3D.shape[1]):
self.assertTrue(np.array_equal(self.data_3D[:, i, :, :], np.flip(data_rotated[:, :, i, :], axis=1)))
self.assertTrue(np.array_equal(self.seg_3D[:, i, :, :], np.flip(seg_rotated[:, :, i, :], axis=1)))
def test_randomness_rotation_axis(self):
tmp = 0
for j in range(100):
data_rotated, seg_rotated = augment_rot90(np.copy(self.data_3D), np.copy(self.seg_3D), num_rot=self.num_rot,
axes=[0, 1, 2])
if np.array_equal(self.data_3D[:, 0, :, :], np.flip(data_rotated[:, :, 0, :], axis=1)):
tmp += 1
self.assertAlmostEqual(tmp, 33, places=2)
def test_rotation_list(self):
num_rot = [1, 3]
data_rotated, seg_rotated = augment_rot90(np.copy(self.data_3D), np.copy(self.seg_3D), num_rot=num_rot,
axes=[0, 1])
tmp = 0
for i in range(self.data_3D.shape[1]):
# check for normal and inverse rotations
normal_rotated = np.array_equal(self.data_3D[:, i, :, :], data_rotated[:, :, -i-1, :])
inverse_rotated = np.array_equal(self.data_3D[:, i, :, :], np.flip(data_rotated[:, :, i, :], axis=1))
if normal_rotated:
tmp += 1
self.assertTrue(normal_rotated or inverse_rotated)
self.assertTrue(np.array_equal(self.seg_3D[:, i, :, :], seg_rotated[:, :, -i - 1, :]) or
np.array_equal(self.seg_3D[:, i, :, :],
|
np.flip(seg_rotated[:, :, i, :], axis=1)
|
numpy.flip
|
import numpy as np
import pytest
from dask_histogram.bins import (
BinsStyle,
RangeStyle,
bins_range_styles,
bins_style,
normalize_bins_range,
)
def test_bins_styles_scalar():
# Valid
assert bins_style(ndim=1, bins=5) is BinsStyle.SingleScalar
assert bins_style(ndim=2, bins=(2, 5)) is BinsStyle.MultiScalar
assert bins_style(ndim=2, bins=[3, 4]) is BinsStyle.MultiScalar
# Invalid
with pytest.raises(
ValueError,
match="Total number of bins definitions must be equal to the dimensionality of the histogram.",
):
bins_style(ndim=3, bins=[2, 3])
with pytest.raises(
ValueError,
match="Total number of bins definitions must be equal to the dimensionality of the histogram.",
):
bins_style(ndim=4, bins=[2, 3, 4, 7, 8])
def test_bins_styles_sequence():
assert bins_style(ndim=1, bins=np.array([1, 2, 3])) is BinsStyle.SingleSequence
assert bins_style(ndim=1, bins=[1, 2, 3]) is BinsStyle.SingleSequence
assert bins_style(ndim=1, bins=(4, 5, 6)) is BinsStyle.SingleSequence
assert bins_style(ndim=2, bins=[[1, 2, 3], [4, 5, 7]]) is BinsStyle.MultiSequence
bins = [[1, 2, 6, 7], [1, 2, 3], [4, 7, 11, 12, 13]]
assert bins_style(ndim=3, bins=bins) is BinsStyle.MultiSequence
bins = (np.array([1.1, 2.2]), np.array([2.2, 4.4, 6.6]))
assert BinsStyle.MultiSequence is bins_style(ndim=2, bins=bins)
with pytest.raises(
ValueError,
match="Total number of bins definitions must be equal to the dimensionality of the histogram.",
):
bins_style(ndim=1, bins=[[1, 2], [4, 5]])
with pytest.raises(
ValueError,
match="Total number of bins definitions must be equal to the dimensionality of the histogram.",
):
bins_style(ndim=3, bins=[[1, 2], [4, 5]])
with pytest.raises(
ValueError,
match="Total number of bins definitions must be equal to the dimensionality of the histogram.",
):
bins = (np.array([1.1, 2.2]), np.array([2.2, 4.4, 6.6]))
bins_style(ndim=3, bins=bins)
def test_bins_style_cannot_determine():
bins = 3.3
with pytest.raises(ValueError, match="Could not determine bin style from bins=3.3"):
bins_style(ndim=1, bins=bins)
def test_bins_range_styles():
bs, rs = bins_range_styles(ndim=2, bins=(3, 4), range=((0, 1), (0, 1)))
assert bs is BinsStyle.MultiScalar
assert rs is RangeStyle.MultiPair
bs, rs = bins_range_styles(ndim=1, bins=10, range=(0, 1))
assert bs is BinsStyle.SingleScalar
assert rs is RangeStyle.SinglePair
bs, rs = bins_range_styles(ndim=2, bins=[[1, 2, 3], [4, 5, 6]], range=None)
assert bs is BinsStyle.MultiSequence
assert rs is RangeStyle.IsNone
bs, rs = bins_range_styles(ndim=1, bins=[1, 2, 3], range=None)
assert bs is BinsStyle.SingleSequence
assert rs is RangeStyle.IsNone
bins = np.array([[1, 2, 3], [2, 5, 6]])
bs, rs = bins_range_styles(ndim=2, bins=bins, range=None)
assert bs is BinsStyle.MultiSequence
assert rs is RangeStyle.IsNone
with pytest.raises(
ValueError,
match="range cannot be None when bins argument is a scalar or sequence of scalars.",
):
bins_range_styles(ndim=1, bins=3, range=None)
with pytest.raises(
ValueError,
match="range cannot be None when bins argument is a scalar or sequence of scalars.",
):
bins_range_styles(ndim=2, bins=3, range=None)
with pytest.raises(
ValueError,
match="range cannot be None when bins argument is a scalar or sequence of scalars.",
):
bins_range_styles(ndim=2, bins=(3, 8), range=None)
with pytest.raises(
ValueError,
match="For a single scalar bin definition, one range tuple must be defined.",
):
bins_range_styles(ndim=1, bins=5, range=((2, 3), (4, 5)))
def test_normalize_bins_range():
# 1D, scalar bins, single range
ndim = 1
bins, range = 5, (3, 3)
bins, range = normalize_bins_range(ndim, bins, range)
assert bins == (5,)
assert range == ((3, 3),)
# 1D, sequence bins, no range
ndim = 1
bins, range = [1, 2, 3], None
bins, range = normalize_bins_range(ndim, bins, range)
assert bins == ([1, 2, 3],)
assert range == (None,)
    # 2D, single scalar bins, single range
ndim = 2
bins, range = 5, (3, 3)
bins, range = normalize_bins_range(ndim, bins, range)
assert bins == (5, 5)
assert range == ((3, 3), (3, 3))
# 2D, sequence bins, no range
ndim = 2
bins, range = [[1, 2, 3], [4, 5, 6]], None
bins, range = normalize_bins_range(ndim, bins, range)
assert bins == [[1, 2, 3], [4, 5, 6]]
assert range == (None, None)
# 2D, numpy arrays as bins, no range
ndim = 2
bins, range = (np.array([1, 2, 3]), np.array([4, 5, 6])), None
bins, range = normalize_bins_range(ndim, bins, range)
assert len(bins) == 2
np.testing.assert_array_equal(bins[0], np.array([1, 2, 3]))
np.testing.assert_array_equal(bins[1], np.array([4, 5, 6]))
# 3D, single multidim numpy array as bins, no range
ndim = 3
bins, range = np.array([[1, 2, 3], [4, 5, 6], [1, 5, 6]]), None
bins, range = normalize_bins_range(ndim, bins, range)
assert len(bins) == 3
assert range == (None, None, None)
np.testing.assert_array_equal(bins[0], np.array([1, 2, 3]))
np.testing.assert_array_equal(bins[1], np.array([4, 5, 6]))
np.testing.assert_array_equal(bins[2], np.array([1, 5, 6]))
np.testing.assert_array_equal(bins,
|
np.array([[1, 2, 3], [4, 5, 6], [1, 5, 6]])
|
numpy.array
|
"""main
Main file to run to create figures 2-5 in the paper.
All values are in SI units (m, s, T, etc) unless otherwise noted.
Dependencies:
sigpy (https://sigpy.readthedocs.io/en/latest/mri_rf.html)
numpy
scipy
matplotlib
Author: <NAME>
Last Modified: 6/2/21
"""
import numpy as np
import sigpy as sp
import sigpy.mri as mr
import sigpy.mri.rf as rf
import sigpy.plot as pl
import scipy.signal as signal
import matplotlib.pyplot as plt
from scipy.special import *
from scipy.integrate import odeint
import matplotlib.gridspec as gridspec
import csv
from various_constants import *
from pulse_generator_functions import *
from simulation_functions import *
from plotting_functions import *
#################################################################
# Make Figure 2: equivalent single-photon, two-photon, and freq mod slice select
# Make a grid of subplots and label the rows and columns
fig = plt.figure(figsize=(20, 10))
cols = 2+SEQUENCE_PLOT_END # Number of columns used for pulse sequence plot
outer = gridspec.GridSpec(4, cols, wspace=1, hspace=0.3)
ax = plt.Subplot(fig, outer[cols])
t = ax.text(0.7,0.2, 'Single-Photon', fontsize=CATEGORY_SIZE, rotation=90)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[2*cols])
t = ax.text(0.7,0.2, 'Two-Photon', fontsize=CATEGORY_SIZE, rotation=90)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[3*cols])
t = ax.text(0.7,-0.1, 'Frequency Modulation', fontsize=CATEGORY_SIZE, rotation=90)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[1:SEQUENCE_PLOT_END])
t = ax.text(0.5,0, 'Pulse Sequence', fontsize=CATEGORY_SIZE)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[SEQUENCE_PLOT_END])
t = ax.text(0.5,0, 'Simulation Slice Profile', fontsize=CATEGORY_SIZE)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[SEQUENCE_PLOT_END+1])
t = ax.text(0.5,0, 'Experimental Slice Profile', fontsize=CATEGORY_SIZE)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
# 2a) Make single photon version
pulse = slr_pulse(N, TB, FA, name='2a')
bz_pulse = np.zeros(N)
gz_pulse = np.zeros(N)
sim_duration = PULSE_DURATION*1.5
t = np.linspace(0,sim_duration,WAVEFORM_RES)
RF = np.zeros(len(t), dtype=complex)
B1z = np.zeros(len(t))
Gz = np.zeros(len(t))
for i in range(len(t)):
RF[i] = bxy_waveform(t[i], SLICE_PEAK, PULSE_DURATION, pulse)
B1z[i] = bz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, np.zeros(N))
Gz[i] = gz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, gz_pulse)
plot_waveform(fig, outer[cols+1:cols+SEQUENCE_PLOT_END], t, np.abs(RF), -1*np.angle(RF), B1z, Gz)
if PRINT_MAX_VALS:
print('2a max B1xy: ' + str(np.max(np.abs(pulse))))
M = np.array([0, 0, M0])
t = np.linspace(0, sim_duration, 101)
final_m = np.zeros(XRES, dtype=complex)
x_vals = np.linspace(-XLIM,XLIM,XRES)
for i in range(XRES):
x = x_vals[i]
y = 0
sol = odeint(bloch, M, t, args=(x, y, pulse, bz_pulse, gz_pulse, SLICE_PEAK, PULSE_DURATION),atol=1e-7, rtol=1e-11, hmax=2e-6, mxstep=5000)
final_m[i] = np.complex(sol[-1,0], sol[-1,1])
plot_sim(fig, outer[cols+SEQUENCE_PLOT_END], x_vals, np.abs(final_m), -1*np.angle(final_m))
plot_experiment(fig, outer[cols+SEQUENCE_PLOT_END+1], '2a.npy')
# 2b) Make two-photon version
pulse = slr_pulse(N, TB, FA, freq=FZ, phase=g*B1Z_AMP/(2*np.pi*FZ)-np.pi/2, name='2b') / (j1(g/(2*np.pi*FZ) * B1Z_AMP))
bz_pulse = B1Z_AMP * np.sin(2e-6*np.arange(N)*2*np.pi*FZ)
t = np.linspace(0,sim_duration,WAVEFORM_RES)
RF = np.zeros(len(t), dtype=complex)
B1z = np.zeros(len(t))
Gz = np.zeros(len(t))
for i in range(len(t)):
RF[i] = bxy_waveform(t[i], SLICE_PEAK, PULSE_DURATION, pulse)
B1z[i] = bz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, bz_pulse)
Gz[i] = gz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, gz_pulse)
plot_waveform(fig, outer[2*cols+1:2*cols+SEQUENCE_PLOT_END], t, np.abs(RF), -1*np.angle(RF), B1z, Gz)
if PRINT_MAX_VALS:
print('2b max B1xy: ' + str(np.max(np.abs(pulse))))
M = np.array([0, 0, M0])
t = np.linspace(0, sim_duration, 101)
final_m = np.zeros(XRES, dtype=complex)
x_vals = np.linspace(-XLIM,XLIM,XRES)
for i in range(XRES):
x = x_vals[i]
y = 0
sol = odeint(bloch, M, t, args=(x, y, pulse, bz_pulse, gz_pulse, SLICE_PEAK, PULSE_DURATION),atol=1e-7, rtol=1e-11, hmax=2e-6, mxstep=5000)
final_m[i] = np.complex(sol[-1,0], sol[-1,1])
plot_sim(fig, outer[2*cols+SEQUENCE_PLOT_END], x_vals, np.abs(final_m), -1*np.angle(final_m))
plot_experiment(fig, outer[2*cols+SEQUENCE_PLOT_END+1], '2b.npy')
# 2c) Make frequency modulated version
pulse = fm_pulse(N, TB, FA, FZ, B1Z_AMP, phase=g*B1Z_AMP/(2*np.pi*FZ)-np.pi/2, name='2c')
bz_pulse = np.zeros(N)
t = np.linspace(0,sim_duration,WAVEFORM_RES)
RF = np.zeros(len(t), dtype=complex)
B1z = np.zeros(len(t))
Gz = np.zeros(len(t))
for i in range(len(t)):
RF[i] = bxy_waveform(t[i], SLICE_PEAK, PULSE_DURATION, pulse)
B1z[i] = bz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, bz_pulse)
Gz[i] = gz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, gz_pulse)
plot_waveform(fig, outer[3*cols+1:3*cols+SEQUENCE_PLOT_END], t, np.abs(RF), -1*np.angle(RF), B1z, Gz)
if PRINT_MAX_VALS:
print('2c max B1xy: ' + str(np.max(np.abs(pulse))))
M = np.array([0, 0, M0])
t = np.linspace(0, sim_duration, 101)
final_m = np.zeros(XRES, dtype=complex)
x_vals = np.linspace(-XLIM,XLIM,XRES)
for i in range(XRES):
x = x_vals[i]
y = 0
sol = odeint(bloch, M, t, args=(x, y, pulse, bz_pulse, gz_pulse, SLICE_PEAK, PULSE_DURATION),atol=1e-7, rtol=1e-11, hmax=2e-6, mxstep=5000)
final_m[i] = np.complex(sol[-1,0], sol[-1,1])
plot_sim(fig, outer[3*cols+SEQUENCE_PLOT_END], x_vals, np.abs(final_m), -1*np.angle(final_m))
plot_experiment(fig, outer[3*cols+SEQUENCE_PLOT_END+1], '2c.npy')
plt.savefig("figure2.pdf")
#################################################################
# Make Figure 3: slice shifting
# Make a grid of subplots and label the rows and columns
fig = plt.figure(figsize=(20, 10))
cols = 2+SEQUENCE_PLOT_END
outer = gridspec.GridSpec(4, cols, wspace=1, hspace=0.3)
ax = plt.Subplot(fig, outer[cols])
t = ax.text(0.7,0.3, r'$\omega_{xy}$ Shift', fontsize=CATEGORY_SIZE, rotation=90)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[2*cols])
t = ax.text(0.7,0.2, r'Constant $B_{1z}$', fontsize=CATEGORY_SIZE, rotation=90)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[3*cols])
t = ax.text(0.7,0.3, r'$\omega_{z}$ Shift', fontsize=CATEGORY_SIZE, rotation=90)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[1:SEQUENCE_PLOT_END])
t = ax.text(0.5,0, 'Pulse Sequence', fontsize=CATEGORY_SIZE)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[SEQUENCE_PLOT_END])
t = ax.text(0.5,0, 'Simulation Slice Profile', fontsize=CATEGORY_SIZE)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[SEQUENCE_PLOT_END+1])
t = ax.text(0.5,0, 'Experimental Slice Profile', fontsize=CATEGORY_SIZE)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
# 3a) Shifting using wxy offset
f_offset = 2*TB/PULSE_DURATION
if PRINT_MAX_VALS:
print('3 frequency offset: ' + str(f_offset))
pulse = slr_pulse(N, TB, FA, freq=(FZ-f_offset), phase=g*B1Z_AMP/(2*np.pi*FZ)-np.pi/2, name='3a') / (j1(g/(2*np.pi*FZ) * B1Z_AMP)) # this is actually increasing the frequency
bz_pulse = B1Z_AMP * np.sin(2e-6*np.arange(N)*2*np.pi*FZ)
t = np.linspace(0,sim_duration,WAVEFORM_RES)
RF = np.zeros(len(t), dtype=complex)
B1z = np.zeros(len(t))
Gz = np.zeros(len(t))
for i in range(len(t)):
RF[i] = bxy_waveform(t[i], SLICE_PEAK, PULSE_DURATION, pulse)
B1z[i] = bz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, bz_pulse)
Gz[i] = gz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, gz_pulse)
plot_waveform(fig, outer[cols+1:cols+SEQUENCE_PLOT_END], t, np.abs(RF), -1*np.angle(RF), B1z, Gz)
if PRINT_MAX_VALS:
print('3a max B1xy: ' + str(np.max(np.abs(pulse))))
M = np.array([0, 0, M0])
t = np.linspace(0, sim_duration, 101)
final_m = np.zeros(XRES, dtype=complex)
x_vals = np.linspace(-XLIM,XLIM,XRES)
for i in range(XRES):
x = x_vals[i]
y = 0
sol = odeint(bloch, M, t, args=(x, y, pulse, bz_pulse, gz_pulse, SLICE_PEAK, PULSE_DURATION),atol=1e-7, rtol=1e-11, hmax=2e-6, mxstep=5000)
final_m[i] = np.complex(sol[-1,0], sol[-1,1])
plot_sim(fig, outer[cols+SEQUENCE_PLOT_END], x_vals, np.abs(final_m), -1*np.angle(final_m))
plot_experiment(fig, outer[cols+SEQUENCE_PLOT_END+1], '3a.npy')
# 3b) Shifting using constant B1z
pulse = slr_pulse(N, TB, FA, freq=FZ, phase=g*B1Z_AMP/(2*np.pi*FZ)-np.pi/2, name='3b') / (j1(g/(2*np.pi*FZ) * B1Z_AMP))
bz_pulse = B1Z_AMP * np.sin(2e-6*np.arange(N)*2*np.pi*FZ)
t = np.linspace(0,sim_duration,WAVEFORM_RES)
RF = np.zeros(len(t), dtype=complex)
B1z = np.zeros(len(t))
Gz = np.zeros(len(t))
for i in range(len(t)):
RF[i] = bxy_waveform(t[i], SLICE_PEAK, PULSE_DURATION, pulse)
B1z[i] = bz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, bz_pulse, dc_value=-2*np.pi*f_offset/g) # subtract to get the same direction as adding wxy
Gz[i] = gz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, gz_pulse)
plot_waveform(fig, outer[2*cols+1:2*cols+SEQUENCE_PLOT_END], t, np.abs(RF), -1*np.angle(RF), B1z, Gz)
if PRINT_MAX_VALS:
print('3b max B1xy: ' + str(np.max(np.abs(pulse))))
print('3b rise-time: ' + str(SLICE_PEAK/SLEW_LIMIT))
if WRITE_WAVEFORM_FILES:
make_b1z_csv(bz_pulse, SLICE_PEAK, PULSE_DURATION, '3b.csv', dc_value=-2*np.pi*f_offset/g)
M = np.array([0, 0, M0])
t = np.linspace(0, sim_duration, 101)
final_m = np.zeros(XRES, dtype=complex)
x_vals = np.linspace(-XLIM,XLIM,XRES)
for i in range(XRES):
x = x_vals[i]
y = 0
sol = odeint(bloch, M, t, args=(x, y, pulse, bz_pulse, gz_pulse, SLICE_PEAK, PULSE_DURATION, 0, -2*np.pi*f_offset/g),atol=1e-7, rtol=1e-11, hmax=2e-6, mxstep=5000)
final_m[i] = np.complex(sol[-1,0], sol[-1,1])
plot_sim(fig, outer[2*cols+SEQUENCE_PLOT_END], x_vals, np.abs(final_m), -1*np.angle(final_m))
plot_experiment(fig, outer[2*cols+SEQUENCE_PLOT_END+1], '3b.npy')
# 3c) Shifting using wz offset
pulse = slr_pulse(N, TB, FA, freq=FZ, phase=g*B1Z_AMP/(2*np.pi*FZ)-np.pi/2, name='3c') / (j1(g/(2*np.pi*(FZ+f_offset)) * B1Z_AMP)) # compensate for different wz freq
bz_pulse = B1Z_AMP * np.sin(2e-6*np.arange(N)*2*np.pi*(FZ+f_offset))
t = np.linspace(0,sim_duration,WAVEFORM_RES)
RF = np.zeros(len(t), dtype=complex)
B1z = np.zeros(len(t))
Gz = np.zeros(len(t))
for i in range(len(t)):
RF[i] = bxy_waveform(t[i], SLICE_PEAK, PULSE_DURATION, pulse)
B1z[i] = bz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, bz_pulse)
Gz[i] = gz_waveform(t[i], SLICE_PEAK, PULSE_DURATION, gz_pulse)
plot_waveform(fig, outer[3*cols+1:3*cols+SEQUENCE_PLOT_END], t, np.abs(RF), -1*np.angle(RF), B1z, Gz)
if PRINT_MAX_VALS:
print('3c max B1xy: ' + str(np.max(np.abs(pulse))))
M = np.array([0, 0, M0])
t = np.linspace(0, sim_duration, 101)
final_m = np.zeros(XRES, dtype=complex)
x_vals = np.linspace(-XLIM,XLIM,XRES)
for i in range(XRES):
x = x_vals[i]
y = 0
sol = odeint(bloch, M, t, args=(x, y, pulse, bz_pulse, gz_pulse, SLICE_PEAK, PULSE_DURATION),atol=1e-7, rtol=1e-11, hmax=2e-6, mxstep=5000)
final_m[i] = np.complex(sol[-1,0], sol[-1,1])
plot_sim(fig, outer[3*cols+SEQUENCE_PLOT_END], x_vals, np.abs(final_m), -1*np.angle(final_m))
plot_experiment(fig, outer[3*cols+SEQUENCE_PLOT_END+1], '3c.npy')
plt.savefig("figure3.pdf")
#################################################################
# Make Figure 4: Modulation using B1z or B1xy
# Make a grid of subplots and label the rows and columns
fig = plt.figure(figsize=(20, 10))
cols = 2+SEQUENCE_PLOT_END
outer = gridspec.GridSpec(4, cols, wspace=1, hspace=0.3)
ax = plt.Subplot(fig, outer[cols])
t = ax.text(0.7,0.1, r'$B_{1xy}$ Mod Two-Photon', fontsize=CATEGORY_SIZE-2, rotation=90)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[2*cols])
t = ax.text(0.7,0.1, r'$B_{1z}$ Mod Two-Photon', fontsize=CATEGORY_SIZE-2, rotation=90)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[3*cols])
t = ax.text(0.7,0.1, 'Both Mod Two-Photon', fontsize=CATEGORY_SIZE-2, rotation=90)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[1:SEQUENCE_PLOT_END])
t = ax.text(0.5,0, 'Pulse Sequence', fontsize=CATEGORY_SIZE)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[SEQUENCE_PLOT_END])
t = ax.text(0.5,0, 'Simulation Slice Profile', fontsize=CATEGORY_SIZE)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
ax = plt.Subplot(fig, outer[SEQUENCE_PLOT_END+1])
t = ax.text(0.5,0, 'Experimental Slice Profile', fontsize=CATEGORY_SIZE)
t.set_ha('center')
ax.axis("off")
fig.add_subplot(ax)
# 4a) Two-photon slice selection using B1xy modulation
pulse = slr_pulse(N_FIG4, TB_FIG4, FA, freq=FZ, name='4a') / (j1(g/(2*np.pi*FZ) * B1Z_AMP))
bz_pulse = -1*B1Z_AMP * np.cos(2e-6*np.arange(N_FIG4)*2*np.pi*FZ)
sim_duration_fig4 = PULSE_DURATION_FIG4*1.5
if WRITE_WAVEFORM_FILES:
write_rf_pulse_for_heartvista(pulse, '4a')
t = np.linspace(0,sim_duration_fig4,WAVEFORM_RES)
RF = np.zeros(len(t), dtype=complex)
B1z = np.zeros(len(t))
Gz = np.zeros(len(t))
for i in range(len(t)):
RF[i] = bxy_waveform(t[i], SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, pulse)
B1z[i] = bz_waveform(t[i], SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, bz_pulse)
Gz[i] = gz_waveform(t[i], SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, gz_pulse)
plot_waveform(fig, outer[cols+1:cols+SEQUENCE_PLOT_END], t, np.abs(RF), -1*np.angle(RF), B1z, Gz, zoom_time=[5.9, 6.1])
if PRINT_MAX_VALS:
print('4a max B1xy: ' + str(np.max(np.abs(pulse))))
M = np.array([0, 0, M0])
t = np.linspace(0, sim_duration_fig4, 101)
final_m = np.zeros(XRES, dtype=complex)
x_vals = np.linspace(-XLIM,XLIM,XRES)
for i in range(XRES):
x = x_vals[i]
y = 0
sol = odeint(bloch, M, t, args=(x, y, pulse, bz_pulse, gz_pulse, SLICE_PEAK_FIG4, PULSE_DURATION_FIG4),atol=1e-7, rtol=1e-11, hmax=2e-6, mxstep=5000)
final_m[i] = np.complex(sol[-1,0], sol[-1,1])
plot_sim(fig, outer[cols+SEQUENCE_PLOT_END], x_vals, np.abs(final_m), -1*np.angle(final_m))
plot_experiment(fig, outer[cols+SEQUENCE_PLOT_END+1], '4a.npy')
# 4b) Two-photon slice selection using B1z modulation
max_bessel_arg = g*B1Z_AMP/(2*np.pi*FZ)
pulse = slr_pulse(N_FIG4, TB_FIG4, FA, freq=0)
scale = np.max(np.abs(pulse)) / j1(max_bessel_arg)
pulse = pulse / np.max(np.abs(pulse)) * j1(max_bessel_arg)
pulse_bz1 = np.zeros(len(pulse))
from scipy.optimize import minimize
def diff(x,a):
yt = j1(x)
return (yt - a )**2
for i in range(len(pulse)):
    res = minimize(diff, 0, args=(np.real(pulse[i]),), bounds=[(-max_bessel_arg, max_bessel_arg)])
pulse_bz1[i] = res.x[0]
bz_pulse = -1*pulse_bz1 * 2*np.pi*FZ/g * np.cos(2e-6*np.arange(N_FIG4)*2*np.pi*FZ) # let's make this a cos to get rid of phase differences
if WRITE_WAVEFORM_FILES:
make_b1z_csv(bz_pulse, SLICE_PEAK, PULSE_DURATION_FIG4, '4b.csv')
pulse = np.zeros(len(pulse), dtype=complex)
for i in range(len(pulse)):
pulse[i] = scale * np.complex(np.cos(2e-6*i*2*np.pi*FZ), np.sin(2e-6*i*2*np.pi*FZ))
if WRITE_WAVEFORM_FILES:
write_rf_pulse_for_heartvista(pulse, '4b')
t = np.linspace(0,sim_duration_fig4,WAVEFORM_RES)
RF = np.zeros(len(t), dtype=complex)
B1z = np.zeros(len(t))
Gz = np.zeros(len(t))
for i in range(len(t)):
RF[i] = bxy_waveform(t[i], SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, pulse)
B1z[i] = bz_waveform(t[i], SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, bz_pulse)
Gz[i] = gz_waveform(t[i], SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, gz_pulse)
plot_waveform(fig, outer[2*cols+1:2*cols+SEQUENCE_PLOT_END], t, np.abs(RF), -1*np.angle(RF), B1z, Gz, zoom_time=[5.9, 6.1])
if PRINT_MAX_VALS:
print('4b max B1xy: ' + str(np.max(np.abs(pulse))))
M = np.array([0, 0, M0])
t = np.linspace(0, sim_duration_fig4, 101)
final_m = np.zeros(XRES, dtype=complex)
x_vals = np.linspace(-XLIM,XLIM,XRES)
for i in range(XRES):
x = x_vals[i]
y = 0
sol = odeint(bloch, M, t, args=(x, y, pulse, bz_pulse, gz_pulse, SLICE_PEAK_FIG4, PULSE_DURATION_FIG4),atol=1e-7, rtol=1e-11, hmax=2e-6, mxstep=5000)
final_m[i] = np.complex(sol[-1,0], sol[-1,1])
plot_sim(fig, outer[2*cols+SEQUENCE_PLOT_END], x_vals, np.abs(final_m), -1*np.angle(final_m))
plot_experiment(fig, outer[2*cols+SEQUENCE_PLOT_END+1], '4b.npy')
# 4c) Two-photon slice selection using both B1xy and B1z modulation
pulse = slr_pulse(N_FIG4, TB_FIG4, FA, freq=0) / j1(g/(2*np.pi*FZ) * B1Z_AMP)
for i in range(len(pulse)):
if i<N_FIG4/2:
pulse[i] = scale
else:
bz_pulse[i] = -1*B1Z_AMP * np.cos(2e-6*i*2*np.pi*FZ)
pulse[i] = pulse[i] * np.complex(np.cos(2e-6*i*2*np.pi*FZ), np.sin(2e-6*i*2*np.pi*FZ))
if WRITE_WAVEFORM_FILES:
write_rf_pulse_for_heartvista(pulse, '4c')
make_b1z_csv(bz_pulse, SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, '4c.csv')
t = np.linspace(0,sim_duration_fig4,WAVEFORM_RES)
RF = np.zeros(len(t), dtype=complex)
B1z = np.zeros(len(t))
Gz = np.zeros(len(t))
for i in range(len(t)):
RF[i] = bxy_waveform(t[i], SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, pulse)
B1z[i] = bz_waveform(t[i], SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, bz_pulse)
Gz[i] = gz_waveform(t[i], SLICE_PEAK_FIG4, PULSE_DURATION_FIG4, gz_pulse)
plot_waveform(fig, outer[3*cols+1:3*cols+SEQUENCE_PLOT_END], t,
|
np.abs(RF)
|
numpy.abs
|
'''Spectral Modelling'''
from __future__ import print_function, division
import numpy as np
import numpy.lib.recfunctions as rf
from mla.spectral import *
from mla.timing import *
import scipy.stats
from mla import tools
class PSinjector(object):
r'''injector of point source'''
def __init__(self, spectrum, mc , signal_time_profile = None , background_time_profile = (0,1)):
        r'''Initialize the injector with a spectrum and signal_time_profile. background_time_profile can be a generic_profile or a time range.
args:
Spectrum: object inherited from BaseSpectrum.
mc: Monte Carlo simulation set
signal_time_profile(optional):Object inherited from generic_profile.Default is the same as background_time_profile.
background_time_profile(optional):Object inherited from generic_profile.Default is a uniform_profile with time range from 0 to 1.
'''
self.spectrum = spectrum
self.mc = mc
if isinstance(background_time_profile,generic_profile):
self.background_time_profile = background_time_profile
else:
self.background_time_profile = uniform_profile(background_time_profile[0],background_time_profile[1])
if signal_time_profile == None:
self.signal_time_profile = self.background_time_profile
else:
self.signal_time_profile = signal_time_profile
return
def set_backround(self, background ,grl ,background_window = 14):
        r'''Set the background information that will later be used when drawing data as background.
args:
background:Background data
grl:The good run list
            background_window: The time window (days) that will be used to estimate the background rate and draw samples from. Default is 14 days.
'''
start_time = self.background_time_profile.get_range()[0]
fully_contained = (grl['start'] >= start_time-background_window) &\
(grl['stop'] < start_time)
start_contained = (grl['start'] < start_time-background_window) &\
(grl['stop'] > start_time-background_window)
background_runs = (fully_contained | start_contained)
if not np.any(background_runs):
print("ERROR: No runs found in GRL for calculation of "
"background rates!")
raise RuntimeError
background_grl = grl[background_runs]
# Get the number of events we see from these runs and scale
# it to the number we expect for our search livetime.
n_background = background_grl['events'].sum()
n_background /= background_grl['livetime'].sum()
n_background *= self.background_time_profile.effective_exposure()
self.n_background = n_background
self.background = background
return
def draw_data(self):
r'''Draw data sample
return:
background: background sample
'''
n_background_observed = np.random.poisson(self.n_background)
background = np.random.choice(self.background, n_background_observed).copy()
background['time'] = self.background_time_profile.random(len(background))
return background
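    # Hedged usage sketch (illustrative comment added for clarity; not part of the
    # original module). A typical background-only trial would look like:
    #     inj = PSinjector(spectrum, mc)
    #     inj.set_backround(background, grl, background_window=14)
    #     bkg_sample = inj.draw_data()
    # where `spectrum`, `mc`, `background` and `grl` are the user-supplied objects
    # and structured arrays described in the docstrings above.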
def update_spectrum(self, spectrum):
r"""Updating the injection spectrum.
args:
spectrum: Object inherited from BaseSpectrum.
"""
self.spectrum = spectrum
return
def add_background(self, background ,grl):
r''' Add Background data into the injector such that it can also inject background
args:
background: background dataset.
grl: Good run list.
'''
self.background = background
self.background_rate = len(background)/
|
np.sum(grl['livetime'])
|
numpy.sum
|
import logging
import numpy as np
from collections import OrderedDict
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.nnet import conv2d, ConvOp
from theano.gpuarray.blas import GpuCorrMM
from theano.gpuarray.basic_ops import gpu_contiguous
from blocks.bricks.cost import SquaredError
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
from blocks.graph import add_annotation, Annotation
from blocks.roles import add_role, PARAMETER, WEIGHT, BIAS
from utils import shared_param, AttributeDict
from nn import maxpool_2d, global_meanpool_2d, BNPARAM, softmax_n
logger = logging.getLogger('main.model')
floatX = theano.config.floatX
class LadderAE():
def __init__(self, p):
self.p = p
self.init_weights_transpose = False
self.default_lr = p.lr
self.shareds = OrderedDict()
self.rstream = RandomStreams(seed=p.seed)
self.rng = np.random.RandomState(seed=p.seed)
n_layers = len(p.encoder_layers)
assert n_layers > 1, "Need to define encoder layers"
assert n_layers == len(p.denoising_cost_x), (
"Number of denoising costs does not match with %d layers: %s" %
(n_layers, str(p.denoising_cost_x)))
def one_to_all(x):
""" (5.,) -> 5 -> (5., 5., 5.)
('relu',) -> 'relu' -> ('relu', 'relu', 'relu')
"""
if type(x) is tuple and len(x) == 1:
x = x[0]
if type(x) is float:
x = (np.float32(x),) * n_layers
if type(x) is str:
x = (x,) * n_layers
return x
p.decoder_spec = one_to_all(p.decoder_spec)
p.f_local_noise_std = one_to_all(p.f_local_noise_std)
acts = one_to_all(p.get('act', 'relu'))
assert n_layers == len(p.decoder_spec), "f and g need to match"
assert (n_layers == len(acts)), (
"Not enough activations given. Requires %d. Got: %s" %
(n_layers, str(acts)))
acts = acts[:-1] + ('softmax',)
def parse_layer(spec):
""" 'fc:5' -> ('fc', 5)
'5' -> ('fc', 5)
5 -> ('fc', 5)
'convv:3:2:2' -> ('convv', [3,2,2])
"""
if type(spec) is not str:
return "fc", spec
spec = spec.split(':')
l_type = spec.pop(0) if len(spec) >= 2 else "fc"
spec = map(int, spec)
spec = spec[0] if len(spec) == 1 else spec
return l_type, spec
enc = map(parse_layer, p.encoder_layers)
self.layers = list(enumerate(zip(enc, p.decoder_spec, acts)))
def weight(self, init, name, cast_float32=True, for_conv=False):
weight = self.shared(init, name, cast_float32, role=WEIGHT)
if for_conv:
return weight.dimshuffle('x', 0, 'x', 'x')
return weight
def bias(self, init, name, cast_float32=True, for_conv=False):
b = self.shared(init, name, cast_float32, role=BIAS)
if for_conv:
return b.dimshuffle('x', 0, 'x', 'x')
return b
def shared(self, init, name, cast_float32=True, role=PARAMETER, **kwargs):
p = self.shareds.get(name)
if p is None:
p = shared_param(init, name, cast_float32, role, **kwargs)
self.shareds[name] = p
return p
def counter(self):
name = 'counter'
p = self.shareds.get(name)
update = []
if p is None:
p_max_val = np.float32(10)
p = self.shared(np.float32(1), name, role=BNPARAM)
p_max = self.shared(p_max_val, name + '_max', role=BNPARAM)
update = [(p, T.clip(p + np.float32(1),
|
np.float32(0)
|
numpy.float32
|
import importlib
import os.path
import hydra
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from omegaconf import DictConfig
from skimage.io import imsave
from torch.utils.data import DataLoader
from psa.tool import imutils
from psa.voc12 import data
@hydra.main(config_path='./conf', config_name="infer_aff")
def run_app(cfg: DictConfig) -> None:
os.makedirs(cfg.out_rw, exist_ok=True)
model = getattr(importlib.import_module(cfg.network), 'Net')()
model.load_state_dict(torch.load(cfg.weights, map_location=torch.device('cpu')))
model.eval()
infer_dataset = data.VOC12ImageDataset(cfg.infer_list, voc12_root=cfg.voc12_root,
transform=torchvision.transforms.Compose(
[np.asarray,
model.normalize,
imutils.HWC_to_CHW]))
infer_data_loader = DataLoader(infer_dataset, shuffle=False, num_workers=cfg.num_workers, pin_memory=True)
for iter, (name, img) in enumerate(infer_data_loader):
name = name[0]
print(iter)
orig_shape = img.shape
padded_size = (int(np.ceil(img.shape[2] / 8) * 8), int(np.ceil(img.shape[3] / 8) * 8))
p2d = (0, padded_size[1] - img.shape[3], 0, padded_size[0] - img.shape[2])
img = F.pad(img, p2d)
dheight = int(np.ceil(img.shape[2] / 8))
dwidth = int(np.ceil(img.shape[3] / 8))
cam = np.load(os.path.join(cfg.cam_dir, name + '.npy'), allow_pickle=True).item()
cam_full_arr =
|
np.zeros((21, orig_shape[2], orig_shape[3]), np.float32)
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
"""Module description:
Create world lf0 and vuv feature labels for .wav files.
"""
# System imports.
import argparse
import glob
import logging
import math
import os
import sys
from collections import OrderedDict
import numpy as np
# Third-party imports.
import pyworld
import soundfile
# Local source tree imports.
from idiaptts.misc.normalisation.MeanStdDevExtractor import MeanStdDevExtractor
from idiaptts.src.data_preparation.LabelGen import LabelGen
from idiaptts.misc.utils import makedirs_safe, interpolate_lin, compute_deltas
class LF0LabelGen(LabelGen):
"""Create LF0 feature labels for .wav files."""
f0_silence_threshold = 20
lf0_zero = 0
dir_lf0 = "lf0"
dir_deltas = "lf0"
dir_vuv = "vuv"
ext_lf0 = ".lf0"
ext_deltas = ".lf0_deltas"
ext_vuv = ".vuv"
logger = logging.getLogger(__name__)
def __init__(self, dir_labels, add_deltas=False):
"""
Prepare a numpy array with the LF0 and V/UV labels for each frame for each utterance extracted by WORLD.
        If add_deltas is False, each frame has only the LF0 value; otherwise its deltas and double deltas are added.
:param dir_labels: While using it as a database dir_labels has to contain the prepared labels.
:param add_deltas: Determines if labels contain deltas and double deltas.
"""
# Attributes.
self.dir_labels = dir_labels
self.add_deltas = add_deltas
self.norm_params = None
def __getitem__(self, id_name):
"""Return the preprocessed sample with the given id_name."""
sample = self.load_sample(id_name, self.dir_labels)
sample = self.preprocess_sample(sample)
return sample
@staticmethod
def trim_end_sample(sample, length, reverse=False):
"""
Trim the end of a sample by the given length. If reverse is True, the front of the sample is trimmed.
This function is called after preprocess_sample.
"""
if length == 0:
return sample
if reverse:
return sample[length:, ...]
else:
return sample[:-length, ...]
def preprocess_sample(self, sample, norm_params=None):
"""
Normalise one sample (by default to 0 mean and variance 1). This function should be used within the
batch loading of PyTorch.
:param sample: The sample to pre-process.
:param norm_params: Use this normalisation parameters instead of self.norm_params.
:return: Pre-processed sample.
"""
if norm_params is not None:
mean, std_dev = norm_params
elif self.norm_params is not None:
mean, std_dev = self.norm_params
else:
self.logger.error("Please give norm_params argument or call get_normaliations_params() before.")
return None
return np.float32((sample - mean) / std_dev)
def postprocess_sample(self, sample, norm_params=None):
"""
Denormalise one sample. This function is used after inference of a network.
:param sample: The sample to post-process.
:param norm_params: Use this normalisation parameters instead of self.norm_params.
:return: Post-processed sample.
"""
if norm_params is not None:
mean, std_dev = norm_params
elif self.norm_params is not None:
mean, std_dev = self.norm_params
else:
self.logger.error("Please give norm_params argument or call get_normaliations_params() before.")
return None
sample = np.copy((sample * std_dev) + mean)
return sample
@staticmethod
def load_sample(id_name, dir_out, add_deltas=False):
"""
Load LF0 and V/UV features from dir_out.
:param id_name: Id of the sample.
:param dir_out: Directory containing the sample.
:param add_deltas: Determines if deltas and double deltas are expected.
:return: Numpy array with dimensions num_frames x len(lf0, vuv).
"""
logging.debug("Load WORLD features for " + id_name)
lf0 = LF0LabelGen.load_lf0(id_name, dir_out, add_deltas)
vuv = LF0LabelGen.load_vuv(id_name, dir_out)
labels = np.concatenate((lf0, vuv), axis=1)
return labels
@staticmethod
def load_lf0(id_name, dir_out, add_deltas=False):
"""Loads LF0 features from dir_out."""
if add_deltas:
with open(os.path.join(dir_out, LF0LabelGen.dir_lf0, id_name + LF0LabelGen.ext_deltas), 'rb') as f:
lf0 = np.fromfile(f, dtype=np.float32)
lf0 = np.reshape(lf0, [-1, 3])
else:
with open(os.path.join(dir_out, LF0LabelGen.dir_lf0, id_name + LF0LabelGen.ext_lf0), 'rb') as f:
lf0 = np.fromfile(f, dtype=np.float32)
lf0 = np.reshape(lf0, [-1, 1])
return lf0
@staticmethod
def load_vuv(id_name, dir_out):
"""Loads V/UV features from dir_out."""
dim_vuv = 1
with open(os.path.join(dir_out, LF0LabelGen.dir_vuv, id_name + LF0LabelGen.ext_vuv), 'rb') as f:
vuv = np.fromfile(f, dtype=np.float32)
vuv = np.reshape(vuv, [-1, dim_vuv])
return vuv
@staticmethod
def convert_to_world_features(sample):
lf0 = sample[:, 0]
vuv = np.copy(sample[:, -1])
vuv[vuv < 0.5] = 0.0
vuv[vuv >= 0.5] = 1.0
return lf0, vuv
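    # Hedged micro-example (illustrative comment, not part of the original class):
    # convert_to_world_features(np.array([[5.2, 0.3], [5.4, 0.9]])) returns
    # lf0 = [5.2, 5.4] and vuv = [0., 1.]; the soft V/UV value is thresholded at
    # 0.5 into a binary voiced/unvoiced flag.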
def get_normalisation_params(self, dir_out, file_name=None):
"""
Read the mean std_dev values from a file.
Save them in self.norm_params.
:param dir_out: Directory containing the normalisation file.
:param file_name: Prefix of normalisation file.
Expects file to be named <file_name-><MeanStdDevExtractor.file_name_appendix>.bin
:return: Tuple of normalisation parameters (mean, std_dev).
"""
full_file_name = (file_name + "-" if file_name is not None else "") + MeanStdDevExtractor.file_name_appendix + ".bin"
if not self.add_deltas:
# Collect all means and std_devs in a list.
all_mean = list()
all_std_dev = list()
# Load normalisation parameters for all features.
mean, std_dev = MeanStdDevExtractor.load(os.path.join(dir_out, self.dir_lf0, full_file_name))
all_mean.append(np.atleast_2d(mean))
all_std_dev.append(np.atleast_2d(std_dev))
# Manually set vuv normalisation parameters.
# Note that vuv normalisation parameters are not saved in gen_data method (except for add_deltas=True).
all_mean.append(np.atleast_2d(0.0))
all_std_dev.append(np.atleast_2d(1.0))
# for dir_feature in [self.dir_lf0, self.dir_vuv]:
# mean, std_dev = MeanStdDevExtractor.load(os.path.join(dir_out, dir_feature, full_file_name))
# all_mean.append(np.atleast_2d(mean))
# all_std_dev.append(np.atleast_2d(std_dev))
# Save the concatenated normalisation parameters locally.
self.norm_params = np.concatenate(all_mean, axis=1), np.concatenate(all_std_dev, axis=1)
else:
# Save the normalisation parameters locally.
# VUV normalisation parameters are manually set to mean=0 and std_dev=1 in gen_data method.
self.norm_params = MeanStdDevExtractor.load(os.path.join(dir_out, self.dir_deltas, full_file_name))
return self.norm_params
def gen_data(self, dir_in, dir_out=None, file_id_list="", id_list=None, add_deltas=False, return_dict=False):
"""
        Prepare LF0 and V/UV features from audio files. If add_deltas is False, each numpy array has the dimension
        num_frames x 2 [lf0, vuv]; otherwise deltas and double deltas are added,
        resulting in num_frames x 4 [lf0(3*1), vuv].
:param dir_in: Directory where the .wav files are stored for each utterance to process.
:param dir_out: Main directory where the labels and normalisation parameters are saved to subdirectories.
If None, labels are not saved.
:param file_id_list: Name of the file containing the ids. Normalisation parameters are saved using
this name to differentiate parameters between subsets.
:param id_list: The list of utterances to process.
Should have the form uttId1 \\n uttId2 \\n ...\\n uttIdN.
If None, all file in audio_dir are used.
:param add_deltas: Add deltas and double deltas to all features except vuv.
:param return_dict: If true, returns an OrderedDict of all samples as first output.
:return: Returns two normalisation parameters as tuple. If return_dict is True it returns
all processed labels in an OrderedDict followed by the two normalisation parameters.
"""
# Fill file_id_list by .wav files in dir_in if not given and set an appropriate file_id_list_name.
if id_list is None:
id_list = list()
filenames = glob.glob(os.path.join(dir_in, "*.wav"))
for filename in filenames:
id_list.append(os.path.splitext(os.path.basename(filename))[0])
file_id_list_name = "all"
else:
file_id_list_name = os.path.splitext(os.path.basename(file_id_list))[0]
# Create directories in dir_out if it is given.
if dir_out is not None:
if add_deltas:
makedirs_safe(os.path.join(dir_out, LF0LabelGen.dir_deltas))
else:
makedirs_safe(os.path.join(dir_out, LF0LabelGen.dir_lf0))
makedirs_safe(os.path.join(dir_out, LF0LabelGen.dir_vuv))
# Create the return dictionary if required.
if return_dict:
label_dict = OrderedDict()
# Create normalisation computation units.
norm_params_ext_lf0 = MeanStdDevExtractor()
# norm_params_ext_vuv = MeanStdDevExtractor()
norm_params_ext_deltas = MeanStdDevExtractor()
logging.info("Extract WORLD LF0 features for " + "[{0}]".format(", ".join(str(i) for i in id_list)))
for file_name in id_list:
logging.debug("Extract WORLD LF0 features from " + file_name)
# Load audio file and extract features.
audio_name = os.path.join(dir_in, file_name + ".wav")
raw, fs = soundfile.read(audio_name)
_f0, t = pyworld.dio(raw, fs) # Raw pitch extraction. TODO: Use magphase here?
f0 = pyworld.stonemask(raw, _f0, t, fs) # Pitch refinement.
# Compute lf0 and vuv information.
lf0 = np.log(f0, dtype=np.float32)
lf0[lf0 <= math.log(LF0LabelGen.f0_silence_threshold)] = LF0LabelGen.lf0_zero
lf0, vuv = interpolate_lin(lf0)
if add_deltas:
# Compute the deltas and double deltas for all features.
lf0_deltas, lf0_double_deltas = compute_deltas(lf0)
# Combine them to a single feature sample.
labels = np.concatenate((lf0, lf0_deltas, lf0_double_deltas, vuv), axis=1)
# Save into return dictionary and/or file.
if return_dict:
label_dict[file_name] = labels
if dir_out is not None:
labels.tofile(os.path.join(dir_out, LF0LabelGen.dir_deltas, file_name + LF0LabelGen.ext_deltas))
# Add sample to normalisation computation unit.
norm_params_ext_deltas.add_sample(labels)
else:
# Save into return dictionary and/or file.
if return_dict:
label_dict[file_name] = np.concatenate((lf0, vuv), axis=1)
if dir_out is not None:
lf0.tofile(os.path.join(dir_out, LF0LabelGen.dir_lf0, file_name + LF0LabelGen.ext_lf0))
vuv.astype(np.float32).tofile(os.path.join(dir_out, LF0LabelGen.dir_vuv, file_name + LF0LabelGen.ext_vuv))
# Add sample to normalisation computation unit.
norm_params_ext_lf0.add_sample(lf0)
# norm_params_ext_vuv.add_sample(vuv)
# Save mean and std dev of all features.
if not add_deltas:
norm_params_ext_lf0.save(os.path.join(dir_out, LF0LabelGen.dir_lf0, file_id_list_name))
# norm_params_ext_vuv.save(os.path.join(dir_out, LF0LabelGen.dir_vuv, file_id_list_name))
else:
# Manually set vuv normalisation parameters before saving.
norm_params_ext_deltas.sum_frames[-1] = 0.0 # Mean = 0.0
norm_params_ext_deltas.sum_squared_frames[-1] = norm_params_ext_deltas.sum_length # Variance = 1.0
norm_params_ext_deltas.save(os.path.join(dir_out, LF0LabelGen.dir_deltas, file_id_list_name))
# Get normalisation parameters.
if not add_deltas:
norm_lf0 = norm_params_ext_lf0.get_params()
# norm_vuv = norm_params_ext_vuv.get_params()
norm_first =
|
np.concatenate((norm_lf0[0], (0.0,)), axis=0)
|
numpy.concatenate
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binned_statistic, sem
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
colors = [color['color'] for color in list(plt.rcParams['axes.prop_cycle'])]
import sys
# compute fast/slow state index
def calc_timings(data):
index_high = []
index_low = []
temp_index_high = []
temp_index_low = []
bound = False
for i, (time, step, out, number_bound, _) in enumerate(data):
if number_bound == 0:
temp_index_high.append(i)
if not bound and i != 0:
temp_index_low.append(i)
index_low.append(temp_index_low)
temp_index_low = []
bound=True
else:
temp_index_low.append(i)
if bound:
temp_index_high.append(i)
index_high.append(temp_index_high)
temp_index_high = []
bound=False
if bound:
temp_index_high.append(i)
index_high.append(temp_index_high)
temp_index_high = []
else:
temp_index_low.append(i)
index_low.append(temp_index_low)
temp_index_low = []
return index_high, index_low
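# Minimal usage sketch (synthetic rows, not part of the original analysis):
# calc_timings expects rows of (time, step, out, number_bound, total) and
# returns two lists of contiguous index runs, one for stretches with
# number_bound == 0 (free-flowing) and one for stretches with bound particles.
_demo_data = np.array([
    [0.0, 0, 1, 0, 8],   # free (number_bound == 0)
    [1.0, 1, 0, 2, 8],   # bound
    [2.0, 2, 1, 2, 8],   # bound
    [3.0, 3, 1, 0, 8],   # free again
])
_demo_high, _demo_low = calc_timings(_demo_data)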
R=0.1
Ron_list = []
kOff_list = [0.001212, 0.012122, 0.121224, 1.212245, 12.122449,
121.22449, 1212.244898,12122.44898, 121224.489796,
1212244.897959, 12122448.979592, 121224489.795918]
for kOff in kOff_list:
kA = 1.0/R
kOn = kA * kOff
dx=7.0
D0=270000.0
Dhop = 2.0*D0/(dx*dx)
fractionFree = 1.0/(1.0 + kA)
khop = Dhop/fractionFree
Ron = kOn/khop
Ron_list.append(Ron)
tau_high_list = []
tau_low_list = []
flux_high_list = []
flux_low_list = []
prob_list = []
n_high_list = []
n_low_list = []
flux_eqn1 = []
flux_eqn2 = []
phi_slow = []
phi_free = []
tau_slow = []
tau_free = []
width = 8
for id,Ron in enumerate(Ron_list):
data = np.loadtxt('../Sim/Data/TimeSeries_Width'+str(width)+'R'+str(R)+'koff'+str(kOff_list[id])+'ID' + str(id))
time, step, out, number_bound, number_total = list(data.T)
index_high, index_low = calc_timings(data)
total_time = time[-1] - time[0]
pscale = np.sqrt((Ron*khop + (khop*Ron/kA))/khop) * (1.0 + 1.0/kA)
nu_s = 0.55
n_plug = (width-1)/2.0
n_bound = 1 + nu_s * n_plug/(1 + 1.0/kA)
phi_slow.append( (1.0 + n_plug) / (n_bound/(khop*Ron/kA)) )
phi_free.append(khop/2.0/width)
n_free = (width+1)/2.0
tau_slow.append(n_bound/ (khop*Ron/kA) )
tau_free.append(1.0/(Ron*khop * n_free))
flux_eqn1.append(2.0*D0/(dx**2)*pscale/width/np.sqrt(3) * (np.arctan(1 + 2.0/pscale) - np.arctan(1) ) )
tau_high = np.array(list(map(lambda x: time[x][-1] - time[x][0], index_high)))
tau_low = np.array(list(map(lambda x: time[x][-1] - time[x][0], index_low)))
n_high = np.array(list(map(lambda x: out[x].sum(), index_high)))
n_low = np.array(list(map(lambda x: out[x].sum(), index_low)))
n_high = n_high[tau_high > 0]
n_low = n_low[tau_low > 0]
tau_high = tau_high[tau_high > 0]
tau_low = tau_low[tau_low > 0]
flux_high = (n_high / tau_high)
flux_low = (n_low / tau_low)
# duration-weighted flux
flux_high_av = np.nansum(flux_high * tau_high) / np.nansum(tau_high)
flux_low_av = np.nansum(flux_low * tau_low) / np.nansum(tau_low)
flux_high_list.append(flux_high_av)
flux_low_list.append(flux_low_av)
tau_high_av = np.mean(tau_high)
tau_low_av = np.mean(tau_low)
tau_high_list.append(tau_high_av)
tau_low_list.append(tau_low_av)
total_time_high = np.sum(tau_high/total_time)
total_time_low = np.sum(tau_low/total_time)
prob_list.append(total_time_high/(total_time_low + total_time_high))
print(total_time_high + total_time_low, total_time_high, tau_low_av, tau_high_av, flux_low_av, flux_high_av)
n_high_av = np.mean(n_high)
n_low_av = np.mean(n_low)
n_high_list.append(n_high_av)
n_low_list.append(n_low_av)
fig,ax = plt.subplots(1,2,figsize=(9.75,4))
ax = ax.flatten()
Ron_list = np.array(Ron_list)
tau_high_list = np.array(tau_high_list)
tau_low_list = np.array(tau_low_list)
flux_high_list = np.array(flux_high_list)
flux_low_list = np.array(flux_low_list)
Rx = width**(-3.0)
ax[0].loglog(Ron_list[Ron_list < Rx], tau_low_list[Ron_list < Rx],marker='s',color = colors[1], label = '',markersize = 8,linestyle = '')
ax[0].loglog(Ron_list[Ron_list < Rx], tau_high_list[Ron_list < Rx],marker='o',color = colors[0],label = '',markersize = 8,linestyle = '')
ax[0].loglog(Ron_list[Ron_list >= Rx], tau_low_list[Ron_list >= Rx],marker='s',color = colors[1], label = 'slow-plugged',markersize = 8,linestyle = '')
ax[0].loglog(Ron_list[Ron_list >= Rx], tau_high_list[Ron_list >= Rx],marker='o',color = colors[0],label = 'free-flowing',markersize = 8,linestyle = '')
ax[0].set_ylabel(r'$\langle\tau\rangle$',fontsize=19,rotation=0,labelpad=14)
ax[0].set_xlabel(r'$R_{\mathrm{on}}$',fontsize=20)
ax[1].loglog(Ron_list[Ron_list < Rx], flux_high_list[Ron_list < Rx],marker='o',color = colors[0],label = 'high',markersize = 8,linestyle = '')
ax[1].loglog(Ron_list[Ron_list < Rx], flux_low_list[Ron_list < Rx],marker = 's',color = colors[1], label = 'low',markersize = 8,linestyle = '')
ax[1].loglog(Ron_list[Ron_list >= Rx], flux_high_list[Ron_list >= Rx],marker='o',color = colors[0],label = 'high',markersize = 8,linestyle = '')
ax[1].loglog(Ron_list[Ron_list >= Rx], flux_low_list[Ron_list >= Rx],marker = 's',color = colors[1], label = 'low',markersize = 8,linestyle = '')
ax[1].set_ylabel(r'$\langle \Phi\rangle$',fontsize=20,rotation=0,labelpad=7)
ax[1].set_xlabel(r'$R_{\mathrm{on}}$',fontsize=20)
Ron_list=
|
np.array(Ron_list)
|
numpy.array
|
from .regression import Regression
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
from imageio import imread
import os
FIGURE_DIR = "./figures"
DATA_DIR = "./data"
if not os.path.exists(FIGURE_DIR):
os.mkdir(FIGURE_DIR)
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
def image_path(fig_id):
return os.path.join(FIGURE_DIR, fig_id)
def data_path(DATA_file):
return os.path.join(DATA_DIR, DATA_file)
def save_fig(fig_id):
fn = image_path(fig_id) + ".png"
if os.path.exists(fn):
overwrite = str(
input(
"The file "
+ fn
+ " already exists,"
+ "\ndo you wish to overwrite it [y/n/new_file_name]?\n"
)
)
if overwrite == "y":
plt.savefig(fn, format="png")
plt.close()
print(fn + " was overwritten.")
elif overwrite == "n":
print("Figure was not saved.")
elif fn != overwrite:
fn = image_path(overwrite) + ".png" # user specified filename
plt.savefig(fn, format="png")
plt.close()
print("New file: " + fn + " written.")
# if user types new_file_name = fn, the original file is preserved,
# and NOT overwritten.
else:
plt.savefig(fn, format="png")
plt.close()
return None
def create_polynomial_design_matrix(X_data, degree):
"""
X_data = [x_data y_data]
    Create a polynomial design matrix where the columns are:
X = [1 x y x**2 xy y**2 x**3 x**2y ... ]
"""
X = PolynomialFeatures(degree).fit_transform(X_data)
return X
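# Minimal sketch (assumed example, not part of the original analysis): for a
# single point (x, y) = (2, 3) and degree 2, scikit-learn orders the columns
# as [1, x, y, x**2, x*y, y**2], which is the ordering that
# get_polynomial_coefficients() below relies on.
_X_example = create_polynomial_design_matrix(np.array([[2.0, 3.0]]), degree=2)
# _X_example == array([[1., 2., 3., 4., 6., 9.]])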
def get_polynomial_coefficients(degree=5):
"""
Return a list with coefficient names,
[1 x y x^2 xy y^2 x^3 ...]
"""
names = ["1"]
    for exp in range(1, degree + 1):  # exponents 1, ..., degree (constant added above)
for x_exp in range(exp, -1, -1):
y_exp = exp - x_exp
if x_exp == 0:
x_str = ""
elif x_exp == 1:
x_str = r"$x$"
else:
x_str = rf"$x^{x_exp}$"
if y_exp == 0:
y_str = ""
elif y_exp == 1:
y_str = r"$y$"
else:
y_str = rf"$y^{y_exp}$"
names.append(x_str + y_str)
return names
def franke_function(x, y):
"""
Info:
        The Franke function f(x, y). The inputs are scalars or arrays with
        elements in the domain [0, 1].
Inputs:
x, y: array, int or float. Must be of same shape.
Output:
f(x,y), same type and shape as inputs.
"""
if np.shape(x) != np.shape(y):
raise ValueError("x and y must be of same shape!")
term1 = 0.75 * np.exp(-(0.25 * (9 * x - 2) ** 2) - 0.25 * ((9 * y - 2) ** 2))
term2 = 0.75 * np.exp(-((9 * x + 1) ** 2) / 49.0 - 0.1 * (9 * y + 1))
term3 = 0.5 * np.exp(-(9 * x - 7) ** 2 / 4.0 - 0.25 * ((9 * y - 3) ** 2))
term4 = -0.2 * np.exp(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)
return term1 + term2 + term3 + term4
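# Minimal sketch (assumed usage; sample size and noise level are hypothetical):
# evaluate the Franke function on uniform samples and add Gaussian noise, which
# is the kind of (x1, x2, y) data the analysis functions below expect.
_rng = np.random.default_rng(42)
_x1_demo = _rng.uniform(0.0, 1.0, 200)
_x2_demo = _rng.uniform(0.0, 1.0, 200)
_noise_variance = 0.25
_y_demo = franke_function(_x1_demo, _x2_demo) + _rng.normal(
    0.0, np.sqrt(_noise_variance), 200)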
# Ordinary least squares analysis
def ols_franke_function(X, y, x1, x2, variance, degree=5, svd=False, k=5):
"""
Info:
Make a plot of the sample data and an Ordinary Least Squares fit on
the data. The purpose here is only to illustrate the fit, and the
data is NOT yet split in training/test data.
Input:
* X: design matrix
* y: sample data
* x1: sample data
* x2: sample data
* degree=5: polynomial degree of regression.
        * svd=False: if set to True, the inversion of (X.T @ X) will
          be performed with SVD
* k=5: number of k subsets for k-fold CV
Output:
The function produces a plot of the data and corresponding regression.
"""
# OLS
model = Regression(X, y)
model.ols_fit(svd=svd)
y_pred = model.predict(X)
N = X.shape[0]
mse = model.mean_squared_error(y, y_pred)
r2 = model.r2_score(y, y_pred)
# PLOT
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
# plot the original data
data = ax.scatter(x1, x2, y, marker="^", alpha=0.04)
ax.set_xlabel(r"$x_1$")
ax.set_ylabel(r"$x_2$")
ax.set_zlabel("y")
# make a surface plot on a smooth meshgrid, given OLS model
l = np.linspace(0, 1, 1001)
x1_mesh, x2_mesh = np.meshgrid(l, l)
x1_flat, x2_flat = x1_mesh.flatten(), x2_mesh.flatten()
X_mesh = np.column_stack((x1_flat, x2_flat))
y_pred = model.predict(create_polynomial_design_matrix(X_mesh, degree))
y_pred_mesh = np.reshape(y_pred, x1_mesh.shape)
surface = ax.plot_surface(x1_mesh, x2_mesh, y_pred_mesh, cmap=mpl.cm.coolwarm)
fake_surf = mpl.lines.Line2D(
[0], [0], linestyle="none", c="r", marker="s", alpha=0.5
)
fake_data = mpl.lines.Line2D(
[0], [0], linestyle="none", c="b", marker="^", alpha=0.2
)
ax.legend(
[fake_surf, fake_data],
[
f"OLS surface fit with polynomial degree {degree}\n"
+ "Training MSE = "
+ f"{mse:1.3f}"
+ r", $R^2$ = "
+ f"{r2:1.3f}",
f"{N} Data points with variance = {variance:1.3f}",
],
numpoints=1,
loc=1,
)
fig.colorbar(surface, shrink=0.5)
save_fig("ols_franke_function")
return None
def ols_test_size_analysis(X, y, variance, svd=False):
"""
Info:
Analyse the MSE and R2 as a function of test size
Input:
* X: design matrix
* y: y data
* variance: Var(y) in noise of franke function, only used for plot title
        * svd=False: if set to True, the inversion of (X.T @ X) will
          be performed with SVD
Output:
Produces and saves a plot
"""
N = X.shape[0]
n = 17
test_sizes = np.linspace(0.1, 0.9, n)
mse = np.zeros(n)
r2 = np.zeros(n)
model = Regression(X, y)
# Collect MSE and R2 as a function of test_size
for i in range(n):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_sizes[i], random_state=i
)
model.update_X(X_train)
model.update_y(y_train)
model.ols_fit(svd=svd)
y_pred = model.predict(X_test)
mse[i] = model.mean_squared_error(y_test, y_pred)
r2[i] = model.r2_score(y_test, y_pred)
# Plot the results
plt.subplot(211)
plt.title(f"Test Size Analysis, Data points = {N}, Variance = {variance:1.3f}")
plt.plot(test_sizes, mse, label="Mean Squared Error")
plt.ylabel("MSE")
plt.grid()
plt.legend()
plt.subplot(212)
plt.plot(test_sizes, r2, label="R squared score")
plt.ylabel(r"$R^2$")
plt.grid()
plt.legend()
plt.xlabel("Test Size")
save_fig("ols_test_size_analysis")
return None
def ols_beta_variance(X_data, y, variance=1.0, degree=5):
"""
Info:
plot the beta-coefficients with errorbars corresponding to one sigma
(standard deviation) = sqrt(variance)
Input:
* X_data: x1, x2 coordinates of data
* y: y coordinates of data
* variance=1: sigma**2 of noise in data
* degree=5: polynomial degree for design matrix
Output:
Produces and saves a plot
"""
    X = create_polynomial_design_matrix(X_data, degree)
    p = X.shape[1]
XTXinv = np.linalg.inv(X.T @ X)
x = np.linspace(0, p - 1, p)
beta = XTXinv @ X.T @ y
    beta_err = np.sqrt(np.diag(XTXinv) * variance)  # one-sigma errorbar per coefficient
names = get_polynomial_coefficients(degree)
# PLOT
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.errorbar(
x, beta, yerr=beta_err, fmt="b.", capsize=3, label=r"$\beta_i\pm\sigma$"
)
ax.set_title(r"$\beta_i$ confidence intervals")
ax.set_xticks(x.tolist())
ax.set_xticklabels(names, fontdict={"fontsize": 7})
plt.ylabel(r"$\beta$")
plt.xlabel(r"$\beta$ coeff. terms")
plt.grid()
plt.legend()
save_fig("beta_st_dev")
return None
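# Worked note (assumed standalone helper, mirroring the errorbar computation
# above): under OLS with noise variance sigma^2, Var(beta_hat) = sigma^2 (X^T X)^{-1},
# so the one-sigma errorbar on beta_i is sqrt(sigma^2 * [(X^T X)^{-1}]_ii).
def _beta_standard_errors(X, variance):
    """Hypothetical helper; not called by the analysis functions."""
    XTXinv = np.linalg.inv(X.T @ X)
    return np.sqrt(np.diag(XTXinv) * variance)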
def ols_k_fold_analysis(X, y, variance, largest_k, svd=False):
"""
Info:
Analyse the MSE and R2 as a function of k
Input:
* X: design matrix
* y: y data
* variance: Var(y) in noise of franke function, only used for plot title
* largest_k: integer where k = [2, 3, ..., largest_k]
        * svd=False: if set to True, the inversion of (X.T @ X) will
          be performed with SVD
Output:
Produces and saves a plot
"""
N = X.shape[0]
if largest_k < 2:
raise ValueError("largest k must be >= 2.")
n = largest_k - 1
k_arr = np.linspace(2, largest_k, n, dtype=np.int64)
mse = np.zeros(n)
r2 = np.zeros(n)
model = Regression(X, y)
# Collect MSE and R2 as a function of test_size
for i in range(n):
mse[i], r2[i] = model.k_fold_cross_validation(k_arr[i], "ols", svd=svd)
# Plot the results
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
plt.title(f"K-fold Cross Validation, Data points = {N}, Variance = {variance:1.3f}")
plt.plot(k_arr, mse, label="Mean Squared Error")
plt.ylabel("MSE")
plt.grid()
plt.legend()
ax1.set_yticklabels([f"{x:1.4f}" for x in ax1.get_yticks().tolist()])
ax2 = fig.add_subplot(2, 1, 2)
plt.plot(k_arr, r2, label="R squared score")
plt.ylabel(r"$R^2$")
plt.grid()
plt.legend()
plt.xlabel("k")
ax2.set_yticklabels([f"{x:1.4f}" for x in ax2.get_yticks().tolist()])
save_fig("ols_k_fold_analysis")
return None
def ols_degree_analysis(X_data, y, min_deg, max_deg, variance, svd=False, k=5):
"""
Info:
Analyse the MSE as a function of degree
Input:
* X: design matrix
* y: y data
        * min_deg: minimum polynomial degree
        * max_deg: maximum polynomial degree
* variance: Var(y) in noise of franke function, only used for plot title
        * svd=False: if set to True, the inversion of (X.T @ X) will
          be performed with SVD
* k=5: number of k subsets for k-fold CV
Output:
Produces and saves a plot
"""
N = X_data.shape[0]
n = max_deg - min_deg + 1
mse_k_fold = np.zeros(n)
mse_train = np.zeros(n)
degree_arr = np.linspace(min_deg, max_deg, n, dtype=np.int64)
model = Regression(X_data, y)
min_MSE = variance * 5
min_deg = 0
# Collect MSE and R2 as a function of test_size
for i in range(n):
deg = degree_arr[i]
X = create_polynomial_design_matrix(X_data, deg)
model.update_X(X)
model.ols_fit(svd=svd)
y_pred = model.predict(X)
mse_train[i] = model.mean_squared_error(y, y_pred)
mse_k_fold[i], r2 = model.k_fold_cross_validation(k, "ols", svd=svd)
if mse_k_fold[i] < min_MSE:
min_deg = deg
min_MSE = mse_k_fold[i]
# Plot the results
fig = plt.figure()
plt.title(f"Data points = {N}, Variance = {variance:1.4f}")
plt.plot(degree_arr, mse_train, label=f"MSE: training data")
plt.plot(degree_arr, mse_k_fold, label=f"MSE: {k}-fold CV")
plt.plot(min_deg, min_MSE, "ro", label=f"Min. MSE = {min_MSE:1.4f}")
plt.plot()
plt.ylabel("Prediction Error")
plt.xlabel("Polynomial degree")
plt.grid()
plt.legend()
save_fig("degree_analysis_ols")
return None
def ols_degree_and_n_analysis(
max_log_N, max_degree, test_size=0.33, st_dev=0.5, svd=False, k=5
):
"""
Info:
Study the prediction error vs. degree and number of data points, and also
the prediction error will be evaluated on BOTH the training set and
the test set. This should illustrate the Bias-Variance tradeoff (optimal
degree of complexity) and how the training set gets overfitted.
Input:
* max_log_N: Largest exponent of 10 (int > 3). The number of data points
will be N = 10**(log N), where
- log N = [3, 4, ...]
- N = [100, 1000, 10 000, ...]
* max_degree: Largest polynomial degree for OLS regression.
Integer between 0 and 20
* test_size: fraction of data which will be used in test set
* st_dev: standard deviation on noise in franke function
        * svd=False: if set to True, the inversion of (X.T @ X) will
          be performed with SVD
* k=5: number of k subsets for k-fold CV
Output:
Produces and saves a plot
"""
if max_log_N <= 3:
raise ValueError("max_log_N must be an integer > 3")
if max_degree < 0 or max_degree > 20:
raise ValueError("max_degree should be an integer between 0 and 20")
degree_arr = np.linspace(2, max_degree, max_degree - 1, dtype=np.int64)
N_arr = np.logspace(3, max_log_N, (max_log_N - 2), dtype=np.int64)
log_N_arr = np.linspace(3, max_log_N, (max_log_N - 2), dtype=np.int64)
log_N_mesh, degree_mesh = np.meshgrid(log_N_arr, degree_arr)
mse_test_mesh = np.zeros(log_N_mesh.shape)
mse_train_mesh =
|
np.zeros(log_N_mesh.shape)
|
numpy.zeros
|
# coding: utf-8
# In[1]:
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
from keras.models import load_model
from keras.models import model_from_json
import sys
import mecab
import MeCab
model = load_model('LSTM_Tweet.h5')
# path = get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
path = "/home/MakeTweet/MakedataByTwimeMachine/maguroTweetimprove.txt"
# one tweet entry per line, e.g. "回復アイテム" (recovery item)
readfiletext = open(path,"r",encoding="utf-8").read().lower()
print('corpus length:', len(readfiletext))
# the corpus likely contains kanji such as '書' and '替'
# apply morphological analysis (MeCab) here
mecabText = mecab.mecab_list(readfiletext)
mecabTextLen = len(mecabText)
print(mecabTextLen)
chars = sorted(list(set(mecabText)))
print('total chars:', len(chars))
# previously indexed per character (e.g. '便': 349, '係': 350); morphological analysis makes the units whole words
char_indices = dict((c, i) for i, c in enumerate(chars))
# previously mapped index-to-character (e.g. 350: '便', 351: '係'); morphological analysis makes the units whole words
indices_char = dict((i, c) for i, c in enumerate(chars))
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return
|
np.argmax(probas)
|
numpy.argmax
|
# import necessary libraries
import numpy as np
from scipy.sparse import csr_matrix
from pathlib import Path
import sys
sys.path.append(
"/users/rohan/news-classification/ranking-featured-writing/rankfromsets"
)
import os
import argparse
from data_processing.articles import Articles
from models.models import InnerProduct
import data_processing.dictionaries as dictionary
import scipy
import json
import pandas as pd
import time
# get arguments for script and parse
def expand_path(string):
return Path(os.path.expandvars(string))
parser = argparse.ArgumentParser(
description="Train model on article data and test evaluation"
)
parser.add_argument(
"--model_matrix_dir",
type=expand_path,
required=True,
help="This is required to load model matrices.",
)
parser.add_argument(
"--data_matrix_path",
type=expand_path,
required=True,
help="This is required to load data matrices",
)
parser.add_argument(
"--dict_dir", type=expand_path, required=True, help="Path to data to be ranked."
)
parser.add_argument(
"--list_output_dir",
type=expand_path,
required=True,
help="The place to store the generated html.",
)
parser.add_argument(
"--real_data_path",
type=expand_path,
required=True,
help="Mapped and filtered data to generate html with.",
)
parser.add_argument(
"--dataset_name",
type=str,
required=True,
help="Indicates which dataset for demo this is.",
)
parser.add_argument(
"--amount", type=int, default=75, help="Quantity of articles to include in list!"
)
parser.add_argument(
"--emb_size", type=int, default=10, help="Embedding Size of Model Used"
)
args = parser.parse_args()
# load dictionaries
dict_dir = Path(args.dict_dir)
final_word_ids, final_url_ids, final_publication_ids = dictionary.load_dictionaries(
dict_dir
)
print("Dictionaries loaded.")
# load numeric data for calculations
publication_emb = np.asarray(
[
0.77765566,
0.76451594,
0.75550663,
-0.7732487,
0.7341457,
0.7216135,
-0.7263404,
0.73897207,
-0.720818,
0.73908365,
],
dtype=np.float32,
)
publication_bias = 0.43974462
word_article_path = args.data_matrix_path
word_articles = scipy.sparse.load_npz(word_article_path)
word_emb_path = args.model_matrix_dir / "word_emb.npy"
word_emb =
|
np.load(word_emb_path)
|
numpy.load
|
import numpy as np
import scipy
from scipy.stats import t, norm
def ztest_notch(greater, smaller, data):
ngreater, nsmaller = data[greater].sum(), data[smaller].sum()
n_bins_greater, n_bins_smaller = len(greater), len(smaller)
ntotal = ngreater + nsmaller
p_hat = float(ngreater)/ntotal
p_hat_var = p_hat*(1-p_hat)/ntotal
p_null = float(n_bins_greater)/(n_bins_greater+n_bins_smaller)
z = (p_hat - p_null)/np.sqrt(p_hat_var)
pval = 1 - norm.cdf(z)
return pval
def ttest_notch(greater, smaller, data):
data = data.astype('float32')
data += 0.5
N = data.sum()
if N == 0:
return 1
ngreater, nsmaller = data[greater], data[smaller]
#if np.any(ngreater <= 2) or np.any(nsmaller <= 2):
# return 1
pg = ngreater/N
siggsq = pg*(1-pg)/N
pg = pg.sum()/pg.size
siggsq = siggsq.sum()/siggsq.size**2
# print(pg, siggsq)
ps = nsmaller/N
sigssq = ps*(1-ps)/N
ps = ps.sum()/ps.size
sigssq = sigssq.sum()/sigssq.size**2
tstat = (pg - ps)/np.sqrt(siggsq + sigssq)
df = (siggsq + sigssq)**2/((siggsq)**2/(N-1) + (sigssq)**2/(N-1) )
# print(pg, ps, siggsq, sigssq)
return (1 - t.cdf(tstat, df))
def findnotchpairs(correlograms, mc, CONFIG):
nunits = correlograms.shape[0]
numbins = correlograms.shape[2]
baseline = np.arange(0, 20)
baseline = np.concatenate([baseline, np.arange(numbins - 20, numbins)])
centrebins = [numbins//2-1, numbins//2, numbins//2+1]
offbins = np.arange(correlograms.shape[2]//2-13, correlograms.shape[2]//2-3)
offbins = np.concatenate([offbins, np.arange(numbins//2+3, numbins//2+13)])
# baseline = np.arange(correlograms.shape[2]//2-29, correlograms.shape[2]//2-8)
# baseline = np.concatenate([baseline, np.arange(correlograms.shape[2]//2+8, correlograms.shape[2]//2+28)])
# centrebins = [correlograms.shape[2]//2]
# offbins = np.arange(correlograms.shape[2]//2-7, correlograms.shape[2]//2-2)
# offbins = np.concatenate([offbins, np.arange(correlograms.shape[2]//2+2, correlograms.shape[2]//2+7)])
notchpairs = []
for unit1 in range(nunits):
idx = np.in1d(mc,
|
np.arange(49)
|
numpy.arange
|
"""Module is for data (time series and anomaly list) processing.
"""
from typing import Dict, List, Optional, Tuple, Union, overload
import numpy as np
import pandas as pd
def validate_series(
ts: Union[pd.Series, pd.DataFrame],
check_freq: bool = True,
check_categorical: bool = False,
) -> Union[pd.Series, pd.DataFrame]:
"""Validate time series.
    This function will check some common critical issues of time series that
may cause problems if anomaly detection is performed without fixing them.
The function will automatically fix some of them and raise errors for the
others.
    Issues that will be checked and automatically fixed include:
    - Time index is not monotonically increasing;
    - Time index contains duplicated time stamps (fix by keeping first values);
    - (optional) Time index attribute `freq` is missing while the index follows
      a frequency;
    - (optional) Time series include categorical (non-binary) label columns
      (to fix by converting categorical labels into binary indicators).
    Issues that will be checked and raise an error include:
- Wrong type of time series object (must be pandas Series or DataFrame);
- Wrong type of time index object (must be pandas DatetimeIndex).
Parameters
----------
ts: pandas Series or DataFrame
Time series to be validated.
check_freq: bool, optional
        Whether to check if the time index attribute `freq` is missing. Default: True.
check_categorical: bool, optional
        Whether to check if the time series includes categorical (non-binary)
        label columns. Default: False.
Returns
-------
pandas Series or DataFrame
Validated time series.
"""
ts = ts.copy()
# check input type
if not isinstance(ts, (pd.Series, pd.DataFrame)):
raise TypeError("Input is not a pandas Series or DataFrame object")
# check index type
if not isinstance(ts.index, pd.DatetimeIndex):
raise TypeError(
"Index of time series must be a pandas DatetimeIndex object."
)
# check duplicated
if any(ts.index.duplicated(keep="first")):
        ts = ts[~ts.index.duplicated(keep="first")]
# check sorted
if not ts.index.is_monotonic_increasing:
ts.sort_index(inplace=True)
# check time step frequency
if check_freq:
if (ts.index.freq is None) and (ts.index.inferred_freq is not None):
ts = ts.asfreq(ts.index.inferred_freq)
# convert categorical labels into binary indicators
if check_categorical:
if isinstance(ts, pd.DataFrame):
ts = pd.get_dummies(ts)
if isinstance(ts, pd.Series):
seriesName = ts.name
ts = pd.get_dummies(
ts.to_frame(),
prefix="" if seriesName is None else seriesName,
prefix_sep="" if seriesName is None else "_",
)
if len(ts.columns) == 1:
ts = ts[ts.columns[0]]
ts.name = seriesName
return ts
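def _example_validate_series():
    """Assumed demo, not part of the original module: an unsorted index with a
    duplicated timestamp is fixed by keeping the first value and sorting, and
    the inferred daily frequency is restored."""
    s = pd.Series(
        [1.0, 2.0, 3.0, 4.0],
        index=pd.to_datetime(
            ["2021-01-03", "2021-01-01", "2021-01-01", "2021-01-02"]),
    )
    return validate_series(s)  # values [2.0, 4.0, 1.0], index freq 'D'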
def validate_events(
event_list: List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]],
point_as_interval: bool = False,
) -> List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]:
"""Validate event list.
This function will check and fix some common issues in an event list (a
list of time windows), including invalid time window, overlapped time
windows, unsorted events, etc.
Parameters
----------
event_list: list
A list of events, where an event is a pandas Timestamp if it is
instantaneous or a 2-tuple of pandas Timestamps if it is a closed time
interval.
point_as_interval: bool, optional
        Whether to return every instantaneous event as a closed interval with
        identical start and end points. Default: False.
Returns
-------
list:
A validated list of events.
"""
if not isinstance(event_list, list):
raise TypeError("Argument `event_list` must be a list.")
for event in event_list:
if not (
isinstance(event, pd.Timestamp)
or (
isinstance(event, tuple)
and (len(event) == 2)
and all([isinstance(event[i], pd.Timestamp) for i in [0, 1]])
)
):
raise TypeError(
"Every event in the list must be a pandas Timestamp, "
"or a 2-tuple of Timestamps."
)
time_window_ends = [] # type: List[pd.Timestamp]
time_window_type = [] # type: List[int]
for time_window in event_list:
if isinstance(time_window, tuple):
if time_window[0] <= time_window[1]:
time_window_ends.append(time_window[0])
time_window_type.append(+1)
time_window_ends.append(time_window[1])
time_window_type.append(-1)
else:
time_window_ends.append(time_window)
time_window_type.append(+1)
time_window_ends.append(time_window)
time_window_type.append(-1)
time_window_end_series = pd.Series(
time_window_type, index=pd.DatetimeIndex(time_window_ends), dtype=int
) # type: pd.Series
time_window_end_series.sort_index(kind="mergesort", inplace=True)
time_window_end_series = time_window_end_series.cumsum()
status = 0
merged_event_list = (
[]
) # type: List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]
    for t, v in time_window_end_series.items():  # type: pd.Timestamp, int
if (status == 0) and (v > 0):
start = t # type: pd.Timestamp
status = 1
if (status == 1) and (v <= 0):
end = t # type: pd.Timestamp
merged_event_list.append([start, end])
status = 0
for i in range(1, len(merged_event_list)):
this_start = merged_event_list[i][0] # type: pd.Timestamp
this_end = merged_event_list[i][1] # type: pd.Timestamp
last_start = merged_event_list[i - 1][0] # type: pd.Timestamp
last_end = merged_event_list[i - 1][1] # type: pd.Timestamp
if last_end + pd.Timedelta("1ns") >= this_start:
merged_event_list[i] = [last_start, this_end]
merged_event_list[i - 1] = None
merged_event_list = [
w[0] if (w[0] == w[1] and not point_as_interval) else tuple(w)
for w in merged_event_list
if w is not None
]
return merged_event_list
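def _example_validate_events():
    """Assumed demo, not part of the original module: overlapping windows are
    merged into one interval while an isolated instantaneous event stays a
    single Timestamp."""
    events = [
        (pd.Timestamp("2021-01-01 00:00"), pd.Timestamp("2021-01-01 06:00")),
        (pd.Timestamp("2021-01-01 03:00"), pd.Timestamp("2021-01-01 09:00")),
        pd.Timestamp("2021-01-02 12:00"),
    ]
    return validate_events(events)
    # -> [(Timestamp('2021-01-01 00:00:00'), Timestamp('2021-01-01 09:00:00')),
    #     Timestamp('2021-01-02 12:00:00')]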
@overload
def to_events(
labels: pd.Series,
freq_as_period: bool = True,
merge_consecutive: Optional[bool] = None,
) -> List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]:
...
@overload
def to_events( # type: ignore
labels: pd.DataFrame,
freq_as_period: bool = True,
merge_consecutive: Optional[bool] = None,
) -> Dict[str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]]:
...
def to_events(
labels: Union[pd.Series, pd.DataFrame],
freq_as_period: bool = True,
merge_consecutive: Optional[bool] = None,
) -> Union[
List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]],
Dict[str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]],
]:
"""Convert binary label series to event list.
Parameters
----------
labels: pandas Series or DataFrame
Binary series of anomaly labels. If a DataFrame, each column is
regarded as a type of anomaly independently.
freq_as_period: bool, optional
Whether to regard time index with regular frequency (i.e. attribute
`freq` of time index is not None) as time intervals.
For example, DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03',
'2017-01-04', '2017-01-05'], dtype='datetime64[ns]', freq='D') has
daily frequency. If freq_as_period=True, each time point in the index
        represents that day (24 hours). Otherwise, each time point represents
the instantaneous time instance of 00:00:00 on that day.
Default: True.
merge_consecutive: bool, optional
Whether to merge consecutive events into a single time window. If not
specified, it is on automatically if the input time index has a regular
frequency and freq_as_period=True, and it is off otherwise. Default:
None.
Returns
-------
list or dict
- If input is a Series, output is a list of events where an event is a
pandas Timestamp if it is instantaneous or a 2-tuple of pandas
Timestamps if it is a closed time interval.
- If input is a DataFrame, every column is treated as an independent
binary series, and output is a dict where keys are column names and
values are event lists.
"""
if isinstance(labels, pd.Series):
labels = validate_series(
labels, check_freq=False, check_categorical=False
)
labels = labels == 1
if merge_consecutive is None:
if freq_as_period and (labels.index.freq is not None):
merge_consecutive = True
else:
merge_consecutive = False
if not merge_consecutive:
if freq_as_period and (labels.index.freq is not None):
period_end = pd.date_range(
start=labels.index[1],
periods=len(labels.index),
freq=labels.index.freq,
) - pd.Timedelta(
"1ns"
) # type: pd.DatetimeIndex
return [
(start, end) if start != end else start
for start, end in zip(
list(labels.index[labels]), list(period_end[labels])
)
]
else:
return list(labels.index[labels])
else:
labels_values = labels.values.astype(int).reshape(
-1, 1
) # type: np.ndarray
mydiff = np.vstack(
[
labels_values[0, :] - 0,
np.diff(labels_values, axis=0),
0 - labels_values[-1, :],
]
) # type: np.ndarray
starts =
|
np.argwhere(mydiff == 1)
|
numpy.argwhere
|
'''
setting up the datasets for ggnn
'''
import init_path
# system import
from copy import deepcopy
# compute
import numpy as np
import random
# torch import
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data as data_utils
import torchvision.transforms as transforms
# local imports
class VerifyGraphDataset(data_utils.Dataset):
def __init__(self, samples, args, valset=False):
self.samples = samples
self.args = args
if not valset:
self.valset = False
self.graph_size = args.graph_size
else:
self.valset = True
self.graph_size = args.val_graph_size
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
        '''Return stacked (state, annotation, adjacency, label) tensors for one
        positive pair (g1, its isomorphic copy) and one negative pair (g1, g2),
        with label [1, 0].
        '''
g1 = self.samples[index]
# the isomorphic graph
g1_prime = deepcopy(g1)
g1_prime.generate_isomorphic()
# randomly sample another graph
g2 = Graph(self.args, valset_graph=self.valset)
while g2.same_connection(g1):
g2 = Graph(self.args, valset_graph=self.valset)
# need to return initial state, annotation, and adjacency matrix
# initial states are random
g1_state = np.random.rand(self.graph_size, self.args.state_dim)
g1_prime_state = np.random.rand(self.graph_size, self.args.state_dim)
g2_state = np.random.rand(self.graph_size, self.args.state_dim)
# g1_state = np.ones( (self.args.graph_size, self.args.state_dim) )
# g1_prime_state = np.ones( (self.args.graph_size, self.args.state_dim) )
# g2_state = np.ones( (self.args.graph_size, self.args.state_dim) )
# annotation state is generated by the given attributes
g1_anno = np.stack(g1.attr_list)
g1_prime_anno = np.stack(g1_prime.attr_list)
g2_anno = np.stack(g2.attr_list)
# adjacency matrix
g1_mat = compact2sparse_representation(g1.mat, self.args.edge_type)
g1_prime_mat = compact2sparse_representation(g1_prime.mat, self.args.edge_type)
g2_mat = compact2sparse_representation(g2.mat, self.args.edge_type)
# if random.randint(0, 1) == 1: # use same positive example
# label = np.array([1]).astype('float32')
# state_mat = np.dstack( (g1_state, g1_prime_state) )
# state_mat = np.transpose(state_mat, (2, 0, 1))
# anno_mat = np.dstack( (g1_anno, g1_prime_anno) )
# anno_mat = np.transpose(anno_mat, (2, 0, 1))
# adj_mat = np.dstack( ( g1_mat, g1_prime_mat) )
# adj_mat = np.transpose(adj_mat, (2, 0, 1))
# else:
# label = np.array([0]).astype('float32')
# state_mat = np.dstack( (g1_state, g2_state) )
# state_mat = np.transpose(state_mat, (2, 0, 1))
# anno_mat = np.dstack( (g1_anno, g2_anno) )
# anno_mat = np.transpose(anno_mat, (2, 0, 1))
# adj_mat = np.dstack( ( g1_mat, g2_mat) )
# adj_mat = np.transpose(adj_mat, (2, 0, 1))
label = np.array([1, 0]).astype('float32')
state_mat = np.dstack( (g1_state, g1_prime_state, g1_state, g2_state) )
state_mat = np.transpose(state_mat, (2, 0, 1))
anno_mat = np.dstack( (g1_anno, g1_prime_anno, g1_anno, g2_anno) )
anno_mat = np.transpose(anno_mat, (2, 0, 1))
adj_mat = np.dstack( ( g1_mat, g1_prime_mat, g1_mat, g2_mat) )
adj_mat = np.transpose(adj_mat, (2, 0, 1))
return state_mat.astype('float32'), anno_mat.astype('float32'), \
adj_mat.astype('float32'), label
def get_dloader(args, num, val_set=False, test=False):
    '''Generate <num> random graphs for the given split and wrap them in a
    DataLoader.
    '''
data = generate_graph(num, args, val_set)
dataset = VerifyGraphDataset(data, args, valset=val_set)
if test:
dataloader = data_utils.DataLoader(dataset,
batch_size=args.bs
)
else:
dataloader = data_utils.DataLoader(dataset,
args.bs, pin_memory=False,
num_workers=args.num_workers
)
return dataloader
def generate_graph(num, args, val_set):
''' generate <num> number of graphs for GGNN
return a list of Graph structures
'''
graph_list = []
for i in range(num):
graph_list.append(
Graph(args, valset_graph=val_set)
)
pass
return graph_list
def compact2sparse_representation(mat, total_edge_type):
    ''' convert a compact adjacency matrix to a sparse matrix
'''
N, _ = mat.shape
sparse_mat = np.zeros((N, N * total_edge_type * 2))
# fill the in's and out's in the sparse matrix
for i in range(N):
for j in range(N):
if mat[i, j] == 0: continue
edge_type = mat[i, j]
_from = i
_to = j
# fill in
in_x = j
in_y = i + N * (edge_type - 1)
sparse_mat[int(in_x), int(in_y)] = 1
# fill out
# need to skip all the in's
out_x = i
out_y = N * total_edge_type + j + N * (edge_type - 1)
sparse_mat[int(out_x), int(out_y)] = 1
return sparse_mat.astype('int')
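# Worked example (assumed tiny graph): with N = 2 nodes, total_edge_type = 2
# and a single edge 0 -> 1 of type 1 (mat = [[0, 1], [0, 0]]), the result has
# shape (2, N * total_edge_type * 2) = (2, 8) with
#   sparse_mat[1, 0] = 1   # incoming-edge block, type 1
#   sparse_mat[0, 5] = 1   # outgoing-edge block, type 1 (offset N * total_edge_type)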
def adj_mat2list(mat):
''' convert an adjacency matrix to a list
return:
a dictionary - key: node id
val: node id that connects to
'''
N, _ = mat.shape
adj_list = {}
for i in range(N):
adj_list[i] = np.where(mat[i] != 0)[0].tolist()
return adj_list
def adj_list2mat(adj_list):
''' convert an adjacency list to a matrix
input:
a dictionary of size N describing the connection
relationship
return:
a numpy array (N x N) describing the connection
between nodes
'''
N = len(adj_list)
mat = np.zeros((N, N))
for node, connected_nodes in adj_list.items():
for x in connected_nodes:
mat[node][x] = 1
return mat.astype(int)
def generate_isomorphic_graph(adj_list):
    ''' @brief: generate an isomorphic graph based on
        the given adjacency list
return: a new adj_list that is isomorphic
to the graph described by the adj_list
'''
N = len(adj_list)
# shuffle the list to get a new one
original_order = list(range(N))
shuffled_order = list(range(N))
random.shuffle( shuffled_order )
# construct a mapping from the old index to the new index
old2new_index = {}
for i, x in enumerate(shuffled_order):
old2new_index[x] = original_order[i]
# construct the new adj_list
new_adj_list = {}
for i, x in enumerate(shuffled_order):
new_adj_list[i] = []
for y in adj_list[x]:
new_adj_list[i].append( old2new_index[y] )
return new_adj_list
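def _example_adjacency_helpers():
    """Assumed demo, not part of the original module: round-trip a small
    adjacency matrix through the list representation and build an isomorphic
    relabelling of the same graph."""
    mat = np.array([[0, 1, 0],
                    [1, 0, 1],
                    [0, 1, 0]])
    adj_list = adj_mat2list(mat)              # {0: [1], 1: [0, 2], 2: [1]}
    assert np.array_equal(adj_list2mat(adj_list), mat)
    iso_list = generate_isomorphic_graph(adj_list)
    return adj_list2mat(iso_list)             # same graph, shuffled node ids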
class Graph():
def __init__(self, args, valset_graph=False):
self.args = args
if valset_graph:
self.graph_size = self.args.val_graph_size
else:
self.graph_size = self.args.graph_size
self.mat = self._generate_connection(
self.graph_size, self.args.edge_type,
self.args.connection_rate
)
self.attr_list = self._generate_node_attr(
self.graph_size, self.args.node_type,
self.args.random_embed
)
self.graph_size = args.graph_size
def __eq__(self, other):
if self.graph_size != other.graph_size: return False
if np.array_equal(self.mat, other.mat) == False: return False
for i in range(self.args.graph_size):
if np.array_equal(self.attr_list[i], other.attr_list[i]) \
== False:
return False
return True
def generate_isomorphic(self):
''' recreate a new adjacency matrix
and replace the current
'''
cur_adj_list = adj_mat2list(self.mat)
iso_adj_list = generate_isomorphic_graph(cur_adj_list)
self.mat = adj_list2mat(iso_adj_list)
return
def same_connection(self, other):
''' 2 graphs having the same connection
edge type is also the same
'''
if np.array_equal(self.mat, other.mat): return True
return False
def resample_attr(self):
''' resample new attributes
'''
self.attr_list = self._generate_node_attr(
self.args.graph_size, self.args.node_type,
self.args.random_embed
)
return None
def _generate_connection(self, size, edge_type=3, connect_rate=0.1):
        ''' generate a connected graph
            in the form of an adjacency matrix
'''
mat = np.zeros((size, size))
node_id = 0
while node_id < size:
connected = False
# flip coin on column
for i in range(size):
if i == node_id: continue
if
|
np.random.binomial(1, connect_rate, 1)
|
numpy.random.binomial
|
import os
import sys
import numpy as np
import time
import glob
import pickle
from sklearn.neighbors import KDTree
import tensorflow as tf
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
DATA_DIR = os.path.join('/data/dataset/', 'SensatUrban')
if not os.path.exists(DATA_DIR):
raise IOError(f"{DATA_DIR} not found!")
from utils.ply import read_ply, write_ply
from .custom_dataset import CustomDataset, grid_subsampling, tf_batch_subsampling, tf_batch_neighbors
class SensatUrbanDataset(CustomDataset):
def __init__(self, config, input_threads=8):
"""Class to handle SensatUrban dataset for scene segmentation task.
Args:
config: config file
            input_threads: number of threads used to process input elements in parallel
"""
super(SensatUrbanDataset, self).__init__()
self.config = config
self.num_threads = input_threads
self.trainval = config.get('trainval', False)
# Dict from labels to names
self.path = DATA_DIR
self.label_to_names = {0: 'Ground', 1: 'High Vegetation', 2: 'Buildings', 3: 'Walls',
4: 'Bridge', 5: 'Parking', 6: 'Rail', 7: 'traffic Roads', 8: 'Street Furniture',
9: 'Cars', 10: 'Footpath', 11: 'Bikes', 12: 'Water'}
# Initiate a bunch of variables concerning class labels
self.init_labels()
config.num_classes = self.num_classes
# Number of input threads
self.num_threads = input_threads
self.all_files = np.sort(glob.glob(os.path.join(self.path, 'original_block_ply', '*.ply')))
self.val_file_name = ['birmingham_block_1',
'birmingham_block_5',
'cambridge_block_10',
'cambridge_block_7']
self.test_file_name = ['birmingham_block_2', 'birmingham_block_8',
'cambridge_block_15', 'cambridge_block_22',
'cambridge_block_16', 'cambridge_block_27']
# Some configs
self.num_gpus = config.num_gpus
self.first_subsampling_dl = config.first_subsampling_dl
self.in_features_dim = config.in_features_dim
self.num_layers = config.num_layers
self.downsample_times = config.num_layers - 1
self.density_parameter = config.density_parameter
self.batch_size = config.batch_size
self.augment_scale_anisotropic = config.augment_scale_anisotropic
self.augment_symmetries = config.augment_symmetries
self.augment_rotation = config.augment_rotation
self.augment_scale_min = config.augment_scale_min
self.augment_scale_max = config.augment_scale_max
self.augment_noise = config.augment_noise
self.augment_color = config.augment_color
self.epoch_steps = config.epoch_steps
self.validation_size = config.validation_size
self.in_radius = config.in_radius
# initialize
self.num_per_class = np.zeros(self.num_classes)
self.ignored_labels = np.array([])
self.val_proj = []
self.val_labels = []
self.test_proj = []
self.test_labels = []
self.possibility = {}
self.min_possibility = {}
self.input_trees = {'training': [], 'validation': [], 'test': []}
self.input_colors = {'training': [], 'validation': [], 'test': []}
self.input_labels = {'training': [], 'validation': [], 'test': []}
self.input_names = {'training': [], 'validation': [], 'test': []}
# input subsampling
self.load_sub_sampled_clouds(self.first_subsampling_dl)
self.batch_limit = self.calibrate_batches()
print("batch_limit: ", self.batch_limit)
self.neighborhood_limits = [26, 31, 38, 41, 39]
self.neighborhood_limits = [int(l * self.density_parameter // 5) for l in self.neighborhood_limits]
print("neighborhood_limits: ", self.neighborhood_limits)
# Get generator and mapping function
gen_function, gen_types, gen_shapes = self.get_batch_gen('training')
gen_function_val, _, _ = self.get_batch_gen('validation')
gen_function_test, _, _ = self.get_batch_gen('test')
map_func = self.get_tf_mapping()
self.train_data = tf.data.Dataset.from_generator(gen_function, gen_types, gen_shapes)
self.train_data = self.train_data.map(map_func=map_func, num_parallel_calls=self.num_threads)
self.train_data = self.train_data.prefetch(10)
self.val_data = tf.data.Dataset.from_generator(gen_function_val, gen_types, gen_shapes)
self.val_data = self.val_data.map(map_func=map_func, num_parallel_calls=self.num_threads)
self.val_data = self.val_data.prefetch(10)
self.test_data = tf.data.Dataset.from_generator(gen_function_test, gen_types, gen_shapes)
self.test_data = self.test_data.map(map_func=map_func, num_parallel_calls=self.num_threads)
self.test_data = self.test_data.prefetch(10)
        # create an iterator of the correct shape and type
iter = tf.data.Iterator.from_structure(self.train_data.output_types, self.train_data.output_shapes)
self.flat_inputs = [None] * self.num_gpus
for i in range(self.num_gpus):
self.flat_inputs[i] = iter.get_next()
# create the initialisation operations
self.train_init_op = iter.make_initializer(self.train_data)
self.val_init_op = iter.make_initializer(self.val_data)
self.test_init_op = iter.make_initializer(self.test_data)
@staticmethod
def get_num_class_from_label(labels, total_class):
num_pts_per_class = np.zeros(total_class, dtype=np.int32)
# original class distribution
val_list, counts = np.unique(labels, return_counts=True)
for idx, val in enumerate(val_list):
num_pts_per_class[val] += counts[idx]
return num_pts_per_class
@staticmethod
def get_class_weights(num_per_class, sqrt=True):
# # pre-calculate the number of points in each category
frequency = num_per_class / float(sum(num_per_class))
if sqrt:
ce_label_weight = 1 / np.sqrt(frequency + 0.02)
else:
ce_label_weight = 1 / (frequency + 0.02)
return np.expand_dims(ce_label_weight, axis=0)
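    # Worked example (assumed counts): with num_per_class = [90, 10] the class
    # frequencies are [0.9, 0.1]; with sqrt=True the returned weights are
    # 1 / sqrt(freq + 0.02) ~= [1.04, 2.89], so rare classes are up-weighted.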
def load_sub_sampled_clouds(self, sub_grid_size):
tree_path = os.path.join(DATA_DIR, 'grid_{:.3f}'.format(sub_grid_size))
for i, file_path in enumerate(self.all_files):
t0 = time.time()
cloud_name = file_path.split('/')[-1][:-4]
if cloud_name in self.test_file_name:
cloud_split = 'test'
elif cloud_name in self.val_file_name:
if self.trainval:
cloud_split = 'training'
else:
cloud_split = 'validation'
else:
cloud_split = 'training'
# Name of the input files
kd_tree_file = os.path.join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
sub_ply_file = os.path.join(tree_path, '{:s}.ply'.format(cloud_name))
data = read_ply(sub_ply_file)
sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T
sub_labels = data['class']
# compute num_per_class in training set
if cloud_split == 'training':
self.num_per_class += self.get_num_class_from_label(sub_labels, self.num_classes)
self.num_per_class_weights = self.get_class_weights(self.num_per_class)
# Read pkl with search tree
with open(kd_tree_file, 'rb') as f:
search_tree = pickle.load(f)
self.input_trees[cloud_split] += [search_tree]
self.input_colors[cloud_split] += [sub_colors]
self.input_labels[cloud_split] += [sub_labels]
self.input_names[cloud_split] += [cloud_name]
size = sub_colors.shape[0] * 4 * 7
print('{:s} {:.1f} MB loaded in {:.1f}s'.format(kd_tree_file.split('/')[-1], size * 1e-6, time.time() - t0))
print('\nPreparing reprojected indices for testing')
# Get number of clouds
self.num_training = len(self.input_trees['training'])
self.num_validation = len(self.input_trees['validation'])
self.num_test = len(self.input_trees['test'])
# Get validation and test reprojected indices
for i, file_path in enumerate(self.all_files):
t0 = time.time()
cloud_name = file_path.split('/')[-1][:-4]
# val projection and labels
if cloud_name in self.val_file_name:
proj_file = os.path.join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
with open(proj_file, 'rb') as f:
proj_idx, labels = pickle.load(f)
self.val_proj += [proj_idx]
self.val_labels += [labels]
print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0))
# test projection and labels
if cloud_name in self.test_file_name:
proj_file = os.path.join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
with open(proj_file, 'rb') as f:
proj_idx, labels = pickle.load(f)
self.test_proj += [proj_idx]
self.test_labels += [labels]
print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0))
def get_batch_gen(self, split):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
:param split: string in "training", "validation" or "test"
:return: gen_func, gen_types, gen_shapes
"""
############
# Parameters
############
# Initiate parameters depending on the chosen split
if split == 'training':
# First compute the number of point we want to pick in each cloud and for each class
epoch_n = self.epoch_steps * self.batch_size
random_pick_n = None
elif split == 'validation':
# First compute the number of point we want to pick in each cloud and for each class
epoch_n = self.validation_size * self.batch_size
elif split == 'test':
# First compute the number of point we want to pick in each cloud and for each class
epoch_n = self.validation_size * self.batch_size
else:
raise ValueError('Split argument in data generator should be "training", "validation" or "test"')
# Initiate potentials for regular generation
if not hasattr(self, 'potentials'):
self.potentials = {}
self.min_potentials = {}
# Reset potentials
self.potentials[split] = []
self.min_potentials[split] = []
data_split = split
for i, tree in enumerate(self.input_trees[data_split]):
self.potentials[split] += [np.random.rand(tree.data.shape[0]) * 1e-3]
self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))]
def get_random_epoch_inds():
# Initiate container for indices
all_epoch_inds = np.zeros((2, 0), dtype=np.int32)
# Choose random points of each class for each cloud
for cloud_ind, cloud_labels in enumerate(self.input_labels[split]):
epoch_indices = np.empty((0,), dtype=np.int32)
for label_ind, label in enumerate(self.label_values):
if label not in self.ignored_labels:
label_indices = np.where(np.equal(cloud_labels, label))[0]
if len(label_indices) <= random_pick_n:
epoch_indices = np.hstack((epoch_indices, label_indices))
elif len(label_indices) < 50 * random_pick_n:
new_randoms = np.random.choice(label_indices, size=random_pick_n, replace=False)
epoch_indices = np.hstack((epoch_indices, new_randoms.astype(np.int32)))
else:
rand_inds = []
while len(rand_inds) < random_pick_n:
rand_inds = np.unique(np.random.choice(label_indices, size=5 * random_pick_n, replace=True))
epoch_indices = np.hstack((epoch_indices, rand_inds[:random_pick_n].astype(np.int32)))
# Stack those indices with the cloud index
epoch_indices = np.vstack((np.full(epoch_indices.shape, cloud_ind, dtype=np.int32), epoch_indices))
# Update the global indice container
all_epoch_inds = np.hstack((all_epoch_inds, epoch_indices))
return all_epoch_inds
##########################
# Def generators
##########################
def spatially_regular_gen():
            # Initiate concatenation lists
p_list = []
c_list = []
pl_list = []
pi_list = []
ci_list = []
batch_n = 0
# Generator loop
for i in range(epoch_n):
# Choose a random cloud
cloud_ind = int(np.argmin(self.min_potentials[split]))
# Choose point ind as minimum of potentials
point_ind = np.argmin(self.potentials[split][cloud_ind])
# Get points from tree structure
points = np.array(self.input_trees[data_split][cloud_ind].data, copy=False)
# Center point of input region
center_point = points[point_ind, :].reshape(1, -1)
# Add noise to the center point
noise = np.random.normal(scale=self.in_radius / 10, size=center_point.shape)
pick_point = center_point + noise.astype(center_point.dtype)
# Indices of points in input region
input_inds = self.input_trees[data_split][cloud_ind].query_radius(pick_point,
r=self.in_radius)[0]
# Number collected
n = input_inds.shape[0]
                # Update potentials (Tukey weights)
dists = np.sum(np.square((points[input_inds] - pick_point).astype(np.float32)), axis=1)
tukeys = np.square(1 - dists / np.square(self.in_radius))
tukeys[dists > np.square(self.in_radius)] = 0
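                # i.e. weight = (1 - (d / in_radius)^2)^2 for points inside the
                # picking radius and 0 outside, so nearby points become less
                # likely to be picked again.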
self.potentials[split][cloud_ind][input_inds] += tukeys
self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind]))
# Safe check for very dense areas
if n > self.batch_limit:
input_inds = np.random.choice(input_inds, size=int(self.batch_limit) - 1, replace=False)
n = input_inds.shape[0]
# Collect points and colors
input_points = (points[input_inds] - pick_point).astype(np.float32)
input_colors = self.input_colors[data_split][cloud_ind][input_inds]
if split == 'test':
input_labels = np.zeros(input_points.shape[0])
else:
input_labels = self.input_labels[data_split][cloud_ind][input_inds]
input_labels = np.array([self.label_to_idx[l] for l in input_labels])
# In case batch is full, yield it and reset it
if batch_n + n > self.batch_limit and batch_n > 0:
yield (np.concatenate(p_list, axis=0),
np.concatenate(c_list, axis=0),
np.concatenate(pl_list, axis=0),
np.array([tp.shape[0] for tp in p_list]),
np.concatenate(pi_list, axis=0),
np.array(ci_list, dtype=np.int32))
p_list = []
c_list = []
pl_list = []
pi_list = []
ci_list = []
batch_n = 0
# Add data to current batch
if n > 0:
p_list += [input_points]
c_list += [np.hstack((input_colors, input_points + pick_point))]
pl_list += [input_labels]
pi_list += [input_inds]
ci_list += [cloud_ind]
# Update batch size
batch_n += n
if batch_n > 0:
yield (np.concatenate(p_list, axis=0),
np.concatenate(c_list, axis=0),
np.concatenate(pl_list, axis=0),
np.array([tp.shape[0] for tp in p_list]),
np.concatenate(pi_list, axis=0),
np.array(ci_list, dtype=np.int32))
def random_balanced_gen():
# First choose the point we are going to look at for this epoch
# *************************************************************
# This generator cannot be used on test split
if split == 'training':
all_epoch_inds = get_random_epoch_inds()
elif split == 'validation':
all_epoch_inds = get_random_epoch_inds()
else:
raise ValueError('generator to be defined for test split.')
# Now create batches
# ******************
            # Initiate concatenation lists
p_list = []
c_list = []
pl_list = []
pi_list = []
ci_list = []
batch_n = 0
# Generator loop
for i, rand_i in enumerate(np.random.permutation(all_epoch_inds.shape[1])):
cloud_ind = all_epoch_inds[0, rand_i]
point_ind = all_epoch_inds[1, rand_i]
# Get points from tree structure
points = np.array(self.input_trees[split][cloud_ind].data, copy=False)
# Center point of input region
center_point = points[point_ind, :].reshape(1, -1)
# Add noise to the center point
                noise = np.random.normal(scale=self.in_radius / 10, size=center_point.shape)
pick_point = center_point + noise.astype(center_point.dtype)
# Indices of points in input region
input_inds = self.input_trees[split][cloud_ind].query_radius(pick_point,
                                                                              r=self.in_radius)[0]
# Number collected
n = input_inds.shape[0]
# Safe check for very dense areas
if n > self.batch_limit:
input_inds = np.random.choice(input_inds, size=int(self.batch_limit) - 1, replace=False)
n = input_inds.shape[0]
# Collect points and colors
input_points = (points[input_inds] - pick_point).astype(np.float32)
input_colors = self.input_colors[split][cloud_ind][input_inds]
input_labels = self.input_labels[split][cloud_ind][input_inds]
input_labels = np.array([self.label_to_idx[l] for l in input_labels])
# In case batch is full, yield it and reset it
if batch_n + n > self.batch_limit and batch_n > 0:
yield (np.concatenate(p_list, axis=0),
np.concatenate(c_list, axis=0),
np.concatenate(pl_list, axis=0),
np.array([tp.shape[0] for tp in p_list]),
np.concatenate(pi_list, axis=0),
np.array(ci_list, dtype=np.int32))
p_list = []
c_list = []
pl_list = []
pi_list = []
ci_list = []
batch_n = 0
# Add data to current batch
if n > 0:
p_list += [input_points]
c_list += [np.hstack((input_colors, input_points + pick_point))]
pl_list += [input_labels]
pi_list += [input_inds]
ci_list += [cloud_ind]
# Update batch size
batch_n += n
if batch_n > 0:
yield (np.concatenate(p_list, axis=0),
np.concatenate(c_list, axis=0),
np.concatenate(pl_list, axis=0),
np.array([tp.shape[0] for tp in p_list]),
|
np.concatenate(pi_list, axis=0)
|
numpy.concatenate
|
from personal.MaurizioFramework.ParameterTuning.BayesianSearch import BayesianSearch
from personal.MaurizioFramework.ParameterTuning.AbstractClassSearch import DictionaryKeys
from utils.definitions import ROOT_DIR
import pickle
from personal.MaurizioFramework.SLIM_BPR.Cython.SLIM_BPR_Cython import SLIM_BPR_Cython
from recommenders.similarity.dot_product import dot_product
from utils.datareader import Datareader
from utils.evaluator import Evaluator
from utils.bot import Bot_v1
from tqdm import tqdm
import scipy.sparse as sps
import numpy as np
import sys
def run_SLIM_bananesyan_search(URM_train, URM_validation, logFilePath = ROOT_DIR+"/results/logs_baysian/"):
recommender_class = SLIM_BPR_Cython
bananesyan_search = BayesianSearch(recommender_class, URM_validation=URM_validation,
evaluation_function=evaluateRecommendationsSpotify_BAYSIAN)
hyperparamethers_range_dictionary = {}
hyperparamethers_range_dictionary["topK"] = [100, 150, 200, 250, 300, 350, 400, 500]
hyperparamethers_range_dictionary["lambda_i"] = [1e-7,1e-6,1e-5,1e-4,1e-3,0.001,0.01,0.05,0.1]
hyperparamethers_range_dictionary["lambda_j"] = [1e-7,1e-6,1e-5,1e-4,1e-3,0.001,0.01,0.05,0.1]
hyperparamethers_range_dictionary["learning_rate"] = [0.1,0.01,0.001,0.0001,0.00005,0.000001, 0.0000001]
hyperparamethers_range_dictionary["minRatingsPerUser"] = [0, 5, 50, 100]
logFile = open(logFilePath + recommender_class.RECOMMENDER_NAME + "_BayesianSearch Results.txt", "a")
recommenderDictionary = {DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [],
DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {
"URM_train":URM_train,
"positive_threshold":0,
"URM_validation":URM_validation,
"final_model_sparse_weights":True,
"train_with_sparse_weights":True,
"symmetric" : True},
DictionaryKeys.FIT_POSITIONAL_ARGS: dict(),
DictionaryKeys.FIT_KEYWORD_ARGS: {
"epochs" : 5,
"beta_1" : 0.9,
"beta_2" : 0.999,
"validation_function": evaluateRecommendationsSpotify_RECOMMENDER,
"stop_on_validation":True ,
"sgd_mode" : 'adam',
"validation_metric" : "ndcg_t",
"lower_validatons_allowed":3,
"validation_every_n":1},
DictionaryKeys.FIT_RANGE_KEYWORD_ARGS: hyperparamethers_range_dictionary}
best_parameters = bananesyan_search.search(recommenderDictionary,
metric="ndcg_t",
n_cases=200,
output_root_path=""+logFilePath + recommender_class.RECOMMENDER_NAME,
parallelPoolSize=4)
logFile.write("best_parameters: {}".format(best_parameters))
logFile.flush()
logFile.close()
pickle.dump(best_parameters, open(logFilePath + recommender_class.RECOMMENDER_NAME + "_best_parameters", "wb"),
protocol=pickle.HIGHEST_PROTOCOL)
def evaluateRecommendationsSpotify_RECOMMENDER(recommender):
"""
THIS FUNCTION WORKS INSIDE THE RECOMMENDER
:param self:
:return:
"""
user_profile_batch = recommender.URM_train[pids_converted]
eurm = dot_product(user_profile_batch, recommender.W_sparse, k=500).tocsr()
recommendation_list =
|
np.zeros((10000, 500))
|
numpy.zeros
|
import client
import load_data
import logging
import numpy as np
import pickle
import random
import sys
from threading import Thread
import torch
import utils.dists as dists # pylint: disable=no-name-in-module
import math
def random_n(b1, b2, b3):
rand_list = []
out = [0, 0, 0, 0]
for i in range(20):
rand_list.append(random.randint(1, 100))
for rand in rand_list:
if rand <= b1:
out[0] += 1
elif b1 < rand <= b2:
out[1] += 1
elif b2 < rand <= b3:
out[2] += 1
else:
out[3] += 1
return out
def random_d(d, k=20):
rand_list = []
out = [0, 0, 0, 0]
for i in range(d):
rand_list.append(random.randint(1, 100))
for rand in rand_list:
if rand <= 25:
out[0] += 1
elif 25 < rand <= 50:
out[1] += 1
elif 50 < rand <= 75:
out[2] += 1
else:
out[3] += 1
pick = k
for i in range(4):
if pick == 0:
out[i] = 0
elif pick < out[i]:
out[i] = pick
pick = 0
else:
pick -= out[i]
return out
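# Worked example (assumed draw): if the d = 70 uniform draws land in the four
# bins as [18, 17, 20, 15], then with k = 20 the capping loop above keeps
# [18, 2, 0, 0] -- bins are filled in order until the k picks are used up.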
def random_sim(sample_clients):
ans = []
out = random_n(25, 50, 75)
pick = np.random.binomial(size=out[0], n=1, p=0.1)
pick = np.append(pick, np.random.binomial(size=out[1], n=1, p=0.3))
pick = np.append(pick, np.random.binomial(size=out[2], n=1, p=0.6))
pick = np.append(pick, np.random.binomial(size=out[3], n=1, p=0.9))
for i in range(len(pick)):
if pick[i] == 1:
ans.append(sample_clients[i])
return ans
def fedcs_sim(sample_clients):
ans = []
pick = np.random.binomial(size=20, n=1, p=0.9)
for i in range(len(pick)):
if pick[i] == 1:
ans.append(sample_clients[i])
return ans
def pow_d_sim(clients_per_round, class_a, class_b, class_c, class_d):
out = random_d(d=70)
pick_a =
|
np.random.binomial(size=out[0], n=1, p=0.1)
|
numpy.random.binomial
|
#!/usr/bin/env python3
import os
import sys
import errno
import numpy as np
import glob
import unittest
import socket
from shutil import which, rmtree
import multiprocessing as mp
import threading
from queue import Empty
from time import sleep
import yaml
from astropy.time import Time, TimeDelta
try:
import psrdada
except ImportError:
psrdada = None
from darc import AMBERListener, AMBERClustering, DADATrigger
from darc import util
from darc.definitions import TIME_UNIT
# only run this test if this script is run directly, _not_ in automated testing (pytest etc)
@unittest.skipUnless(__name__ == '__main__', "Skipping full test run in automated testing")
# skip if not running on arts041
@unittest.skipUnless(socket.gethostname() == 'arts041', "Test can only run on arts041")
# Skip if psrdada not available
@unittest.skipIf(psrdada is None or which('dada_db') is None, "psrdada not available")
class TestFullRun(unittest.TestCase):
def setUp(self):
"""
Set up the pipeline and observation software
"""
print("Setup")
# observation settings
files = glob.glob('/tank/data/sky/B1933+16/20200211_dump/dada/*.dada')
self.assertTrue(len(files) > 0)
output_dir = '/tank/users/oostrum/test_full_run'
amber_dir = os.path.join(output_dir, 'amber')
amber_conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'amber.conf')
amber_conf_dir = '/home/arts/.controller/amber_conf'
# ensure we start clean
rmtree(amber_dir)
util.makedirs(amber_dir)
# load the encoded parset
with open('/tank/data/sky/B1933+16/20200211_dump/parset', 'r') as f:
parset = f.read().strip()
# nreader: one for each programme reading from the buffer, i.e. 3x AMBER
self.settings = {'resolution': 1536 * 12500 * 12, 'nbuf': 5, 'key_i': 'aaaa',
'hdr_size': 40960, 'dada_files': files, 'nreader': 3,
'freq': 1370, 'amber_dir': amber_dir, 'nbatch': len(files) * 10,
'beam': 0, 'amber_config': amber_conf_file, 'amber_conf_dir': amber_conf_dir,
'min_freq': 1219.70092773, 'parset': parset, 'datetimesource': '2019-01-01-00:00:00.FAKE'}
# open custom config file
self.config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml')
with open(self.config_file, 'r') as f:
self.master_config = yaml.load(f, Loader=yaml.SafeLoader)['darc_master']
# create buffers
# stokes I
print("Creating buffers")
os.system('dada_db -d -k {key_i} 2>/dev/null ; dada_db -k {key_i} -a {hdr_size} '
'-b {resolution} -n {nbuf} -r {nreader}'.format(**self.settings))
# init GPU pipeline thread
self.t_amber = threading.Thread(target=self.amber, name='AMBER', daemon=True)
# init amber listener thread
self.listener = AMBERListener()
self.listener.set_source_queue(mp.Queue())
self.listener.set_target_queue(mp.Queue())
self.listener.start()
# init amber clustering thread
# do not connect to VOEvent server nor LOFAR trigger system
self.clustering = AMBERClustering(connect_vo=False, connect_lofar=False)
self.clustering.set_source_queue(self.listener.target_queue)
self.clustering.set_target_queue(mp.Queue())
self.clustering.start()
# init DADA trigger thread
self.dadatrigger = DADATrigger()
self.dadatrigger.set_source_queue(self.clustering.target_queue)
self.dadatrigger.start()
# init writer thread
self.t_disk_to_db = threading.Thread(target=self.writer, name='disk_to_db', daemon=True)
# init output listener thread
self.event_queue = mp.Queue()
self.t_dbevent = threading.Thread(target=self.dbevent, name='dbevent', daemon=True,
args=(self.event_queue,))
def tearDown(self):
"""
Clean up after pipeline run
"""
print("teardown")
# remove buffers
print("Removing buffers")
os.system('dada_db -d -k {key_i}'.format(**self.settings))
self.listener.source_queue.put('stop')
self.clustering.source_queue.put('stop')
self.dadatrigger.source_queue.put('stop')
def writer(self):
"""
Write data from disk into ringbuffer
"""
print("Starting writer")
# connect to the buffer
dada_writer = psrdada.Writer()
hex_key = int('0x{key_i}'.format(**self.settings), 16)
dada_writer.connect(hex_key)
# loop over dada files
for n, fname in enumerate(self.settings['dada_files']):
# open file
with open(fname, 'rb') as f:
# read the header of the first file
if n == 0:
# read header, strip empty bytes, convert to string, remove last newline
raw_hdr = f.read(self.settings['hdr_size']).rstrip(b'\x00').decode().strip()
# convert to dict by splitting on newline, then whitespace
hdr = dict([line.split(maxsplit=1) for line in raw_hdr.split('\n') if line])
dada_writer.setHeader(hdr)
else:
# skip header
f.seek(self.settings['hdr_size'])
# read data
while True:
data =
|
np.fromfile(f, count=self.settings['resolution'], dtype='uint8')
|
numpy.fromfile
|
# SPDX-License-Identifier: Apache-2.0
"""
tfl_math
"""
import logging
import numpy as np
from onnx.onnx_pb import TensorProto
from tf2onnx.handler import tfl_op
from tf2onnx import utils
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name
def separate_fused_activation_function(ctx, node):
activation_fn = node.attr['fused_activation_function'].s
del node.attr['fused_activation_function']
if activation_fn == b'RELU':
ctx.insert_new_node_on_output("Relu", node.output[0])
elif activation_fn == b'RELU6':
# This is a TF op. We will convert it on the 2nd pass.
shape = ctx.get_shape(node.output[0])
dtype = ctx.get_dtype(node.output[0])
new_node = ctx.make_node("Relu6", [node.output[0]], skip_conversion=False, shapes=[shape], dtypes=[dtype])
ctx.insert_node_on_output(new_node, node.output[0])
elif activation_fn == b'TANH':
ctx.insert_new_node_on_output("Tanh", node.output[0])
else:
# TODO: SIGN_BIT and RELU_N1_TO_1 not supported yet
utils.make_sure(activation_fn == b'NONE', "Unsupported fused activation function %s on node %s",
activation_fn, node.name)
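# For example (illustrative only, not generated output): a TFLite node such as
#   TFL_ADD(fused_activation_function=RELU)
# is rewritten by separate_fused_activation_function into the pair Add -> Relu
# on the node's output, so the fused activation becomes an explicit op that the
# later conversion passes can handle on their own.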
@tfl_op(["TFL_ADD"], tf_op="Add")
class TflAdd:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
separate_fused_activation_function(ctx, node)
@tfl_op(["TFL_SUB"], tf_op="Sub")
class TflSub:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
separate_fused_activation_function(ctx, node)
@tfl_op(["TFL_MUL"], tf_op="Mul")
class TflMul:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
separate_fused_activation_function(ctx, node)
@tfl_op(["TFL_DIV"], tf_op="Div")
class TflDiv:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
separate_fused_activation_function(ctx, node)
@tfl_op(["TFL_LOGISTIC"], tf_op="Sigmoid")
class TflLogistic:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
pass
@tfl_op(["TFL_REDUCE_MAX"], tf_op="Max")
@tfl_op(["TFL_REDUCE_ANY"], tf_op="Any")
@tfl_op(["TFL_REDUCE_PROD"], tf_op="Prod")
class TflReduceOp:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
pass
@tfl_op(["TFL_LOCAL_RESPONSE_NORMALIZATION"], tf_op="LRN")
class TFlLocalResponseNormalizationOp:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
node.attr["depth_radius"] = node.attr["radius"]
del node.attr["radius"]
@tfl_op(["TFL_RANGE"], tf_op="Range")
class TflRangeOp:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
node.set_attr("Tidx", ctx.get_dtype(node.output[0]))
@tfl_op(["TFL_QUANTIZE"], onnx_op="QuantizeLinear")
class TflQuantizeOp:
@classmethod
def version_1(cls, ctx, node, dequantize=False, **kwargs):
# We could just let the TFL_QUANTIZE fall through as an unconverted op, but they are added programmatically
# so that might be confusing.
raise ValueError("Opset 10 is required for quantization. Consider using the --dequantize flag or --opset 10.")
@classmethod
def version_10(cls, ctx, node, **kwargs):
scale = node.get_attr_value('scale')
zero_point = node.get_attr_value('zero_point')
axis = node.get_attr_value('quantized_dimension')
np_q_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.output[0]))
if len(scale) > 1 or len(zero_point) > 1:
utils.make_sure(ctx.opset >= 13, "Opset 13 is required for per-axis quantization for node %s", node.name)
node.set_attr("axis", axis)
scale_node = ctx.make_const(utils.make_name("scale"), np.array(scale[0], dtype=np.float32))
zero_point_node = ctx.make_const(utils.make_name("zero_point"), np.array(zero_point[0], dtype=np_q_type))
ctx.replace_inputs(node, [node.input[0], scale_node.output[0], zero_point_node.output[0]])
del node.attr["scale"]
del node.attr["zero_point"]
del node.attr["quantized_dimension"]
if "min" in node.attr:
del node.attr["min"]
if "max" in node.attr:
del node.attr["max"]
@tfl_op(["TFL_DEQUANTIZE"], onnx_op="DequantizeLinear")
class TflDequantizeOp:
@classmethod
def version_1(cls, ctx, node, **kwargs):
scale = np.array(node.get_attr_value('scale'), dtype=np.float32)
zero_point = np.array(node.get_attr_value('zero_point'), dtype=np.float32)
axis = node.get_attr_value('quantized_dimension')
in_rank = ctx.get_rank(node.input[0])
def expand_tensor(t):
if t.shape == (1,):
return t[0]
utils.make_sure(in_rank is not None, "Cannot dequantize node %s with unknown input rank", node.name)
new_shape = [1] * in_rank
new_shape[axis] = t.shape[0]
return t.reshape(new_shape)
scale = expand_tensor(scale)
zero_point = expand_tensor(zero_point)
if node.inputs[0].is_const():
x_val = node.inputs[0].get_tensor_value(as_list=False).astype(np.float32)
new_val = (x_val - zero_point) * scale
dequant_const = ctx.make_const(utils.make_name(node.name), new_val)
ctx.replace_all_inputs(node.output[0], dequant_const.output[0])
ctx.remove_node(node.name)
else:
scale_const = ctx.make_const(utils.make_name(node.name + "_scale"), scale).output[0]
zero_point_const = ctx.make_const(utils.make_name(node.name + "_zero_point"), zero_point).output[0]
cast_node = ctx.make_node("Cast", [node.input[0]], attr={'to': TensorProto.FLOAT},
op_name_scope=node.name).output[0]
sub_node = ctx.make_node("Sub", [cast_node, zero_point_const], op_name_scope=node.name).output[0]
mul_node = ctx.make_node("Mul", [sub_node, scale_const], op_name_scope=node.name).output[0]
ctx.replace_all_inputs(node.output[0], mul_node)
ctx.remove_node(node.name)
@classmethod
def version_10(cls, ctx, node, dequantize=False, **kwargs):
if dequantize:
cls.version_1(ctx, node, dequantize=True, **kwargs)
return
scale = node.get_attr_value('scale')
zero_point = node.get_attr_value('zero_point')
axis = node.get_attr_value('quantized_dimension')
np_q_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[0]))
if len(scale) > 1 or len(zero_point) > 1:
utils.make_sure(ctx.opset >= 13, "Opset 13 is required for per-axis quantization for node %s", node.name)
node.set_attr("axis", axis)
scale_node = ctx.make_const(utils.make_name("scale"), np.array(scale, dtype=np.float32))
zero_point_node = ctx.make_const(utils.make_name("zero_point"), np.array(zero_point, dtype=np_q_type))
else:
scale_node = ctx.make_const(utils.make_name("scale"), np.array(scale[0], dtype=np.float32))
zero_point_node = ctx.make_const(utils.make_name("zero_point"), np.array(zero_point[0], dtype=np_q_type))
ctx.replace_inputs(node, [node.input[0], scale_node.output[0], zero_point_node.output[0]])
del node.attr["scale"]
del node.attr["zero_point"]
del node.attr["quantized_dimension"]
if "min" in node.attr:
del node.attr["min"]
if "max" in node.attr:
del node.attr["max"]
def dynamic_quantize_inputs(ctx, node):
if ctx.opset < 11:
logger.warning("Opset 11 is required for asymmetric_quantize_inputs of node %s", node.name)
return
for i in range(len(node.input)):
# Don't quantize inputs that are already quantized
if node.inputs[i].type in ["DequantizeLinear", "TFL_DEQUANTIZE"]:
continue
dyn_quant = ctx.make_node("DynamicQuantizeLinear", [node.input[i]], output_count=3, op_name_scope=node.name)
dyn_quant.skip_conversion = True
dequant = ctx.make_node("DequantizeLinear", dyn_quant.output, op_name_scope=node.name)
dequant.skip_conversion = True
ctx.replace_input(node, node.input[i], dequant.output[0], input_index=i)
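# Net effect of dynamic_quantize_inputs (sketch): each not-yet-quantized input x_i
# is rerouted through DynamicQuantizeLinear(x_i) -> DequantizeLinear before it
# reaches the original consumer, mirroring TFLite's asymmetric_quantize_inputs
# behaviour with standard ONNX ops.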
@tfl_op(["TFL_FULLY_CONNECTED"])
class TflFullyConnectedOp:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
separate_fused_activation_function(ctx, node)
utils.make_sure(node.attr['weights_format'].s == b'DEFAULT',
"Only default weights format supported for fully connected op")
utils.make_sure(node.attr['keep_num_dims'].i == 0,
"Only keep_num_dims=False supported for fully connected op")
if node.attr['asymmetric_quantize_inputs'].i == 1:
dynamic_quantize_inputs(ctx, node)
transpose_node = ctx.insert_new_node_on_input(node, "Transpose", node.input[1],
name=None, input_index=1, perm=[1, 0])
transpose_node.skip_conversion = True
node.set_attr("transpose_a", 0)
node.set_attr("transpose_b", 0)
node.type = "MatMul"
if len(node.input) == 3:
# FIXME: Add a test for this
bias_inp = node.input[2]
ctx.replace_inputs(node, node.input[:2])
add_node = ctx.insert_new_node_on_output("Add", node.output[0], inputs=[node.output[0], bias_inp])
add_node.skip_conversion = True
del node.attr["weights_format"]
del node.attr["keep_num_dims"]
del node.attr["asymmetric_quantize_inputs"]
@tfl_op(["TFL_SOFTMAX"], tf_op="Softmax")
class TFlSoftmaxOp:
@classmethod
def to_tf(cls, ctx, node, **kwargs):
beta = node.get_attr_value("beta")
beta_node = ctx.make_const(utils.make_name("beta"),
|
np.array(beta, dtype=np.float32)
|
numpy.array
|
from datetime import datetime
import numpy as np
from hdmf.backends.hdf5 import H5DataIO
from hdmf.build import ObjectMapper
from hdmf.data_utils import DataChunkIterator
from hdmf.spec import DatasetSpec, RefSpec, DtypeSpec
from hdmf.testing import TestCase
class TestConvertDtype(TestCase):
def test_value_none(self):
spec = DatasetSpec('an example dataset', 'int', name='data')
self.assertTupleEqual(ObjectMapper.convert_dtype(spec, None), (None, 'int'))
spec = DatasetSpec('an example dataset', RefSpec(reftype='object', target_type='int'), name='data')
self.assertTupleEqual(ObjectMapper.convert_dtype(spec, None), (None, 'object'))
# do full matrix test of given value x and spec y, what does convert_dtype return?
def test_convert_to_64bit_spec(self):
"""
Test that if given any value for a spec with a 64-bit dtype, convert_dtype will convert to the spec type.
Also test that if the given value is not the same as the spec, convert_dtype raises a warning.
"""
spec_type = 'float64'
value_types = ['double', 'float64']
self._test_convert_alias(spec_type, value_types)
spec_type = 'float64'
value_types = ['float', 'float32', 'long', 'int64', 'int', 'int32', 'int16', 'short', 'int8', 'uint64', 'uint',
'uint32', 'uint16', 'uint8', 'bool']
self._test_convert_higher_precision_helper(spec_type, value_types)
spec_type = 'int64'
value_types = ['long', 'int64']
self._test_convert_alias(spec_type, value_types)
spec_type = 'int64'
value_types = ['double', 'float64', 'float', 'float32', 'int', 'int32', 'int16', 'short', 'int8', 'uint64',
'uint', 'uint32', 'uint16', 'uint8', 'bool']
self._test_convert_higher_precision_helper(spec_type, value_types)
spec_type = 'uint64'
value_types = ['uint64']
self._test_convert_alias(spec_type, value_types)
spec_type = 'uint64'
value_types = ['double', 'float64', 'float', 'float32', 'long', 'int64', 'int', 'int32', 'int16', 'short',
'int8', 'uint', 'uint32', 'uint16', 'uint8', 'bool']
self._test_convert_higher_precision_helper(spec_type, value_types)
def test_convert_to_float32_spec(self):
"""Test conversion of various types to float32.
If given a value with precision > float32 and float base type, convert_dtype will keep the higher precision.
If given a value with 64-bit precision and different base type, convert_dtype will convert to float64.
If given a value that is float32, convert_dtype will convert to float32.
If given a value with precision <= float32, convert_dtype will convert to float32 and raise a warning.
"""
spec_type = 'float32'
value_types = ['double', 'float64']
self._test_keep_higher_precision_helper(spec_type, value_types)
value_types = ['long', 'int64', 'uint64']
expected_type = 'float64'
self._test_change_basetype_helper(spec_type, value_types, expected_type)
value_types = ['float', 'float32']
self._test_convert_alias(spec_type, value_types)
value_types = ['int', 'int32', 'int16', 'short', 'int8', 'uint', 'uint32', 'uint16', 'uint8', 'bool']
self._test_convert_higher_precision_helper(spec_type, value_types)
def test_convert_to_int32_spec(self):
"""Test conversion of various types to int32.
If given a value with precision > int32 and int base type, convert_dtype will keep the higher precision.
If given a value with 64-bit precision and different base type, convert_dtype will convert to int64.
If given a value that is int32, convert_dtype will convert to int32.
If given a value with precision <= int32, convert_dtype will convert to int32 and raise a warning.
"""
spec_type = 'int32'
value_types = ['int64', 'long']
self._test_keep_higher_precision_helper(spec_type, value_types)
value_types = ['double', 'float64', 'uint64']
expected_type = 'int64'
self._test_change_basetype_helper(spec_type, value_types, expected_type)
value_types = ['int', 'int32']
self._test_convert_alias(spec_type, value_types)
value_types = ['float', 'float32', 'int16', 'short', 'int8', 'uint', 'uint32', 'uint16', 'uint8', 'bool']
self._test_convert_higher_precision_helper(spec_type, value_types)
def test_convert_to_uint32_spec(self):
"""Test conversion of various types to uint32.
If given a value with precision > uint32 and uint base type, convert_dtype will keep the higher precision.
If given a value with 64-bit precision and different base type, convert_dtype will convert to uint64.
If given a value that is uint32, convert_dtype will convert to uint32.
If given a value with precision <= uint32, convert_dtype will convert to uint32 and raise a warning.
"""
spec_type = 'uint32'
value_types = ['uint64']
self._test_keep_higher_precision_helper(spec_type, value_types)
value_types = ['double', 'float64', 'long', 'int64']
expected_type = 'uint64'
self._test_change_basetype_helper(spec_type, value_types, expected_type)
value_types = ['uint', 'uint32']
self._test_convert_alias(spec_type, value_types)
value_types = ['float', 'float32', 'int', 'int32', 'int16', 'short', 'int8', 'uint16', 'uint8', 'bool']
self._test_convert_higher_precision_helper(spec_type, value_types)
def test_convert_to_int16_spec(self):
"""Test conversion of various types to int16.
If given a value with precision > int16 and int base type, convert_dtype will keep the higher precision.
If given a value with 64-bit precision and different base type, convert_dtype will convert to int64.
If given a value with 32-bit precision and different base type, convert_dtype will convert to int32.
If given a value that is int16, convert_dtype will convert to int16.
If given a value with precision <= int16, convert_dtype will convert to int16 and raise a warning.
"""
spec_type = 'int16'
value_types = ['long', 'int64', 'int', 'int32']
self._test_keep_higher_precision_helper(spec_type, value_types)
value_types = ['double', 'float64', 'uint64']
expected_type = 'int64'
self._test_change_basetype_helper(spec_type, value_types, expected_type)
value_types = ['float', 'float32', 'uint', 'uint32']
expected_type = 'int32'
self._test_change_basetype_helper(spec_type, value_types, expected_type)
value_types = ['int16', 'short']
self._test_convert_alias(spec_type, value_types)
value_types = ['int8', 'uint16', 'uint8', 'bool']
self._test_convert_higher_precision_helper(spec_type, value_types)
def test_convert_to_uint16_spec(self):
"""Test conversion of various types to uint16.
If given a value with precision > uint16 and uint base type, convert_dtype will keep the higher precision.
If given a value with 64-bit precision and different base type, convert_dtype will convert to uint64.
If given a value with 32-bit precision and different base type, convert_dtype will convert to uint32.
If given a value that is uint16, convert_dtype will convert to uint16.
If given a value with precision <= uint16, convert_dtype will convert to uint16 and raise a warning.
"""
spec_type = 'uint16'
value_types = ['uint64', 'uint', 'uint32']
self._test_keep_higher_precision_helper(spec_type, value_types)
value_types = ['double', 'float64', 'long', 'int64']
expected_type = 'uint64'
self._test_change_basetype_helper(spec_type, value_types, expected_type)
value_types = ['float', 'float32', 'int', 'int32']
expected_type = 'uint32'
self._test_change_basetype_helper(spec_type, value_types, expected_type)
value_types = ['uint16']
self._test_convert_alias(spec_type, value_types)
value_types = ['int16', 'short', 'int8', 'uint8', 'bool']
self._test_convert_higher_precision_helper(spec_type, value_types)
def test_convert_to_bool_spec(self):
"""Test conversion of various types to bool.
If given a value with type bool, convert_dtype will convert to bool.
If given a value with type int8/uint8, convert_dtype will convert to bool and raise a warning.
Otherwise, convert_dtype will raise an error.
"""
spec_type = 'bool'
value_types = ['bool']
self._test_convert_alias(spec_type, value_types)
value_types = ['uint8', 'int8']
self._test_convert_higher_precision_helper(spec_type, value_types)
value_types = ['double', 'float64', 'float', 'float32', 'long', 'int64', 'int', 'int32', 'int16', 'short',
'uint64', 'uint', 'uint32', 'uint16']
self._test_convert_mismatch_helper(spec_type, value_types)
def _get_type(self, type_str):
return ObjectMapper._ObjectMapper__dtypes[type_str] # apply ObjectMapper mapping string to dtype
def _test_convert_alias(self, spec_type, value_types):
data = 1
spec = DatasetSpec('an example dataset', spec_type, name='data')
match = (self._get_type(spec_type)(data), self._get_type(spec_type))
for dtype in value_types:
value = self._get_type(dtype)(data) # convert data to given dtype
with self.subTest(dtype=dtype):
ret = ObjectMapper.convert_dtype(spec, value)
self.assertTupleEqual(ret, match)
self.assertIs(ret[0].dtype.type, match[1])
def _test_convert_higher_precision_helper(self, spec_type, value_types):
data = 1
spec = DatasetSpec('an example dataset', spec_type, name='data')
match = (self._get_type(spec_type)(data), self._get_type(spec_type))
for dtype in value_types:
value = self._get_type(dtype)(data) # convert data to given dtype
with self.subTest(dtype=dtype):
s = np.dtype(self._get_type(spec_type))
g = np.dtype(self._get_type(dtype))
msg = ("Spec 'data': Value with data type %s is being converted to data type %s as specified."
% (g.name, s.name))
with self.assertWarnsWith(UserWarning, msg):
ret = ObjectMapper.convert_dtype(spec, value)
self.assertTupleEqual(ret, match)
self.assertIs(ret[0].dtype.type, match[1])
def _test_keep_higher_precision_helper(self, spec_type, value_types):
data = 1
spec = DatasetSpec('an example dataset', spec_type, name='data')
for dtype in value_types:
value = self._get_type(dtype)(data)
match = (value, self._get_type(dtype))
with self.subTest(dtype=dtype):
ret = ObjectMapper.convert_dtype(spec, value)
self.assertTupleEqual(ret, match)
self.assertIs(ret[0].dtype.type, match[1])
def _test_change_basetype_helper(self, spec_type, value_types, exp_type):
data = 1
spec = DatasetSpec('an example dataset', spec_type, name='data')
match = (self._get_type(exp_type)(data), self._get_type(exp_type))
for dtype in value_types:
value = self._get_type(dtype)(data) # convert data to given dtype
with self.subTest(dtype=dtype):
s = np.dtype(self._get_type(spec_type))
e = np.dtype(self._get_type(exp_type))
g = np.dtype(self._get_type(dtype))
msg = ("Spec 'data': Value with data type %s is being converted to data type %s "
"(min specification: %s)." % (g.name, e.name, s.name))
with self.assertWarnsWith(UserWarning, msg):
ret = ObjectMapper.convert_dtype(spec, value)
self.assertTupleEqual(ret, match)
self.assertIs(ret[0].dtype.type, match[1])
def _test_convert_mismatch_helper(self, spec_type, value_types):
data = 1
spec = DatasetSpec('an example dataset', spec_type, name='data')
for dtype in value_types:
value = self._get_type(dtype)(data) # convert data to given dtype
with self.subTest(dtype=dtype):
s = np.dtype(self._get_type(spec_type))
g = np.dtype(self._get_type(dtype))
msg = "expected %s, received %s - must supply %s" % (s.name, g.name, s.name)
with self.assertRaisesWith(ValueError, msg):
ObjectMapper.convert_dtype(spec, value)
def test_dci_input(self):
spec = DatasetSpec('an example dataset', 'int64', name='data')
value = DataChunkIterator(np.array([1, 2, 3], dtype=np.int32))
msg = "Spec 'data': Value with data type int32 is being converted to data type int64 as specified."
with self.assertWarnsWith(UserWarning, msg):
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value) # no conversion
self.assertIs(ret, value)
self.assertEqual(ret_dtype, np.int64)
spec = DatasetSpec('an example dataset', 'int16', name='data')
value = DataChunkIterator(np.array([1, 2, 3], dtype=np.int32))
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value) # no conversion
self.assertIs(ret, value)
self.assertEqual(ret_dtype, np.int32) # increase precision
def test_text_spec(self):
text_spec_types = ['text', 'utf', 'utf8', 'utf-8']
for spec_type in text_spec_types:
with self.subTest(spec_type=spec_type):
spec = DatasetSpec('an example dataset', spec_type, name='data')
value = 'a'
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
self.assertEqual(ret, value)
self.assertIs(type(ret), str)
self.assertEqual(ret_dtype, 'utf8')
value = b'a'
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
self.assertEqual(ret, 'a')
self.assertIs(type(ret), str)
self.assertEqual(ret_dtype, 'utf8')
value = ['a', 'b']
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
self.assertListEqual(ret, value)
self.assertIs(type(ret[0]), str)
self.assertEqual(ret_dtype, 'utf8')
value = np.array(['a', 'b'])
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
np.testing.assert_array_equal(ret, value)
self.assertEqual(ret_dtype, 'utf8')
value = np.array(['a', 'b'], dtype='S1')
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
np.testing.assert_array_equal(ret, np.array(['a', 'b'], dtype='U1'))
self.assertEqual(ret_dtype, 'utf8')
value = []
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
self.assertListEqual(ret, value)
self.assertEqual(ret_dtype, 'utf8')
value = 1
msg = "Expected unicode or ascii string, got <class 'int'>"
with self.assertRaisesWith(ValueError, msg):
ObjectMapper.convert_dtype(spec, value)
value = DataChunkIterator(np.array(['a', 'b']))
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value) # no conversion
self.assertIs(ret, value)
self.assertEqual(ret_dtype, 'utf8')
value = DataChunkIterator(np.array(['a', 'b'], dtype='S1'))
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value) # no conversion
self.assertIs(ret, value)
self.assertEqual(ret_dtype, 'utf8')
def test_ascii_spec(self):
ascii_spec_types = ['ascii', 'bytes']
for spec_type in ascii_spec_types:
with self.subTest(spec_type=spec_type):
spec = DatasetSpec('an example dataset', spec_type, name='data')
value = 'a'
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
self.assertEqual(ret, b'a')
self.assertIs(type(ret), bytes)
self.assertEqual(ret_dtype, 'ascii')
value = b'a'
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
self.assertEqual(ret, b'a')
self.assertIs(type(ret), bytes)
self.assertEqual(ret_dtype, 'ascii')
value = ['a', 'b']
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
self.assertListEqual(ret, [b'a', b'b'])
self.assertIs(type(ret[0]), bytes)
self.assertEqual(ret_dtype, 'ascii')
value = np.array(['a', 'b'])
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
np.testing.assert_array_equal(ret, np.array(['a', 'b'], dtype='S1'))
self.assertEqual(ret_dtype, 'ascii')
value = np.array(['a', 'b'], dtype='S1')
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
np.testing.assert_array_equal(ret, value)
self.assertEqual(ret_dtype, 'ascii')
value = []
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
self.assertListEqual(ret, value)
self.assertEqual(ret_dtype, 'ascii')
value = 1
msg = "Expected unicode or ascii string, got <class 'int'>"
with self.assertRaisesWith(ValueError, msg):
ObjectMapper.convert_dtype(spec, value)
value = DataChunkIterator(np.array(['a', 'b']))
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value) # no conversion
self.assertIs(ret, value)
self.assertEqual(ret_dtype, 'ascii')
value = DataChunkIterator(np.array(['a', 'b'], dtype='S1'))
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value) # no conversion
self.assertIs(ret, value)
self.assertEqual(ret_dtype, 'ascii')
def test_no_spec(self):
spec_type = None
spec = DatasetSpec('an example dataset', spec_type, name='data')
value = [1, 2, 3]
ret, ret_dtype = ObjectMapper.convert_dtype(spec, value)
self.assertListEqual(ret, value)
self.assertIs(type(ret[0]), int)
self.assertEqual(ret_dtype, int)
value =
|
np.uint64(4)
|
numpy.uint64
|
# time2dist.py - DeerAnalysis distance axis constructor
# ------------------------------------------------------------------------
# This file is a part of DeerLab. License is MIT (see LICENSE.md).
# Copyright(c) 2019-2021: <NAME>, <NAME> and other contributors.
import numpy as np
from deerlab.utils import isempty
def time2dist(t, M=[]):
r"""
DeerAnalysis conversion from time-axis to distance-axis
.. warning:: This is a legacy function. Its use is not recommended for routine or accurate data analysis.
Parameters
----------
t : array_like
Time axis, in microseconds.
    M : int scalar
        Length of the output distance axis; by default, the same length as the time axis.
Returns
-------
r : ndarray
Distance axis, in nanometers.
Notes
-----
The minimal and maximal distances (rmin,rmax) are determined by the empirical
approximations derived by <NAME> as implemented in DeerAnalysis.
These empirical equation approximate the minimal and maximal detectable distances
given a certain time step :math:`\Delta t` and trace length :math:`t_\text{max}`.
.. math::
r_\text{min} = 4\left( \frac{4\Delta t \nu_0}{0.85} \right)^{1/3}
.. math::
r_\text{max} = 6\left( \frac{t_\text{max}}{2} \right)^{1/3}
    where :math:`\nu_0` = 52.04 MHz is the dipolar frequency between two nitroxide electron spins separated by 1 nm.
"""
t = np.atleast_1d(t)
if isempty(M):
M = len(t)
t = np.abs(t)
dt = np.mean(np.abs(np.diff(t)))
tmax =
|
np.max(t)
|
numpy.max
|
"""
QTNM field module.
Provides concrete implementations of QtnmBaseField.
Implementations:
----------------
BiotSavart: General purpose numerical integration of current elements.
CoilField: Magnetic field due to a circular loop of wire.
BathTub: Magnetic field based on Project8 bathtub trap.
Solenoid: Approximate magnetic field due to solenoid.
ExternalField: Reads and interpolates field from external file.
"""
import numpy as np
from scipy.constants import mu_0 as mu0
from scipy.special import ellipk, ellipe
from scipy.spatial import KDTree
from qtnm_base import QtnmBaseField
class BiotSavart(QtnmBaseField):
"""
Generic QTNM field class which numerically integrates the Biot-Savart law
to produce a magnetic field.
"""
def __init__(self, x, y, z, current=1.0, mu=mu0):
"""
Parameters
----------
x: x positions of current elements (1D)
y: y positions of current elements (1D)
z: z positions of current elements (1D)
current: Current (Default 1.0).
mu: Magnetic permeability (Default mu0)
"""
self.current = current
self.mu = mu
# Construct positions of current elements
self.xc = 0.5 * (x[1:] + x[:-1])
self.yc = 0.5 * (y[1:] + y[:-1])
self.zc = 0.5 * (z[1:] + z[:-1])
# Wire elements
self.dlx = x[1:] - x[:-1]
self.dly = y[1:] - y[:-1]
self.dlz = z[1:] - z[:-1]
def evaluate_field_at_point(self, x, y, z):
# Displacement vectors
r_x = x - self.xc
r_y = y - self.yc
r_z = z - self.zc
rmag = np.sqrt(r_x**2 + r_y**2 + r_z**2)
# Cross product components. Better implementations
# (e.g. using built in cross product) exist
lrx = self.dly * r_z - self.dlz * r_y
lry = self.dlz * r_x - self.dlx * r_z
lrz = self.dlx * r_y - self.dly * r_x
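        # One such built-in alternative (a sketch, not used here so behaviour
        # stays identical):
        #     dl = np.stack((self.dlx, self.dly, self.dlz), axis=-1)
        #     rvec = np.stack((r_x, r_y, r_z), axis=-1)
        #     lrx, lry, lrz = np.cross(dl, rvec).T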
b_x = np.sum(lrx / rmag**3)
b_y = np.sum(lry / rmag**3)
b_z = np.sum(lrz / rmag**3)
b_x *= self.current * self.mu / 4.0 / np.pi
b_y *= self.current * self.mu / 4.0 / np.pi
b_z *= self.current * self.mu / 4.0 / np.pi
return b_x, b_y, b_z
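# Usage sketch (an illustrative assumption, not part of the original module):
# discretise a unit loop in the xy plane and evaluate the field at its centre,
# where the analytic result is mu0 * I / (2 * R).
#
#     theta = np.linspace(0.0, 2.0 * np.pi, 201)
#     loop = BiotSavart(np.cos(theta), np.sin(theta), np.zeros_like(theta), current=1.0)
#     bx, by, bz = loop.evaluate_field_at_point(0.0, 0.0, 0.0)   # bz ~ mu0 / 2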
class CoilField(QtnmBaseField):
"""
Interface to magnetic field generated by a circular loop of wire in xy
plane.
"""
def __init__(self, radius=0.005, current=40, Z=0.0, mu=mu0):
"""
Parameters
----------
radius: Radius of coil (Default 0.005)
        current: Current (Default 40).
        Z: Vertical position of coil (Default 0.0).
mu: Magnetic permeability (Default mu0)
"""
self.current = current
self.mu = mu
self.z = Z
self.radius = radius
def __central_field(self):
return self.current * self.mu / self.radius / 2.0
# On-Axis field
def __on_axis_field(self, z):
return (self.mu * self.current * self.radius**2 / 2.0
/ (self.radius**2 + (z - self.z)**2)**(1.5))
def evaluate_field_at_point(self, x, y, z):
rad = np.sqrt(x**2 + y**2)
# If on-axis
if rad / self.radius < 1e-10:
return 0.0, 0.0, self.__on_axis_field(z)
# z relative to position of coil
z_rel = z - self.z
b_central = self.__central_field()
rad_norm = rad / self.radius
z_norm = z_rel / self.radius
alpha = (1.0 + rad_norm)**2 + z_norm**2
root_alpha_pi = np.sqrt(alpha) * np.pi
beta = 4 * rad_norm / alpha
int_e = ellipe(beta)
int_k = ellipk(beta)
gamma = alpha - 4 * rad_norm
b_r = b_central * (int_e * ((1.0 + rad_norm**2 + z_norm**2) / gamma)
- int_k) / root_alpha_pi * (z_rel / rad)
b_z = b_central * (int_e * ((1.0 - rad_norm**2 - z_norm**2) / gamma)
+ int_k) / root_alpha_pi
return b_r * x / rad, b_r * y / rad, b_z
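# Consistency sketch (assumption): on the axis, CoilField reduces to the
# textbook loop field mu0 * I * R**2 / (2 * (R**2 + z**2)**1.5).
#
#     coil = CoilField(radius=0.005, current=40, Z=0.0)
#     _, _, bz = coil.evaluate_field_at_point(0.0, 0.0, 0.01)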
class BathTubField(QtnmBaseField):
"""
Interface to magnetic field based on the Project 8 "bath tub" set-up.
Essentially a superposition of a magnetic bottle formed by two current
loops, and a background magnetic field.
"""
def __init__(self, radius=0.005, current=40, Z1=-1, Z2=1,
background=np.zeros(3)):
"""
Parameters
----------
radius: Radius of current loops (Default 0.005)
        current: Current (Default 40).
Z1: Vertical position of 1st coil (Default -1).
Z2: Vertical position of 2nd coil (Default 1).
background: Background magnetic field (Default 0).
"""
self.coil1 = CoilField(radius=radius, current=current, Z=Z1)
self.coil2 = CoilField(radius=radius, current=current, Z=Z2)
self.background = background
def evaluate_field_at_point(self, x, y, z):
b_x1, b_y1, b_z1 = self.coil1.evaluate_field_at_point(x, y, z)
b_x2, b_y2, b_z2 = self.coil2.evaluate_field_at_point(x, y, z)
b_x = b_x1 + b_x2
b_y = b_y1 + b_y2
b_z = b_z1 + b_z2
return np.array([b_x, b_y, b_z]) + self.background
class SolenoidField(QtnmBaseField):
"""
Interface to approximate solenoid field.
Solenoid is approximated by combining multiple instances of CoilField.
"""
def __init__(self, radius=0.005, current=40, Zmin=-1, Zmax=1,
Ncoils=11):
"""
Parameters
----------
radius: Radius of current loops (Default 0.005)
current: Current (Default 40).
Zmin: Vertical position of 1st coil (Default -1).
Zmax: Vertical position of last coil (Default 1).
Ncoils: Number of coils to use (Default 11).
"""
self.coils = []
for z in np.linspace(Zmin, Zmax, Ncoils):
self.coils.append(CoilField(radius=radius, current=current,
Z=z))
def evaluate_field_at_point(self, x, y, z):
field = np.zeros(3)
for coil in self.coils:
field = field + coil.evaluate_field_at_point(x, y, z)
return field
class ExternalField(QtnmBaseField):
"""
Reads the field in from an external (plain text) file
Assumes the file is formatted as (x, y, z, Bx, By, Bz) with
a variable number of header lines before the data
"""
def __init__(self, fname, nheader=8, interp_pts=11):
"""
Parameters
----------
fname: Name of file to read data from
        nheader: Number of lines to skip at the start of the file (Default 8)
interp_pts: Number of points to use for interpolation (Default 11)
"""
try:
data = np.loadtxt(fname, skiprows=nheader)
positions = data[:,:3]
self.bx = data[:,3]
self.by = data[:,4]
self.bz = data[:,5]
        except IOError:
            print('Requested file: %s could not be opened' % fname)
            raise  # re-raise: the field data is required below
# Set up KD Tree
self.tree = KDTree(positions)
# Number of points for IDW
self.ipts = interp_pts
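    # Field queries below use inverse-distance weighting over the `interp_pts`
    # nearest neighbours: B(p) ~ sum_i w_i * B_i / sum_i w_i with w_i = 1 / d_i
    # (descriptive note; an exact-match point short-circuits to its stored value).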
def evaluate_field_at_point(self, x, y, z):
# Use nearest neighbour for now
distances, indices = self.tree.query([x, y, z], k=self.ipts)
# If we got lucky
if distances[0] < 1e-10:
return self.bx[indices[0]], self.by[indices[0]], self.bz[indices[0]]
# Otherwise
weights = 1.0 / distances
weights_tot = np.sum(weights)
bx =
|
np.dot(self.bx[indices], weights)
|
numpy.dot
|
#!/usr/bin/env python3
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
# Keyboard controlling for CARLA. Please refer to client_example.py for a simpler
# and more documented example.
"""
Welcome to CARLA manual control.
Use ARROWS or WASD keys for control.
W : throttle
S : brake
AD : steer
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
R : restart level
STARTING in a moment...
"""
from __future__ import print_function
import argparse
import logging
import random
import time
try:
import pygame
from pygame.locals import K_DOWN
from pygame.locals import K_LEFT
from pygame.locals import K_RIGHT
from pygame.locals import K_SPACE
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_d
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_r
from pygame.locals import K_s
from pygame.locals import K_w
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
from carla import image_converter
from carla import sensor
from carla.client import make_carla_client, VehicleControl
from carla.planner.map import CarlaMap
from carla.settings import CarlaSettings
from carla.tcp import TCPConnectionError
from carla.util import print_over_same_line
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 600
MINI_WINDOW_WIDTH = 320
MINI_WINDOW_HEIGHT = 180
def make_carla_settings(args):
"""Make a CarlaSettings object with the settings we need."""
settings = CarlaSettings()
settings.set(
SynchronousMode=False,
SendNonPlayerAgentsInfo=True,
NumberOfVehicles=15,
NumberOfPedestrians=30,
WeatherId=random.choice([1, 3, 7, 8, 14]),
QualityLevel=args.quality_level)
settings.randomize_seeds()
camera0 = sensor.Camera('CameraRGB')
camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
camera0.set_position(2.0, 0.0, 1.4)
camera0.set_rotation(0.0, 0.0, 0.0)
settings.add_sensor(camera0)
camera1 = sensor.Camera('CameraDepth', PostProcessing='Depth')
camera1.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
camera1.set_position(2.0, 0.0, 1.4)
camera1.set_rotation(0.0, 0.0, 0.0)
settings.add_sensor(camera1)
camera2 = sensor.Camera('CameraSemSeg', PostProcessing='SemanticSegmentation')
camera2.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
camera2.set_position(2.0, 0.0, 1.4)
camera2.set_rotation(0.0, 0.0, 0.0)
settings.add_sensor(camera2)
if args.lidar:
lidar = sensor.Lidar('Lidar32')
lidar.set_position(0, 0, 2.5)
lidar.set_rotation(0, 0, 0)
lidar.set(
Channels=32,
Range=50,
PointsPerSecond=100000,
RotationFrequency=10,
UpperFovLimit=10,
LowerFovLimit=-30)
settings.add_sensor(lidar)
return settings
class Timer(object):
def __init__(self):
self.step = 0
self._lap_step = 0
self._lap_time = time.time()
def tick(self):
self.step += 1
def lap(self):
self._lap_step = self.step
self._lap_time = time.time()
def ticks_per_second(self):
return float(self.step - self._lap_step) / self.elapsed_seconds_since_lap()
def elapsed_seconds_since_lap(self):
return time.time() - self._lap_time
class CarlaGame(object):
def __init__(self, carla_client, args):
self.client = carla_client
self._carla_settings = make_carla_settings(args)
self._timer = None
self._display = None
self._main_image = None
self._mini_view_image1 = None
self._mini_view_image2 = None
self._enable_autopilot = args.autopilot
self._lidar_measurement = None
self._map_view = None
self._is_on_reverse = False
self._city_name = args.map_name
self._map = CarlaMap(self._city_name, 0.1643, 50.0) if self._city_name is not None else None
self._map_shape = self._map.map_image.shape if self._city_name is not None else None
self._map_view = self._map.get_map(WINDOW_HEIGHT) if self._city_name is not None else None
self._position = None
self._agent_positions = None
def execute(self):
"""Launch the PyGame."""
pygame.init()
self._initialize_game()
try:
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
self._on_loop()
self._on_render()
finally:
pygame.quit()
def _initialize_game(self):
if self._city_name is not None:
self._display = pygame.display.set_mode(
(WINDOW_WIDTH + int((WINDOW_HEIGHT/float(self._map.map_image.shape[0]))*self._map.map_image.shape[1]), WINDOW_HEIGHT),
pygame.HWSURFACE | pygame.DOUBLEBUF)
else:
self._display = pygame.display.set_mode(
(WINDOW_WIDTH, WINDOW_HEIGHT),
pygame.HWSURFACE | pygame.DOUBLEBUF)
logging.debug('pygame started')
self._on_new_episode()
def _on_new_episode(self):
self._carla_settings.randomize_seeds()
self._carla_settings.randomize_weather()
scene = self.client.load_settings(self._carla_settings)
number_of_player_starts = len(scene.player_start_spots)
player_start =
|
np.random.randint(number_of_player_starts)
|
numpy.random.randint
|
import numpy as np
import sys
import os
import asdf
import matplotlib.pyplot as plt
from numpy import log10
from scipy.integrate import simps
from astropy.io import fits
from matplotlib.ticker import FormatStrFormatter
from .function import *
from .function_class import Func
from .basic_func import Basic
import corner
col = ['violet', 'indigo', 'b', 'lightblue', 'lightgreen', 'g', 'orange', 'coral', 'r', 'darkred']#, 'k']
#col = ['darkred', 'r', 'coral','orange','g','lightgreen', 'lightblue', 'b','indigo','violet','k']
def plot_sed(MB, flim=0.01, fil_path='./', scale=1e-19, f_chind=True, figpdf=False, save_sed=True, inputs=False, \
mmax=300, dust_model=0, DIR_TMP='./templates/', f_label=False, f_bbbox=False, verbose=False, f_silence=True, \
f_fill=False, f_fancyplot=False, f_Alog=True, dpi=300, f_plot_filter=True):
    '''
    Parameters
    ----------
    MB.SNlim : float
        S/N limit below which data points are shown as upper limits in the SED.
    f_chind : bool
        If True, include non-detections in the chi2 calculation, following Sawicki (2012).
    mmax : int
        Number of MCMC realizations used for the plot (not for the calculation itself).
    f_fancyplot : bool
        If True, plot each SED component separately.
    f_fill : bool
        If True, and f_fancyplot is also True, fill each SED component.
    Returns
    -------
    plots
    '''
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy.optimize import curve_fit
    from numpy import asarray as ar, exp  # scipy no longer re-exports these numpy functions
import matplotlib
import scipy.integrate as integrate
import scipy.special as special
import os.path
from astropy.io import ascii
import time
if f_silence:
import matplotlib
matplotlib.use("Agg")
def gaus(x,a,x0,sigma):
return a*exp(-(x-x0)**2/(2*sigma**2))
lcb = '#4682b4' # line color, blue
fnc = MB.fnc
bfnc = MB.bfnc
ID = MB.ID
Z = MB.Zall
age = MB.age
nage = MB.nage
tau0 = MB.tau0
#col = ['violet', 'indigo', 'b', 'lightblue', 'lightgreen', 'g', 'orange', 'coral', 'r', 'darkred']#, 'k']
NUM_COLORS = len(age)
cm = plt.get_cmap('gist_rainbow')
col = [cm(1 - 1.*i/NUM_COLORS) for i in range(NUM_COLORS)]
nstep_plot = 1
if MB.f_bpass:
nstep_plot = 30
SNlim = MB.SNlim
################
# RF colors.
home = os.path.expanduser('~')
c = MB.c
chimax = 1.
m0set = MB.m0set
Mpc_cm = MB.Mpc_cm
d = MB.d * scale
##################
# Fitting Results
##################
DIR_FILT = MB.DIR_FILT
SFILT = MB.filts
try:
f_err = MB.ferr
except:
f_err = 0
###########################
# Open result file
###########################
file = MB.DIR_OUT + 'summary_' + ID + '.fits'
hdul = fits.open(file)
ndim_eff = hdul[0].header['NDIM']
# Redshift MC
zp16 = hdul[1].data['zmc'][0]
zp50 = hdul[1].data['zmc'][1]
zp84 = hdul[1].data['zmc'][2]
# Stellar mass MC
M16 = hdul[1].data['ms'][0]
M50 = hdul[1].data['ms'][1]
M84 = hdul[1].data['ms'][2]
if verbose:
print('Total stellar mass is %.2e'%(M50))
# Amplitude MC
A50 = np.zeros(len(age), dtype='float')
A16 = np.zeros(len(age), dtype='float')
A84 = np.zeros(len(age), dtype='float')
for aa in range(len(age)):
A16[aa] = 10**hdul[1].data['A'+str(aa)][0]
A50[aa] = 10**hdul[1].data['A'+str(aa)][1]
A84[aa] = 10**hdul[1].data['A'+str(aa)][2]
Asum = np.sum(A50)
aa = 0
Av16 = hdul[1].data['Av'+str(aa)][0]
Av50 = hdul[1].data['Av'+str(aa)][1]
Av84 = hdul[1].data['Av'+str(aa)][2]
AAv = [Av50]
Z50 = np.zeros(len(age), dtype='float')
Z16 = np.zeros(len(age), dtype='float')
Z84 = np.zeros(len(age), dtype='float')
NZbest = np.zeros(len(age), dtype='int')
for aa in range(len(age)):
Z16[aa] = hdul[1].data['Z'+str(aa)][0]
Z50[aa] = hdul[1].data['Z'+str(aa)][1]
Z84[aa] = hdul[1].data['Z'+str(aa)][2]
NZbest[aa]= bfnc.Z2NZ(Z50[aa])
# Light weighted Z.
ZZ50 = np.sum(Z50*A50)/np.sum(A50)
# FIR Dust;
try:
MD16 = hdul[1].data['MDUST'][0]
MD50 = hdul[1].data['MDUST'][1]
MD84 = hdul[1].data['MDUST'][2]
TD16 = hdul[1].data['TDUST'][0]
TD50 = hdul[1].data['TDUST'][1]
TD84 = hdul[1].data['TDUST'][2]
nTD16 = hdul[1].data['nTDUST'][0]
nTD50 = hdul[1].data['nTDUST'][1]
nTD84 = hdul[1].data['nTDUST'][2]
DFILT = inputs['FIR_FILTER'] # filter band string.
DFILT = [x.strip() for x in DFILT.split(',')]
DFWFILT = fil_fwhm(DFILT, DIR_FILT)
if verbose:
print('Total dust mass is %.2e'%(MD50))
f_dust = True
except:
f_dust = False
chi = hdul[1].data['chi'][0]
chin = hdul[1].data['chi'][1]
fitc = chin
Cz0 = hdul[0].header['Cz0']
Cz1 = hdul[0].header['Cz1']
zbes = zp50
zscl = (1.+zbes)
###############################
# Data taken from
###############################
if MB.f_dust:
MB.dict = MB.read_data(Cz0, Cz1, zbes, add_fir=True)
else:
MB.dict = MB.read_data(Cz0, Cz1, zbes)
NR = MB.dict['NR']
x = MB.dict['x']
fy = MB.dict['fy']
ey = MB.dict['ey']
con0 = (NR<1000)
xg0 = x[con0]
fg0 = fy[con0]
eg0 = ey[con0]
con1 = (NR>=1000) & (NR<10000)
xg1 = x[con1]
fg1 = fy[con1]
eg1 = ey[con1]
if len(xg0)>0 or len(xg1)>0:
f_grsm = True
else:
f_grsm = False
wht = fy * 0
con_wht = (ey>0)
wht[con_wht] = 1./np.square(ey[con_wht])
# BB data points;
NRbb = MB.dict['NRbb']
xbb = MB.dict['xbb']
fybb = MB.dict['fybb']
eybb = MB.dict['eybb']
exbb = MB.dict['exbb']
snbb = fybb/eybb
######################
# Weight by line
######################
wh0 = 1./np.square(eg0)
LW0 = []
model = fg0
wht3 = check_line_man(fy, x, wht, fy, zbes, LW0)
######################
# Mass-to-Light ratio.
######################
ms = np.zeros(len(age), dtype='float')
af = MB.af
sedpar = af['ML']
for aa in range(len(age)):
ms[aa] = sedpar['ML_' + str(int(NZbest[aa]))][aa]
try:
isochrone = af['isochrone']
LIBRARY = af['library']
except:
isochrone = ''
LIBRARY = ''
#############
# Plot.
#############
# Set the inset.
if f_grsm or f_dust:
fig = plt.figure(figsize=(7.,3.2))
fig.subplots_adjust(top=0.98, bottom=0.16, left=0.1, right=0.99, hspace=0.15, wspace=0.25)
ax1 = fig.add_subplot(111)
xsize = 0.29
ysize = 0.25
if f_grsm:
ax2t = ax1.inset_axes((1-xsize-0.01,1-ysize-0.01,xsize,ysize))
if f_dust:
ax3t = ax1.inset_axes((0.7,.35,.28,.25))
else:
fig = plt.figure(figsize=(5.5,2.2))
fig.subplots_adjust(top=0.98, bottom=0.16, left=0.1, right=0.99, hspace=0.15, wspace=0.25)
ax1 = fig.add_subplot(111)
#######################################
# D.Kelson like Box for BB photometry
#######################################
col_dat = 'r'
if f_bbbox:
for ii in range(len(xbb)):
if eybb[ii]<100 and fybb[ii]/eybb[ii]>1:
xx = [xbb[ii]-exbb[ii],xbb[ii]-exbb[ii]]
yy = [(fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
xx = [xbb[ii]+exbb[ii],xbb[ii]+exbb[ii]]
yy = [(fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
xx = [xbb[ii]-exbb[ii],xbb[ii]+exbb[ii]]
yy = [(fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
xx = [xbb[ii]-exbb[ii],xbb[ii]+exbb[ii]]
yy = [(fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
else: # Normal BB plot;
# Detection;
conbb_hs = (fybb/eybb>SNlim)
ax1.errorbar(xbb[conbb_hs], fybb[conbb_hs] * c / np.square(xbb[conbb_hs]) / d, \
yerr=eybb[conbb_hs]*c/np.square(xbb[conbb_hs])/d, color='k', linestyle='', linewidth=0.5, zorder=4)
ax1.plot(xbb[conbb_hs], fybb[conbb_hs] * c / np.square(xbb[conbb_hs]) / d, \
marker='.', color=col_dat, linestyle='', linewidth=0, zorder=4, ms=8)#, label='Obs.(BB)')
try:
            # For any data removed from the fit (i.e. IRAC excess):
data_ex = ascii.read(DIR_TMP + 'bb_obs_' + ID + '_removed.cat')
NR_ex = data_ex['col1']
except:
NR_ex = []
# Upperlim;
sigma = 1.0
leng = np.max(fybb[conbb_hs] * c / np.square(xbb[conbb_hs]) / d) * 0.05 #0.2
conebb_ls = (fybb/eybb<=SNlim) & (eybb>0)
for ii in range(len(xbb)):
if NRbb[ii] in NR_ex[:]:
conebb_ls[ii] = False
ax1.errorbar(xbb[conebb_ls], eybb[conebb_ls] * c / np.square(xbb[conebb_ls]) / d * sigma, yerr=leng,\
uplims=eybb[conebb_ls] * c / np.square(xbb[conebb_ls]) / d * sigma, linestyle='', color=col_dat, marker='', ms=4, label='', zorder=4, capsize=3)
    # For any data removed from the fit (i.e. IRAC excess):
f_exclude = False
try:
col_ex = 'lawngreen'
#col_ex = 'limegreen'
#col_ex = 'r'
# Currently, this file is made after FILTER_SKIP;
data_ex = ascii.read(DIR_TMP + 'bb_obs_' + ID + '_removed.cat')
x_ex = data_ex['col2']
fy_ex = data_ex['col3']
ey_ex = data_ex['col4']
ex_ex = data_ex['col5']
ax1.errorbar(x_ex, fy_ex * c / np.square(x_ex) / d, \
xerr=ex_ex, yerr=ey_ex*c/np.square(x_ex)/d, color='k', linestyle='', linewidth=0.5, zorder=5)
ax1.scatter(x_ex, fy_ex * c / np.square(x_ex) / d, marker='s', color=col_ex, edgecolor='k', zorder=5, s=30)
f_exclude = True
except:
pass
#####################################
# Open ascii file and stock to array.
lib = fnc.open_spec_fits(fall=0)
lib_all = fnc.open_spec_fits(fall=1, orig=True)
#lib_all_conv = fnc.open_spec_fits(fall=1)
if f_dust:
DT0 = float(inputs['TDUST_LOW'])
DT1 = float(inputs['TDUST_HIG'])
dDT = float(inputs['TDUST_DEL'])
Temp = np.arange(DT0,DT1,dDT)
iimax = len(nage)-1
# FIR dust plot;
if f_dust:
from lmfit import Parameters
par = Parameters()
par.add('MDUST',value=MD50)
par.add('TDUST',value=nTD50)
par.add('zmc',value=zp50)
y0d, x0d = fnc.tmp04_dust(par.valuesdict())#, zbes, lib_dust_all)
y0d_cut, x0d_cut = fnc.tmp04_dust(par.valuesdict())#, zbes, lib_dust)
# data;
dat_d = ascii.read(MB.DIR_TMP + 'bb_dust_obs_' + MB.ID + '.cat')
NRbbd = dat_d['col1']
xbbd = dat_d['col2']
fybbd = dat_d['col3']
eybbd = dat_d['col4']
exbbd = dat_d['col5']
snbbd = fybbd/eybbd
try:
conbbd_hs = (fybbd/eybbd>SNlim)
ax1.errorbar(xbbd[conbbd_hs], fybbd[conbbd_hs] * c / np.square(xbbd[conbbd_hs]) / d, \
yerr=eybbd[conbbd_hs]*c/np.square(xbbd[conbbd_hs])/d, color='k', linestyle='', linewidth=0.5, zorder=4)
ax1.plot(xbbd[conbbd_hs], fybbd[conbbd_hs] * c / np.square(xbbd[conbbd_hs]) / d, \
'.r', linestyle='', linewidth=0, zorder=4)#, label='Obs.(BB)')
ax3t.plot(xbbd[conbbd_hs], fybbd[conbbd_hs] * c / np.square(xbbd[conbbd_hs]) / d, \
'.r', linestyle='', linewidth=0, zorder=4)#, label='Obs.(BB)')
except:
pass
try:
conebbd_ls = (fybbd/eybbd<=SNlim)
ax1.errorbar(xbbd[conebbd_ls], eybbd[conebbd_ls] * c / np.square(xbbd[conebbd_ls]) / d, \
yerr=fybbd[conebbd_ls]*0+np.max(fybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d)*0.05, \
uplims=eybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d, color='r', linestyle='', linewidth=0.5, zorder=4)
ax3t.errorbar(xbbd[conebbd_ls], eybbd[conebbd_ls] * c / np.square(xbbd[conebbd_ls]) / d, \
yerr=fybbd[conebbd_ls]*0+np.max(fybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d)*0.05, \
uplims=eybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d, color='r', linestyle='', linewidth=0.5, zorder=4)
except:
pass
#
# This is for UVJ color time evolution.
#
Asum = np.sum(A50[:])
alp = .5
for jj in range(len(age)):
ii = int(len(nage) - jj - 1) # from old to young templates.
if jj == 0:
y0, x0 = fnc.tmp03(A50[ii], AAv[0], ii, Z50[ii], zbes, lib_all)
y0p, x0p = fnc.tmp03(A50[ii], AAv[0], ii, Z50[ii], zbes, lib)
ysum = y0
ysump = y0p
nopt = len(ysump)
f_50_comp = np.zeros((len(age),len(y0)),'float')
# Keep each component;
f_50_comp[ii,:] = y0[:] * c / np.square(x0) / d
if f_dust:
ysump[:] += y0d_cut[:nopt]
ysump = np.append(ysump,y0d_cut[nopt:])
# Keep each component;
f_50_comp_dust = y0d * c / np.square(x0d) / d
else:
y0_r, x0_tmp = fnc.tmp03(A50[ii], AAv[0], ii, Z50[ii], zbes, lib_all)
y0p, x0p = fnc.tmp03(A50[ii], AAv[0], ii, Z50[ii], zbes, lib)
ysum += y0_r
ysump[:nopt] += y0p
f_50_comp[ii,:] = y0_r[:] * c / np.square(x0_tmp) / d
# The following needs revised.
f_uvj = False
if f_uvj:
if jj == 0:
fwuvj = open(MB.DIR_OUT + ID + '_uvj.txt', 'w')
fwuvj.write('# age uv vj\n')
ysum_wid = ysum * 0
for kk in range(0,ii+1,1):
tt = int(len(nage) - kk - 1)
nn = int(len(nage) - ii - 1)
nZ = bfnc.Z2NZ(Z50[tt])
y0_wid, x0_wid = fnc.open_spec_fits_dir(tt, nZ, nn, AAv[0], zbes, A50[tt])
ysum_wid += y0_wid
lmrest_wid = x0_wid/(1.+zbes)
band0 = ['u','v','j']
lmconv,fconv = filconv(band0, lmrest_wid, ysum_wid, fil_path) # f0 in fnu
fu_t = fconv[0]
fv_t = fconv[1]
fj_t = fconv[2]
uvt = -2.5*log10(fu_t/fv_t)
vjt = -2.5*log10(fv_t/fj_t)
fwuvj.write('%.2f %.3f %.3f\n'%(age[ii], uvt, vjt))
fwuvj.close()
#############
# Main result
#############
conbb_ymax = (xbb>0) & (fybb>0) & (eybb>0) & (fybb/eybb>1)
ymax = np.max(fybb[conbb_ymax]*c/np.square(xbb[conbb_ymax])/d) * 1.6
xboxl = 17000
xboxu = 28000
ax1.set_xlabel('Observed wavelength ($\mathrm{\mu m}$)', fontsize=12)
ax1.set_ylabel('Flux ($10^{%d}\mathrm{erg}/\mathrm{s}/\mathrm{cm}^{2}/\mathrm{\AA}$)'%(np.log10(scale)),fontsize=12,labelpad=-2)
x1min = 2000
x1max = 100000
xticks = [2500, 5000, 10000, 20000, 40000, 80000, x1max]
xlabels= ['0.25', '0.5', '1', '2', '4', '8', '']
if f_dust:
x1max = 400000
xticks = [2500, 5000, 10000, 20000, 40000, 80000, 400000]
xlabels= ['0.25', '0.5', '1', '2', '4', '8', '']
#if x1max < np.max(xbb[conbb_ymax]):
# x1max = np.max(xbb[conbb_ymax]) * 1.5
if x1max < np.max(xbb):
x1max = np.max(xbb) * 1.5
if x1min > np.min(xbb[conbb_ymax]):
x1min = np.min(xbb[conbb_ymax]) / 1.5
ax1.set_xlim(x1min, x1max)
ax1.set_xscale('log')
if f_plot_filter:
scl_yaxis = 0.2
else:
scl_yaxis = 0.1
ax1.set_ylim(-ymax*scl_yaxis,ymax)
ax1.text(x1min+100,-ymax*0.08,'SNlimit:%.1f'%(SNlim),fontsize=8)
ax1.set_xticks(xticks)
ax1.set_xticklabels(xlabels)
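    # Pick a y-tick spacing dely1 that gives roughly one to four major tick intervals between 0 and ymax.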
dely1 = 0.5
while (ymax-0)/dely1<1:
dely1 /= 2.
while (ymax-0)/dely1>4:
dely1 *= 2.
y1ticks = np.arange(0, ymax, dely1)
ax1.set_yticks(y1ticks)
ax1.set_yticklabels(np.arange(0, ymax, dely1), minor=False)
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax1.yaxis.labelpad = 1.5
xx = np.arange(100,400000)
yy = xx * 0
ax1.plot(xx, yy, ls='--', lw=0.5, color='k')
#############
# Plot
#############
eAAl = np.zeros(len(age),dtype='float')
eAAu = np.zeros(len(age),dtype='float')
eAMl = np.zeros(len(age),dtype='float')
eAMu = np.zeros(len(age),dtype='float')
MSsum = np.sum(ms)
Asum = np.sum(A50)
A50 /= Asum
A16 /= Asum
A84 /= Asum
AM50 = A50 * M50 * ms / MSsum
CM = M50/np.sum(AM50)
AM50 = A50 * M50 * ms / MSsum * CM
AM16 = A16 * M50 * ms / MSsum * CM
AM84 = A84 * M50 * ms / MSsum * CM
AC50 = A50 * 0 # Cumulative
for ii in range(len(A50)):
eAAl[ii] = A50[ii] - A16[ii]
eAAu[ii] = A84[ii] - A50[ii]
eAMl[ii] = AM50[ii] - AM16[ii]
eAMu[ii] = AM84[ii] - AM50[ii]
AC50[ii] = np.sum(AM50[ii:])
################
# Lines
################
LN = ['Mg2', 'Ne5', 'O2', 'Htheta', 'Heta', 'Ne3', 'Hdelta', 'Hgamma', 'Hbeta', 'O3', 'O3', 'Mgb', 'Halpha', 'S2L', 'S2H']
FLW = np.zeros(len(LN),dtype='int')
####################
# For cosmology
####################
    DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm  # luminosity distance in cm
Cons = (4.*np.pi*DL**2/(1.+zbes))
if f_grsm:
print('This function (write_lines) needs to be revised.')
write_lines(ID, zbes, DIR_OUT=MB.DIR_OUT)
##########################
# Zoom in Line regions
##########################
if f_grsm:
conspec = (NR<10000) #& (fy/ey>1)
#ax2t.fill_between(xg1, (fg1-eg1) * c/np.square(xg1)/d, (fg1+eg1) * c/np.square(xg1)/d, lw=0, color='#DF4E00', zorder=10, alpha=0.7, label='')
#ax2t.fill_between(xg0, (fg0-eg0) * c/np.square(xg0)/d, (fg0+eg0) * c/np.square(xg0)/d, lw=0, color='royalblue', zorder=10, alpha=0.2, label='')
ax2t.errorbar(xg1, fg1 * c/np.square(xg1)/d, yerr=eg1 * c/np.square(xg1)/d, lw=0.5, color='#DF4E00', zorder=10, alpha=1., label='', capsize=0)
ax2t.errorbar(xg0, fg0 * c/np.square(xg0)/d, yerr=eg0 * c/np.square(xg0)/d, lw=0.5, linestyle='', color='royalblue', zorder=10, alpha=1., label='', capsize=0)
xgrism = np.concatenate([xg0,xg1])
fgrism = np.concatenate([fg0,fg1])
egrism = np.concatenate([eg0,eg1])
con4000b = (xgrism/zscl>3400) & (xgrism/zscl<3800) & (fgrism>0) & (egrism>0)
con4000r = (xgrism/zscl>4200) & (xgrism/zscl<5000) & (fgrism>0) & (egrism>0)
print('Median SN at 3400-3800 is:', np.median((fgrism/egrism)[con4000b]))
print('Median SN at 4200-5000 is:', np.median((fgrism/egrism)[con4000r]))
#ax1.errorbar(xg1, fg1 * c/np.square(xg1)/d, yerr=eg1 * c/np.square(xg1)/d, lw=0.5, color='#DF4E00', zorder=10, alpha=1., label='', capsize=0)
#ax1.errorbar(xg0, fg0 * c/np.square(xg0)/d, yerr=eg0 * c/np.square(xg0)/d, lw=0.5, linestyle='', color='royalblue', zorder=10, alpha=1., label='', capsize=0)
#
# From MCMC chain
#
file = MB.DIR_OUT + 'chain_' + ID + '_corner.cpkl'
niter = 0
data = loadcpkl(file)
try:
ndim = data['ndim'] # By default, use ndim and burnin values contained in the cpkl file, if present.
burnin = data['burnin']
nmc = data['niter']
nwalk = data['nwalkers']
Nburn = burnin #*20
res = data['chain'][:]
except:
if verbose: print('=> No ndim/burnin keys found in the cpkl file; using the input keyword values instead.')
samples = res
# Saved template;
ytmp = np.zeros((mmax,len(ysum)), dtype='float')
ytmp_each = np.zeros((mmax,len(ysum),len(age)), dtype='float')
ytmpmax = np.zeros(len(ysum), dtype='float')
ytmpmin = np.zeros(len(ysum), dtype='float')
# MUV;
DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm # Luminosity distance in cm
DL10 = Mpc_cm/1e6 * 10 # 10pc in cm
Fuv = np.zeros(mmax, dtype='float') # For Muv
Fuv28 = np.zeros(mmax, dtype='float') # For Fuv(1500-2800)
Lir = np.zeros(mmax, dtype='float') # For L(8-1000um)
UVJ = np.zeros((mmax,4), dtype='float') # For UVJ color;
Cmznu = 10**((48.6+m0set)/(-2.5)) # Conversion from m0_25 to fnu
# From random chain;
alp=0.02
for kk in range(0,mmax,1):
nr = np.random.randint(Nburn, len(samples['A%d'%MB.aamin[0]]))
try:
Av_tmp = samples['Av'][nr]
except:
Av_tmp = MB.AVFIX
try:
zmc = samples['zmc'][nr]
except:
zmc = zbes
for ss in MB.aamin:
try:
AA_tmp = 10**samples['A'+str(ss)][nr]
except:
AA_tmp = 0
pass
try:
Ztest = samples['Z'+str(len(age)-1)][nr]
ZZ_tmp = samples['Z'+str(ss)][nr]
except:
try:
ZZ_tmp = samples['Z0'][nr]
except:
ZZ_tmp = MB.ZFIX
if ss == MB.aamin[0]:
mod0_tmp, xm_tmp = fnc.tmp03(AA_tmp, Av_tmp, ss, ZZ_tmp, zmc, lib_all)
fm_tmp = mod0_tmp
else:
mod0_tmp, xx_tmp = fnc.tmp03(AA_tmp, Av_tmp, ss, ZZ_tmp, zmc, lib_all)
fm_tmp += mod0_tmp
# Each;
ytmp_each[kk,:,ss] = mod0_tmp[:] * c / np.square(xm_tmp[:]) / d
#
# Dust component;
#
if f_dust:
if kk == 0:
par = Parameters()
par.add('MDUST',value=samples['MDUST'][nr])
try:
par.add('TDUST',value=samples['TDUST'][nr])
except:
par.add('TDUST',value=0)
par['MDUST'].value = samples['MDUST'][nr]
try:
par['TDUST'].value = samples['TDUST'][nr]
except:
par['TDUST'].value = 0
model_dust, x1_dust = fnc.tmp04_dust(par.valuesdict())#, zbes, lib_dust_all)
if kk == 0:
deldt = (x1_dust[1] - x1_dust[0])
x1_tot = np.append(xm_tmp,np.arange(np.max(xm_tmp),np.max(x1_dust),deldt))
# Re-allocate the model arrays on the extended wavelength grid that includes the dust component;
ytmp = np.zeros((mmax,len(x1_tot)), dtype='float')
ytmp_dust = np.zeros((mmax,len(x1_dust)), dtype='float')
ytmp_comp = np.zeros((mmax,len(x1_tot)), dtype='float')
ytmp_dust[kk,:] = model_dust * c/np.square(x1_dust)/d
model_tot = np.interp(x1_tot,xx_tmp,fm_tmp) + np.interp(x1_tot,x1_dust,model_dust)
ytmp[kk,:] = model_tot[:] * c/np.square(x1_tot[:])/d
else:
x1_tot = xm_tmp
ytmp[kk,:] = fm_tmp[:] * c / np.square(xm_tmp[:]) / d
#
# Grism plot + Fuv flux + LIR.
#
#if f_grsm:
#ax2t.plot(x1_tot, ytmp[kk,:], '-', lw=0.5, color='gray', zorder=3., alpha=0.02)
# Get FUV flux;
Fuv[kk] = get_Fuv(x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d)) * (DL**2/(1.+zbes)) / (DL10**2), lmin=1250, lmax=1650)
Fuv28[kk] = get_Fuv(x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d)) * (4*np.pi*DL**2/(1.+zbes))*Cmznu, lmin=1500, lmax=2800)
Lir[kk] = 0
# Get UVJ Color;
lmconv,fconv = filconv_fast(MB.filts_rf, MB.band_rf, x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d)))
UVJ[kk,0] = -2.5*np.log10(fconv[0]/fconv[2])
UVJ[kk,1] = -2.5*np.log10(fconv[1]/fconv[2])
UVJ[kk,2] = -2.5*np.log10(fconv[2]/fconv[3])
UVJ[kk,3] = -2.5*np.log10(fconv[4]/fconv[3])
# Brief pause between posterior draws before refreshing the progress bar below.
time.sleep(0.01)
# Update Progress Bar
printProgressBar(kk, mmax, prefix = 'Progress:', suffix = 'Complete', length = 40)
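# (Added note) Each UVJ[kk] entry above is a rest-frame AB colour, m1 - m2 =
# -2.5*log10(f1/f2), formed from the filter-convolved fluxes returned by filconv_fast;
# which flux index corresponds to which band follows the ordering of MB.filts_rf.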
#
# Plot Median SED;
#
ytmp16 = np.percentile(ytmp[:,:],16,axis=0)
ytmp50 = np.percentile(ytmp[:,:],50,axis=0)
ytmp84 = np.percentile(ytmp[:,:],84,axis=0)
if f_dust:
ytmp_dust50 = np.percentile(ytmp_dust[:,:],50, axis=0)
#if not f_fill:
ax1.fill_between(x1_tot[::nstep_plot], ytmp16[::nstep_plot], ytmp84[::nstep_plot], ls='-', lw=.5, color='gray', zorder=-2, alpha=0.5)
ax1.plot(x1_tot[::nstep_plot], ytmp50[::nstep_plot], '-', lw=.5, color='gray', zorder=-1, alpha=1.)
# For grism;
if f_grsm:
from astropy.convolution import convolve
from .maketmp_filt import get_LSF
LSF, lmtmp = get_LSF(MB.inputs, MB.DIR_EXTR, ID, x1_tot[::nstep_plot], c=3e18)
spec_grsm16 = convolve(ytmp16[::nstep_plot], LSF, boundary='extend')
spec_grsm50 = convolve(ytmp50[::nstep_plot], LSF, boundary='extend')
spec_grsm84 = convolve(ytmp84[::nstep_plot], LSF, boundary='extend')
ax2t.plot(x1_tot[::nstep_plot], spec_grsm50, '-', lw=0.5, color='gray', zorder=3., alpha=1.0)
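# (Added note) The percentile spectra are convolved with the instrument line-spread
# function so the plotted grism model matches the resolution of the observed spectrum;
# boundary='extend' pads the convolution with edge values to avoid roll-off at the ends.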
# Attach the data point in MB;
MB.sed_wave_obs = xbb
MB.sed_flux_obs = fybb * c / np.square(xbb) / d
MB.sed_eflux_obs = eybb * c / np.square(xbb) / d
# Attach the best SED to MB;
MB.sed_wave = x1_tot
MB.sed_flux16 = ytmp16
MB.sed_flux50 = ytmp50
MB.sed_flux84 = ytmp84
if f_fancyplot:
alp_fancy = 0.5
#ax1.plot(x1_tot[::nstep_plot], np.percentile(ytmp[:, ::nstep_plot], 50, axis=0), '-', lw=.5, color='gray', zorder=-1, alpha=1.)
ysumtmp = ytmp[0, ::nstep_plot] * 0
ysumtmp2 = ytmp[:, ::nstep_plot] * 0
ysumtmp2_prior = ytmp[0, ::nstep_plot] * 0
for ss in range(len(age)):
ii = int(len(nage) - ss - 1) # from old to young templates.
#ysumtmp += np.percentile(ytmp_each[:, ::nstep_plot, ii], 50, axis=0)
#ax1.plot(x1_tot[::nstep_plot], ysumtmp, linestyle='--', lw=.5, color=col[ii], alpha=0.5)
# !! Take median after summation;
ysumtmp2[:,:len(xm_tmp)] += ytmp_each[:, ::nstep_plot, ii]
if f_fill:
ax1.fill_between(x1_tot[::nstep_plot], ysumtmp2_prior, np.percentile(ysumtmp2[:,:], 50, axis=0), linestyle='None', lw=0., color=col[ii], alpha=alp_fancy, zorder=-3)
else:
ax1.plot(x1_tot[::nstep_plot], np.percentile(ysumtmp2[:, ::nstep_plot], 50, axis=0), linestyle='--', lw=.5, color=col[ii], alpha=alp_fancy, zorder=-3)
ysumtmp2_prior[:] = np.percentile(ysumtmp2[:, :], 50, axis=0)
elif f_fill:
print('f_fancyplot is False. f_fill is set to False.')
#########################
# Calculate non-det chi2
# based on Sawicki (2012)
#########################
def func_tmp(xint,eobs,fmodel):
int_tmp = np.exp(-0.5 * ((xint-fmodel)/eobs)**2)
return int_tmp
if f_chind:
conw = (wht3>0) & (ey>0) & (fy/ey>SNlim)
else:
conw = (wht3>0) & (ey>0) #& (fy/ey>SNlim)
#chi2 = sum((np.square(fy-ysump) * np.sqrt(wht3))[conw])
try:
logf = hdul[1].data['logf'][1]
ey_revised = np.sqrt(ey**2+ ysump**2 * np.exp(logf)**2)
except:
ey_revised = ey
chi2 = sum((np.square(fy-ysump) / ey_revised)[conw])
chi_nd = 0.0
if f_chind:
f_ex = np.zeros(len(fy), 'int')
if f_exclude:
for ii in range(len(fy)):
if x[ii] in x_ex:
f_ex[ii] = 1
con_up = (ey>0) & (fy/ey<=SNlim) & (f_ex == 0)
from scipy import special
#x_erf = (ey[con_up] - ysump[con_up]) / (np.sqrt(2) * ey[con_up])
#f_erf = special.erf(x_erf)
#chi_nd = np.sum( np.log(np.sqrt(np.pi / 2) * ey[con_up] * (1 + f_erf)) )
x_erf = (ey_revised[con_up] - ysump[con_up]) / (np.sqrt(2) * ey_revised[con_up])
f_erf = special.erf(x_erf)
chi_nd = np.sum( np.log(np.sqrt(np.pi / 2) * ey_revised[con_up] * (1 + f_erf)) )
# Number of degree;
con_nod = (wht3>0) & (ey>0) #& (fy/ey>SNlim)
nod = int(len(wht3[con_nod])-ndim_eff)
print('\n')
print('No-of-detection : %d'%(len(wht3[conw])))
print('chi2 : %.2f'%(chi2))
if f_chind:
print('No-of-non-detection: %d'%(len(ey[con_up])))
print('chi2 for non-det : %.2f'%(- 2 * chi_nd))
print('No-of-params : %d'%(ndim_eff))
print('Degrees-of-freedom : %d'%(nod))
if nod>0:
fin_chi2 = (chi2 - 2 * chi_nd) / nod
else:
fin_chi2 = -99
print('Final chi2/nu : %.2f'%(fin_chi2))
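# (Added note) The upper-limit term follows Sawicki (2012): for every band with
# S/N <= SNlim the Gaussian likelihood is integrated up to the flux limit, contributing
#   ln L_i = ln[ sqrt(pi/2) * sigma_i * (1 + erf((f_lim_i - f_model_i) / (sqrt(2)*sigma_i))) ],
# and -2*chi_nd adds these terms to the detection chi-square before dividing by the
# degrees of freedom. Here ey_revised serves as both sigma_i and the flux limit f_lim_i.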
#
# plot BB model from best template (blue squares)
#
col_dia = 'blue'
if f_dust:
ALLFILT = np.append(SFILT,DFILT)
#for ii in range(len(x1_tot)):
# print(x1_tot[ii], model_tot[ii]*c/np.square(x1_tot[ii])/d)
lbb, fbb, lfwhm = filconv(ALLFILT, x1_tot, ytmp50, DIR_FILT, fw=True)
lbb, fbb16, lfwhm = filconv(ALLFILT, x1_tot, ytmp16, DIR_FILT, fw=True)
lbb, fbb84, lfwhm = filconv(ALLFILT, x1_tot, ytmp84, DIR_FILT, fw=True)
ax1.plot(x1_tot, ytmp50, '--', lw=0.5, color='purple', zorder=-1, label='')
ax3t.plot(x1_tot, ytmp50, '--', lw=0.5, color='purple', zorder=-1, label='')
iix = []
for ii in range(len(fbb)):
iix.append(ii)
con_sed = ()
ax1.scatter(lbb[iix][con_sed], fbb[iix][con_sed], lw=0.5, color='none', edgecolor=col_dia, zorder=3, alpha=1.0, marker='d', s=50)
# plot FIR range;
ax3t.scatter(lbb, fbb, lw=0.5, color='none', edgecolor=col_dia, \
zorder=2, alpha=1.0, marker='d', s=50)
else:
lbb, fbb, lfwhm = filconv(SFILT, x1_tot, ytmp50, DIR_FILT, fw=True, MB=MB, f_regist=False)
lbb, fbb16, lfwhm = filconv(SFILT, x1_tot, ytmp16, DIR_FILT, fw=True, MB=MB, f_regist=False)
lbb, fbb84, lfwhm = filconv(SFILT, x1_tot, ytmp84, DIR_FILT, fw=True, MB=MB, f_regist=False)
iix = []
for ii in range(len(fbb)):
iix.append(np.argmin(np.abs(lbb[ii]-xbb[:])))
con_sed = (eybb>0)
ax1.scatter(lbb[iix][con_sed], fbb[iix][con_sed], lw=0.5, color='none', edgecolor=col_dia, zorder=3, alpha=1.0, marker='d', s=50)
# Calculate EW, if there is excess band;
try:
iix2 = []
for ii in range(len(fy_ex)):
iix2.append(np.argmin(np.abs(lbb[:]-x_ex[ii])))
# Rest-frame EW;
# Note the 16/84 swap in fbb below: the higher continuum (fbb84) gives the lower EW bound and vice versa.
EW16 = (fy_ex * c / np.square(x_ex) / d - fbb84[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
EW50 = (fy_ex * c / np.square(x_ex) / d - fbb[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
EW84 = (fy_ex * c / np.square(x_ex) / d - fbb16[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
EW50_er1 = ((fy_ex-ey_ex) * c / np.square(x_ex) / d - fbb[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
EW50_er2 = ((fy_ex+ey_ex) * c / np.square(x_ex) / d - fbb[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
cnt50 = fbb[iix2]
cnt16 = fbb16[iix2]
cnt84 = fbb84[iix2]
# Luminosity;
#Lsun = 3.839 * 1e33 #erg s-1
L16 = EW16 * cnt16 * (4.*np.pi*DL**2) * scale * (1+zbes) # A * erg/s/A/cm2 * cm2
L50 = EW50 * cnt50 * (4.*np.pi*DL**2) * scale * (1+zbes) # A * erg/s/A/cm2 * cm2
L84 = EW84 * cnt84 * (4.*np.pi*DL**2) * scale * (1+zbes) # A * erg/s/A/cm2 * cm2
ew_label = []
for ii in range(len(fy_ex)):
lres = MB.band['%s_lam'%MB.filts[iix2[ii]]][:]
fres = MB.band['%s_res'%MB.filts[iix2[ii]]][:]
ew_label.append(MB.filts[iix2[ii]])
print('\n')
print('EW016 for', x_ex[ii], 'is %d'%EW16[ii])
print('EW050 for', x_ex[ii], 'is %d'%EW50[ii])
print('EW084 for', x_ex[ii], 'is %d'%EW84[ii])
print('%d_{-%d}^{+%d} , for sed error'%(EW50[ii],EW50[ii]-EW84[ii],EW16[ii]-EW50[ii]))
print('Or, %d\pm{%d} , for flux error'%(EW50[ii],EW50[ii]-EW50_er1[ii]))
except:
pass
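# (Added note) The equivalent widths above treat the band excess as pure line flux on
# top of the model continuum: EW_rest = (f_band - f_cont) / f_cont * FWHM_filter / (1+z),
# with f_cont taken from the best-fit template convolved through the same filter; the
# 16th/84th continuum percentiles give the opposite-side EW bounds (EW16 uses fbb84, etc.).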
if save_sed:
fbb16_nu = flamtonu(lbb, fbb16*scale, m0set=25.0)
fbb_nu = flamtonu(lbb, fbb*scale, m0set=25.0)
fbb84_nu = flamtonu(lbb, fbb84*scale, m0set=25.0)
# Then save full spectrum;
col00 = []
col1 = fits.Column(name='wave_model', format='E', unit='AA', array=x1_tot)
col00.append(col1)
col2 = fits.Column(name='f_model_16', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=ytmp16[:])
col00.append(col2)
col3 = fits.Column(name='f_model_50', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=ytmp50[:])
col00.append(col3)
col4 = fits.Column(name='f_model_84', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=ytmp84[:])
col00.append(col4)
# Each component
# Stellar
col1 = fits.Column(name='wave_model_stel', format='E', unit='AA', array=x0)
col00.append(col1)
for aa in range(len(age)):
col1 = fits.Column(name='f_model_stel_%d'%aa, format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=f_50_comp[aa,:])
col00.append(col1)
if f_dust:
col1 = fits.Column(name='wave_model_dust', format='E', unit='AA', array=x1_dust)
col00.append(col1)
col1 = fits.Column(name='f_model_dust', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=ytmp_dust50)
col00.append(col1)
# Grism;
if f_grsm:
col2 = fits.Column(name='f_model_conv_16', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=spec_grsm16)
col00.append(col2)
col3 = fits.Column(name='f_model_conv_50', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=spec_grsm50)
col00.append(col3)
col4 = fits.Column(name='f_model_conv_84', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=spec_grsm84)
col00.append(col4)
# BB for dust
if f_dust:
xbb = np.append(xbb,xbbd)
fybb = np.append(fybb,fybbd)
eybb = np.append(eybb,eybbd)
col5 = fits.Column(name='wave_obs', format='E', unit='AA', array=xbb)
col00.append(col5)
col6 = fits.Column(name='f_obs', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=fybb[:] * c /
| np.square(xbb[:]) | numpy.square |
from copy import deepcopy
import numpy as np
"""
This module defines the ranges of hyper-params to search upon
"""
# Define the high level configuration structure
_all_config = {}
_all_config['AWA1'] = {}
_all_config['SUN'] = {}
_all_config['CUB'] = {}
_all_config['FLO'] = {}
def get(dataset_name, combiner, metric_name, all_config=_all_config):
assert(dataset_name in all_config)
dataset_cfgs = all_config[dataset_name]
if metric_name in dataset_cfgs:
cfg = dataset_cfgs[metric_name].get(combiner,
dataset_cfgs[metric_name]['default'])
else:
cfg = dataset_cfgs.get(combiner, dataset_cfgs['default'])
# sanity check
assert (len(cfg['anomaly_detector_threshold']) == 1)
return deepcopy(cfg)
def cast_to_lists(hyper_params):
""" Put in a list (with len=1), if given as individual value """
hyper_params_as_lists = {}
for k,v in hyper_params.items():
if not (isinstance(v, list) or isinstance(v, np.ndarray)) :
v = [v,]
hyper_params_as_lists[k] = list(v)
return hyper_params_as_lists
def _individual_cfg(hyper_params={}, anomaly_detector_threshold={}):
assert( len(anomaly_detector_threshold) == 1 )
hyper_params_as_lists = cast_to_lists(hyper_params)
threshold_as_list = cast_to_lists(anomaly_detector_threshold)
return dict(hyper_params=hyper_params_as_lists,
anomaly_detector_threshold=threshold_as_list)
# Here we define defaults configurations
CUB_default = _individual_cfg(hyper_params=
dict(
T_cond=[0.1, 0.3, 1, 3],
),
anomaly_detector_threshold=
dict(threshold=np.arange(-2.5, 2.5, 0.1))
)
_all_config['CUB']['default'] = deepcopy(CUB_default)
SUN_default = _individual_cfg(hyper_params=
dict(
T_cond=[0.1, 0.3, 1, 3, 10],
),
anomaly_detector_threshold=
dict(threshold=np.arange(-2.5, 20, 0.2))
)
_all_config['SUN']['default'] = deepcopy(SUN_default)
_all_config['SUN']['Confidence Based Gater: T = (3,)'] = {}
_all_config['SUN']['Confidence Based Gater: T = (3,)']['adaptive_smoothing'] = \
_individual_cfg(hyper_params=
dict(
T_cond=[0.1, 0.3, 1, 3, 10],
),
anomaly_detector_threshold=
dict(threshold=np.arange(0, 50, 0.2))
)
_all_config['SUN']['Confidence Based Gater: T = (3,)']['default'] = deepcopy(
SUN_default)
_all_config['SUN']['const_smoothing'] = deepcopy(SUN_default)
_all_config['SUN']['const_smoothing']['hyper_params']['T_cond'] = [0.1, 0.3, 1, 3, 10]
_all_config['SUN']['const_smoothing']['anomaly_detector_threshold']['threshold'] = \
np.arange(-2.5, 20, 0.2).tolist()
_all_config['SUN']['const_smoothing']['hyper_params']['gamma'] = \
list(np.arange(0.05, 1.001, 0.05))
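# Usage sketch (added for illustration, not part of the original configuration): `get`
# returns the metric- and combiner-specific entry when one exists and otherwise falls
# back to the dataset-level 'default'; the names used below already exist in _all_config,
# while any unlisted combiner/metric simply resolves to the default configuration.
def _example_cfg_lookup():
    cfg = get('SUN', combiner='adaptive_smoothing',
              metric_name='Confidence Based Gater: T = (3,)')
    assert cfg['hyper_params']['T_cond'] == [0.1, 0.3, 1, 3, 10]
    # Unknown metric and combiner names fall back to SUN's default entry.
    cfg_default = get('SUN', combiner='unlisted_combiner', metric_name='unlisted metric')
    assert cfg_default == get('SUN', 'another_unlisted_combiner', 'also unlisted')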
AWA1_default = _individual_cfg(hyper_params=
dict(
T_cond=[0.003, 0.01, 0.03, 0.1,],
),
anomaly_detector_threshold=
dict(threshold=np.block([
np.arange(0., 0.7, 0.05),
| np.arange(0.7, 1.2, 0.005) | numpy.arange |
try:
import numpy as np
from scipy.interpolate import interp1d
import decida.Data
except ModuleNotFoundError:
print('Failed to import libraries for results parsing. Capabilities may be limited.') # noqa
class SpiceResult:
def __init__(self, t, v):
self.t = t
self.v = v
self.func = interp1d(t, v, bounds_error=False, fill_value=(v[0], v[-1]))
def __call__(self, t):
return self.func(t)
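# Usage sketch (added for illustration, not part of the original module): SpiceResult
# interpolates linearly between samples and clamps to the first/last value outside the
# simulated time range.
def _example_spice_result_usage():
    t = np.array([0.0, 1e-9, 2e-9])
    v = np.array([0.0, 1.2, 1.2])
    sig = SpiceResult(t, v)
    assert abs(sig(0.5e-9) - 0.6) < 1e-12   # linear interpolation inside the range
    assert sig(5e-9) == 1.2                 # clamped to v[-1] beyond the last sample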
# temporary measure -- CSDF parsing is broken in
# DeCiDa so a simple parser is implemented here
class CSDFData:
def __init__(self):
self._names = {'time': 0}
self._data = None
self.data = {}
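# (Added note, inferred from read() below) the parser expects '#N' lines listing quoted
# signal names, '#C <time> <count> v1 v2 ...' lines carrying one time point followed by
# the signal values, and a '#;' line terminating the data block.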
def names(self):
return list(self._names.keys())
def get(self, name):
return self._data[:, self._names[name]]
def read(self, file):
# read in flat data vector
mode = None
data = []
with open(file, 'r') as f:
for line in f:
# determine mode
line = line.strip().split()
if not line:
continue
elif line[0] == '#N':
mode = '#N'
line.pop(0)
elif line[0] == '#C':
mode = '#C'
line.pop(0)
data.append(float(line.pop(0)))
line.pop(0)
elif line[0] == '#;':
break
# parse depending on mode
if mode == '#N':
for tok in line:
tok = tok[1:-1]
self._names[tok] = len(self._names)
elif mode == '#C':
for tok in line:
data.append(float(tok))
# reshape into numpy array
nv = len(self._names)
ns = len(data) // nv
data =
| np.array(data, dtype=float) | numpy.array |
import os, logging
import pyqtgraph as pg
from pyqtgraph import QtGui, QtCore
import pyqtgraph_extensions as pgx
import numpy as np
logging.basicConfig(level=logging.DEBUG)
logging.getLogger(pgx.__name__).setLevel(level=logging.DEBUG)
def test_ColorBarItem_manual(qtbot):
##
glw = pg.GraphicsLayoutWidget()
plt = glw.addPlot(title='Testing colormaps', labels={'left': 'y', 'bottom': 'x'})
im = pgx.ImageItem()
im.setLookupTable(pgx.get_colormap_lut())
x = np.arange(100) - 50
y = np.arange(110)[:, None] - 55
z = 5e9*np.exp(-(x**2 + y**2)/100.0)
im.setImage(z + np.random.random(z.shape))
plt.addItem(im)
cb = pgx.ColorBarItem()
cb.setManual(lut=im.lut, levels=im.levels)
# cb.setLabel('intensity')
glw.addItem(cb)
glw.show()
##
assert np.allclose(cb.axis.range, im.levels)
qtbot.addWidget(glw)
def test_ColorBarItem_auto(qtbot):
##
glw = pg.GraphicsLayoutWidget()
plt = glw.addPlot(title='Testing colormaps', labels={'left': 'y', 'bottom': 'x'})
im = pgx.ImageItem()
im.setLookupTable(pgx.get_colormap_lut())
x = np.arange(100) - 50
y = np.arange(110)[:, None] - 55
z = 5e9*np.exp(-(x**2 + y**2)/100.0)
im.setImage(z +
| np.random.random(z.shape) | numpy.random.random |
from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings
)
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
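# Usage sketch (added for illustration; not an upstream test): the helper returns memory
# whose data pointer honours the requested alignment, which is what the
# alignment-sensitive tests below rely on (the docstring additionally promises avoiding
# alignment to 2*align, which is not asserted here).
def _example_aligned_zeros_usage():
    a = _aligned_zeros((4, 4), dtype=np.float64, align=64)
    addr = a.__array_interface__['data'][0]
    assert addr % 64 == 0                      # data buffer starts on a 64-byte boundary
    assert a.shape == (4, 4) and not a.any()   # requested shape, zero-filled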
class TestFlags(object):
def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags.updateifcopy, False)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags['U'], False)
assert_equal(self.a.flags['UPDATEIFCOPY'], False)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core._multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank(object):
def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
def test_real_imag(self):
# contiguity checks are for gh-11245
x = np.array(1j)
xr = x.real
xi = x.imag
assert_equal(xr, np.array(0))
assert_(type(xr) is np.ndarray)
assert_equal(xr.flags.contiguous, True)
assert_equal(xr.flags.f_contiguous, True)
assert_equal(xi, np.array(1))
assert_(type(xi) is np.ndarray)
assert_equal(xi.flags.contiguous, True)
assert_equal(xi.flags.f_contiguous, True)
class TestScalarIndexing(object):
def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(object):
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
# 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
def test_jagged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = np.array([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_jagged_shape_object(self):
# The jagged dimension of a list is turned into an object array
a = np.array([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2], [3, 3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
def test_assignment(self):
def testassign(arr, v):
c = arr.copy()
c[0] = v # assign using setitem
c[1:] = v # assign using "dtype_transfer" code paths
return c
dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
arr = np.ones(2, dt)
v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
v4 = np.array([(2,)], dtype=[('bar', 'i8')])
v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
ans = np.array([(2,3),(2,3)], dtype=dt)
assert_equal(testassign(arr, v1), ans)
assert_equal(testassign(arr, v2), ans)
assert_equal(testassign(arr, v3), ans)
assert_raises(ValueError, lambda: testassign(arr, v4))
assert_equal(testassign(arr, v5), ans)
w[:] = 4
assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
# test field-reordering, assignment by position, and self-assignment
a = np.array([(1,2,3)],
dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
a[['foo', 'bar']] = a[['bar', 'foo']]
assert_equal(a[0].item(), (2,1,3))
# test that this works even for 'simple_unaligned' structs
# (ie, that PyArray_EquivTypes cares about field order too)
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
def test_structuredscalar_indexing(self):
# test gh-7262
x = np.empty(shape=1, dtype="(2)3S,(2)3U")
assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
assert_equal(x[0], x[0][()])
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
assert_raises(KeyError, lambda : a[['a','a']])
assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b'', (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b'a', b'0', b' ']:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.unicode_)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
class TestZeroSizeFlexible(object):
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
if dtype == np.void:
return np.zeros(shape, dtype=(dtype, 0))
# not constructable directly
dtype = np.dtype([('x', dtype, 0)])
return np.zeros(shape, dtype=dtype)['x']
def test_create(self):
zs = self._zeros(10, bytes)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, np.void)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, unicode)
assert_equal(zs.itemsize, 0)
def _test_sort_partition(self, name, kinds, **kwargs):
# Previously, these would all hang
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
sort_method = getattr(zs, name)
sort_func = getattr(np, name)
for kind in kinds:
sort_method(kind=kind, **kwargs)
sort_func(zs, kind=kind, **kwargs)
def test_sort(self):
self._test_sort_partition('sort', kinds='qhm')
def test_argsort(self):
self._test_sort_partition('argsort', kinds='qhm')
def test_partition(self):
self._test_sort_partition('partition', kinds=['introselect'], kth=2)
def test_argpartition(self):
self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
def test_resize(self):
# previously an error
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
zs.resize(25)
zs.resize((10, 10))
def test_view(self):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
# viewing as itself should be allowed
assert_equal(zs.view(dt).dtype, np.dtype(dt))
# viewing as any non-empty type gives an empty result
assert_equal(zs.view((dt, 1)).shape, (0,))
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
p = pickle.dumps(zs, protocol=proto)
zs2 = pickle.loads(p)
assert_equal(zs.dtype, zs2.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_pickle_with_buffercallback(self):
array = np.arange(10)
buffers = []
bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
protocol=5)
array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
# when using pickle protocol 5 with buffer callbacks,
# array_from_buffer is reconstructed from a buffer holding a view
# to the initial array's data, so modifying an element in array
# should modify it in array_from_buffer too.
array[0] = -1
assert array_from_buffer[0] == -1, array_from_buffer[0]
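    # Editorial sketch (not part of the upstream suite): without a
    # buffer_callback, protocol 5 still serializes the array data in-band,
    # so the reconstructed array owns an independent copy of the data.
    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
                        reason="requires pickle protocol 5")
    def test_pickle_without_buffercallback_copies(self):
        array = np.arange(10)
        array_from_bytes = pickle.loads(pickle.dumps(array, protocol=5))
        array[0] = -1
        # the in-band copy is unaffected by later writes to the original
        assert_equal(array_from_bytes[0], 0)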
class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
oned = np.ones(1)
# gh-12031, caused SEGFAULT
assert_raises(TypeError, oned.choose,np.void(0), [oned])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
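    # Editorial sketch (not part of the upstream suite): ndarray.round uses
    # round-half-to-even, which is why 1.5 rounds to 2 and 4.5 + 1.5j rounds
    # to 4 + 2j in the checks above.
    def test_round_half_to_even_demo(self):
        assert_equal(np.array([0.5, 1.5, 2.5, 3.5]).round(), [0., 2., 2., 4.])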
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
#gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
class Raiser(object):
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
assert_raises_regex(ValueError, 'duplicate',
lambda: r.sort(order=['id', 'id']))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
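    # Editorial sketch (not part of the upstream suite): a stable kind
    # preserves the relative order of equal keys, so the tied elements below
    # keep their original (ascending) positions in the result.
    def test_argsort_stable_ties_demo(self):
        a = np.array([1, 0, 1, 0])
        assert_equal(a.argsort(kind='m'), [1, 3, 0, 2])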
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l')
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l')
assert_array_equal(b, np.zeros(0, dtype=np.intp))
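    # Editorial sketch (not part of the upstream suite): side='left' ('l')
    # returns the first position where the key can be inserted without
    # breaking the order, side='right' ('r') returns the last such position.
    def test_searchsorted_side_demo(self):
        a = np.array([1, 2, 2, 2, 3])
        assert_equal(a.searchsorted(2, side='left'), 1)
        assert_equal(a.searchsorted(2, side='right'), 4)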
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l', s[:0])
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l', s)
assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
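    # Editorial sketch (not part of the upstream suite): passing
    # sorter=a.argsort() makes searchsorted behave as if the array had been
    # sorted first, returning insertion points into that sorted order.
    def test_searchsorted_sorter_demo(self):
        a = np.array([40, 10, 30, 20])
        s = a.argsort()
        assert_equal(a.searchsorted(25, sorter=s),
                     np.sort(a).searchsorted(25))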
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
# Test non-integer values in kth raise an error/
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
        msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
assert_equal(np.partition(d, 5, kind=k)[5], 5)
assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
assert_equal(np.partition(d, 6, kind=k)[6], 6)
assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
                for row in d1:
                    np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
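    # Editorial sketch (not part of the upstream suite): the invariant that
    # assert_partitioned checks -- after partitioning around k, position k
    # holds the value a full sort would put there, with smaller-or-equal
    # values before it and larger-or-equal values after it.
    def test_partition_invariant_demo(self):
        d = np.array([9, 4, 7, 1, 8, 3])
        k = 2
        p = np.partition(d, k)
        assert_equal(p[k], np.sort(d)[k])
        assert_((p[:k] <= p[k]).all())
        assert_((p[k:] >= p[k]).all())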
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
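    # Editorial sketch (not part of the upstream suite): unlike ravel,
    # flatten always returns a copy, even when the array is contiguous.
    def test_flatten_always_copies_demo(self):
        x = np.arange(6).reshape(2, 3)
        assert_(x.flatten().flags.owndata)
        assert_(not np.may_share_memory(x.flatten(), x))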
@pytest.mark.parametrize('func', (np.dot, np.matmul))
def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(func(eaf, eaf), eaf)
assert_equal(func(eaf.T, eaf), eaf)
assert_equal(func(eaf, eaf.T), eaf)
assert_equal(func(eaf.T, eaf.T), eaf)
assert_equal(func(eaf.T.copy(), eaf), eaf)
assert_equal(func(eaf, eaf.T.copy()), eaf)
assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(func(ebf, ebf), eaf)
assert_equal(func(ebf.T, ebf), eaf)
assert_equal(func(ebf, ebf.T), eaf)
assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
func(edf[::-1, :], edf.T),
func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
func(edf[:, ::-1], edf.T),
func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
func(edf, edf[::-1, :].T),
func(edf, edf[::-1, :].T.copy())
)
assert_equal(
func(edf, edf[:, ::-1].T),
func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(func(edf, edf.T), eddtf)
assert_equal(func(edf.T, edf), edtdf)
@pytest.mark.parametrize('func', (np.dot, np.matmul))
@pytest.mark.parametrize('dtype', 'ifdFD')
def test_no_dgemv(self, func, dtype):
# check vector arg for contiguous before gemv
# gh-12156
a = np.arange(8.0, dtype=dtype).reshape(2, 4)
b = np.broadcast_to(1., (4, 1))
ret1 = func(a, b)
ret2 = func(a, b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T)
assert_equal(ret1, ret2)
# check for unaligned data
dt = np.dtype(dtype)
a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
a = a.reshape(2, 4)
b = a[0]
# make sure it is not aligned
assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
ret1 = func(a, b)
ret2 = func(a.copy(), b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T.copy())
assert_equal(ret1, ret2)
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_dot_matmul_out(self):
# gh-9641
class Sub(np.ndarray):
pass
a = np.ones((2, 2)).view(Sub)
b = np.ones((2, 2)).view(Sub)
out = np.ones((2, 2))
# make sure out can be any ndarray (not only subclass of inputs)
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
def test_dot_matmul_inner_array_casting_fails(self):
class A(object):
def __array__(self, *args, **kwargs):
raise NotImplementedError
# Don't override the error from calling __array__()
assert_raises(NotImplementedError, np.dot, A(), A())
assert_raises(NotImplementedError, np.matmul, A(), A())
assert_raises(NotImplementedError, np.inner, A(), A())
def test_matmul_out(self):
# overlapping memory
a = np.arange(18).reshape(2, 3, 3)
b = np.matmul(a, a)
c = np.matmul(a, a, out=a)
assert_(c is a)
assert_equal(c, b)
a = np.arange(18).reshape(2, 3, 3)
c = np.matmul(a, a, out=a[::-1, ...])
assert_(c.base is a.base)
assert_equal(c, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
        # np.diagonal has returned a read-only view since 1.9; the planned
        # switch to a writeable view never happened, so just check the flags.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_size_zero_memleak(self):
# Regression test for issue 9615
# Exercises a special-case code path for dot products of length
# zero in cblasfuncs (making it is specific to floating dtypes).
a = np.array([], dtype=np.float64)
x = np.array(2.0)
for _ in range(100):
np.dot(a, a, out=x)
if HAS_REFCOUNT:
assert_(sys.getrefcount(x) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
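    # Editorial sketch (not part of the upstream suite): put always indexes
    # into the flattened array, which is why the 2-d cases above compare
    # against tgt.reshape(2, 3).
    def test_put_flat_index_demo(self):
        a = np.zeros((2, 3))
        a.put(4, 9)  # flat index 4 is position (1, 1)
        assert_equal(a, [[0., 0., 0.], [0., 9., 0.]])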
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
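    # Editorial sketch (not part of the upstream suite): ravel returns a view
    # when the requested order matches the memory layout and a copy
    # otherwise, which is what the owndata checks above are probing.
    def test_ravel_view_vs_copy_demo(self):
        a = np.arange(6).reshape(2, 3)  # C-contiguous
        assert_(np.may_share_memory(a.ravel('C'), a))
        assert_(not np.may_share_memory(a.ravel('F'), a))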
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(np.AxisError, a.swapaxes, -5, 0)
assert_raises(np.AxisError, a.swapaxes, 4, 0)
assert_raises(np.AxisError, a.swapaxes, 0, -5)
assert_raises(np.AxisError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestSequenceMethods(object):
def test_array_contains(self):
assert_(4.0 in np.arange(16.).reshape(4,4))
assert_(20.0 not in np.arange(16.).reshape(4,4))
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
# 'lt': (np.greater, False),
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
if sys.version_info >= (3, 5):
ops['matmul'] = (np.matmul, False, float)
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace["__{0}__".format(op)] = op_impl
class_namespace["__r{0}__".format(op)] = rop_impl
class_namespace["__i{0}__".format(op)] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
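            # The class is built through eval with a controlled __name__ so
            # that the resulting type's __module__ becomes alleged_module
            # (type() picks __module__ up from the creating namespace).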
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
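            # The three expectation flags map onto the assertions below:
            #   binop_override_expected: arr.__op__(obj) returns
            #       NotImplemented, so Python falls back to obj's reflected
            #       method
            #   ufunc_override_expected: obj.__array_ufunc__ is called instead
            #       of the default ufunc machinery
            #   inplace_override_expected: the in-place path is likewise
            #       expected to defer to obj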
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, "__{0}__".format(op))
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_method, obj, err_msg=err_msg)
# obj __op__ arr
arr_rmethod = getattr(arr, "__r{0}__".format(op))
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
assert_raises((TypeError, Coerced),
arr_rmethod, obj, err_msg=err_msg)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, "__i{0}__".format(op))
if inplace_override_expected:
assert_equal(arr_method(obj), NotImplemented,
err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_imethod, obj,
err_msg=err_msg)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
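    def test_array_ufunc_none_defers_binop_sketch(self):
        # Minimal illustrative sketch of the deferral rule exercised above:
        # an operand whose __array_ufunc__ is None makes ndarray binops
        # return NotImplemented, so Python falls back to that operand's
        # reflected method.  The class and method names here are ad hoc.
        class Deferring(object):
            __array_ufunc__ = None
            def __radd__(self, other):
                return "reverse"
        a = np.arange(3)
        assert_equal(a.__add__(Deferring()), NotImplemented)
        assert_equal(a + Deferring(), "reverse")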
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_array_ufunc_index(self):
# Check that index is set appropriately, also if only an output
        # is passed on (the latter is another regression test for github bug 4753)
# This also checks implicitly that 'out' is always a tuple.
class CheckIndex(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
# calls below mean we must be in an output.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(np.modf(dummy, out=a), (0,))
assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
class SomeClass(object):
def __init__(self, num=None):
self.num = num
# want to ensure a fast pow path is not taken
def __mul__(self, other):
raise AssertionError('__mul__ should not be called')
def __div__(self, other):
raise AssertionError('__div__ should not be called')
def __pow__(self, exp):
return SomeClass(num=self.num ** exp)
def __eq__(self, other):
if isinstance(other, SomeClass):
return self.num == other.num
__rpow__ = __pow__
def pow_for(exp, arr):
return np.array([x ** exp for x in arr])
obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
def test_pos_array_ufunc_override(self):
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*[i.view(np.ndarray) for
i in inputs], **kwargs)
tst = np.array('foo').view(A)
with assert_raises(TypeError):
+tst
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
        #    d = a.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
d + d
        # the returned original must not have been modified by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # scanning whether the refcount == 1 object is on the python stack, to
        # check that we are called directly from python, is flawed: the object
        # may still be above the stack pointer and we have no access to the
        # top of it
#
        # def incref_elide_l(l):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
        # the returned original must not have been modified by an in-place operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
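    def test_elide_semantics_sketch(self):
        # Illustrative sketch (in addition to the extension checks above):
        # temporary elision is purely an internal optimization, so reusing a
        # temporary's buffer must never change results.  The array is sized
        # above the assumed elision threshold so the fast path can trigger;
        # the exact threshold is an implementation detail.
        d = np.full(100000, 2.0)
        r = (d + d) * 3.0 - d
        assert_array_equal(r, np.full(100000, 10.0))
        assert_array_equal(d, np.full(100000, 2.0))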
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
d = np.ones(200000, dtype=np.int64)
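        # 2**222 does not fit in int64, so the sum is promoted to object dtype
        # and the int64 temporary (d + d) cannot simply be reused for it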
assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test no elision on broadcast to higher dimension
        # this only triggers the elision code path in debug mode; triggering it
        # in normal mode would need a matching dimension of at least 256kb,
        # i.e. a lot of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
def test_elide_scalar_readonly(self):
        # The imaginary part of a real array is readonly. This needs to go
        # through fast_scalar_power, which is only called for powers of
        # +1, -1, 0, 0.5, and 2, so use 2. Elision also needs a valid
        # refcount, which the imaginary part of a real array provides.
        # Should not error.
a = np.empty(100000, dtype=np.float64)
a.imag ** 2
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
class TestCAPI(object):
def test_IsPythonScalar(self):
from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(object):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
assert_(isinstance(x[0], int))
assert_(type(x[0, ...]) is np.ndarray)
class TestPickling(object):
def test_highest_available_pickle_protocol(self):
try:
import pickle5
except ImportError:
pickle5 = None
if sys.version_info[:2] >= (3, 8) or pickle5 is not None:
assert pickle.HIGHEST_PROTOCOL >= 5
else:
assert pickle.HIGHEST_PROTOCOL < 5
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
                        reason=('this tests the error messages when trying to '
                                'use protocol 5 although it is not available'))
def test_correct_protocol5_error_message(self):
array = np.arange(10)
if sys.version_info[:2] in ((3, 6), (3, 7)):
# For the specific case of python3.6 and 3.7, raise a clear import
# error about the pickle5 backport when trying to use protocol=5
# without the pickle5 package
with pytest.raises(ImportError):
array.__reduce_ex__(5)
elif sys.version_info[:2] < (3, 6):
            # when calling __reduce_ex__ explicitly with protocol=5 on python
            # versions older than 3.6, raise a ValueError saying that protocol
            # 5 is not available for this python version
with pytest.raises(ValueError):
array.__reduce_ex__(5)
def test_record_array_with_object_dtype(self):
my_object = object()
arr_with_object = np.array(
[(my_object, 1, 2.0)],
dtype=[('a', object), ('b', int), ('c', float)])
arr_without_object = np.array(
[('xxx', 1, 2.0)],
dtype=[('a', str), ('b', int), ('c', float)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_arr_with_object = pickle.loads(
pickle.dumps(arr_with_object, protocol=proto))
depickled_arr_without_object = pickle.loads(
pickle.dumps(arr_without_object, protocol=proto))
assert_equal(arr_with_object.dtype,
depickled_arr_with_object.dtype)
assert_equal(arr_without_object.dtype,
depickled_arr_without_object.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_f_contiguous_array(self):
f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
buffers = []
# When using pickle protocol 5, Fortran-contiguous arrays can be
# serialized using out-of-band buffers
bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
buffer_callback=buffers.append)
assert len(buffers) > 0
depickled_f_contiguous_array = pickle.loads(bytes_string,
buffers=buffers)
assert_equal(f_contiguous_array, depickled_f_contiguous_array)
def test_non_contiguous_array(self):
non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
assert not non_contiguous_array.flags.c_contiguous
assert not non_contiguous_array.flags.f_contiguous
# make sure non-contiguous arrays can be pickled-depickled
# using any protocol
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_non_contiguous_array = pickle.loads(
pickle.dumps(non_contiguous_array, protocol=proto))
assert_equal(non_contiguous_array, depickled_non_contiguous_array)
def test_roundtrip(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
refs = [weakref.ref(a) for a in DATA]
for a in DATA:
assert_equal(
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
del a, DATA, carray
gc.collect()
# check for reference leaks (gh-12793)
for ref in refs:
assert ref() is None
def _loads(self, obj):
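        # pickles generated on Python 2 contain raw byte strings; decoding
        # them as latin1 round-trips those bytes unchanged on Python 3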
if sys.version_info[0] >= 3:
return pickle.loads(obj, encoding='latin1')
else:
return pickle.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(s)
assert_equal(a, p)
class TestFancyIndexing(object):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(object):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([u"This", u"is", u"example"])
g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
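            # moving axis i to the front lets choose() pick, at every
            # remaining position, the element that argmax selected along i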
            assert_(np.all(amax == aargmax.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
max_val = np.max(arr)
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
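        # zero out the object pointers so every element is a NULL rather than
        # a real Python object; argmax must cope with these (gh-6032)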
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amin == aargmin.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
min_val = np.min(arr)
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(object):
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(object):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(object):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress(object):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T), T, mask, val)
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T))
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(object):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setup(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def teardown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
        # a BytesIO object should probably be supported like a real file,
        # but for now just test that proper errors are raised
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() keeps less precision than repr()
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
def test_io_open_unbuffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
# check append mode (gh-8329)
open(self.filename, "w").close() # delete file contents
with open(self.filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(self.filename))
with open(self.filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_load_object_array_fromfile(self):
# gh-12300
with open(self.filename, 'w') as f:
# Ensure we have a file with consistent contents
pass
with open(self.filename, 'rb') as f:
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, f, dtype=object)
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, self.filename, dtype=object)
def _check_from(self, s, value, **kw):
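        # parse the byte string both in memory (frombuffer/fromstring) and
        # again from disk via fromfile, comparing each result to `value`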
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
else:
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
        calls on windows. Without the workarounds, a write like the one below
        would normally hang.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
with CommaDecimalPointLocale():
self.test_numbers()
self.test_nan()
self.test_inf()
self.test_counted_string()
self.test_ascii()
self.test_malformed()
self.test_tofile_sep()
self.test_tofile_format()
class TestFromBuffer(object):
@pytest.mark.parametrize('byteorder', ['<', '>'])
@pytest.mark.parametrize('dtype', [float, int, complex])
def test_basic(self, byteorder, dtype):
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7)) * 5).astype(dt)
buf = x.tobytes()
assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
def test_empty(self):
assert_array_equal(np.frombuffer(b''), np.array([]))
class TestFlat(object):
def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
# for 1.14 all are set to non-writeable on the way to replacing the
# UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is False)
with assert_warns(DeprecationWarning):
assert_(c.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(d.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(e.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
# UPDATEIFCOPY is removed.
assert_(f.flags.updateifcopy is False)
assert_(c.flags.writebackifcopy is False)
assert_(d.flags.writebackifcopy is False)
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
class TestResize(object):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_0d_shape(self):
        # do it multiple times to test that it does not break the alloc cache, gh-9216
for i in range(10):
x = np.empty((1,))
x.resize(())
assert_equal(x.shape, ())
assert_equal(x.size, 1)
x = np.empty(())
x.resize((1,))
assert_equal(x.shape, (1,))
assert_equal(x.size, 1)
def test_invalid_arguments(self):
assert_raises(TypeError, np.eye(3).resize, 'hi')
assert_raises(ValueError, np.eye(3).resize, -1)
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
def test_empty_view(self):
# check that sizes containing a zero don't trigger a reallocate for
# already empty arrays
x = np.zeros((10, 0), int)
x_view = x[...]
x_view.resize((0, 10))
x_view.resize((0, 100))
def test_check_weakref(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
xref = weakref.ref(x)
assert_raises(ValueError, x.resize, (5, 1))
del xref # avoid pyflakes unused variable warning.
class TestRecord(object):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_dtype_init():
np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_dtype_init)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(b'a', int)])
assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
dt = np.dtype([((b'a', 'b'), int)])
assert_raises(TypeError, dt.__getitem__, b'a')
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, b'a')
y = x[0]
assert_raises(IndexError, y.__getitem__, b'a')
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_multiple_field_name_unicode(self):
def test_dtype_unicode():
np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
        # Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_dtype_unicode)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = u'b'
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_names(self):
# Unicode field names are converted to ascii on Python 2:
encodable_name = u'b'
assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
# But raises UnicodeEncodeError if it can't be encoded:
nonencodable_name = u'\uc3bc'
assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
def test_fromarrays_unicode(self):
# A single name string provided to fromarrays() is allowed to be unicode
# on both Python 2 and 3:
x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
assert_equal(x['a'][0], 0)
assert_equal(x['b'][0], 1)
def test_unicode_order(self):
# Test that we can sort with order as a unicode field name in both Python 2 and
# 3:
name = u'b'
x = np.array([1, 3, 2], dtype=[(name, int)])
x.sort(order=name)
assert_equal(x[u'b'], np.array([1, 2, 3]))
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, b'f1', 1)
assert_raises(IndexError, a.__getitem__, b'f1')
assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
pytest.skip('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
assert_(hash(a[0]) == hash(a[1]))
assert_(hash(a[0]) == hash(b[0]))
assert_(hash(a[0]) != hash(b[1]))
assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
def test_multifield_indexing_view(self):
a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
v = a[['a', 'c']]
assert_(v.base is a)
assert_(v.dtype == np.dtype({'names': ['a', 'c'],
'formats': ['i4', 'u4'],
'offsets': [0, 8]}))
v[:] = (4,5)
assert_equal(a[0].item(), (4, 1, 5))
class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(object):
funcs = [_mean, _var, _std]
def setup(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_python_type(self):
for x in (np.float16(1.), 1, 1., 1+0j):
assert_equal(np.mean([x]), 1.)
assert_equal(np.std([x]), 0.)
assert_equal(np.var([x]), 0.)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
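# Editor's aside (not part of the NumPy test suite): the identity exercised above is
# var(x, ddof=d) * (N - d) == var(x, ddof=0) * N, since both sides equal the sum of
# squared deviations about the mean. A minimal check with plain NumPy:
#
# >>> x = np.arange(5.0)
# >>> np.allclose(np.var(x, ddof=1) * (5 - 1), np.var(x) * 5)
# True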
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
# This fail if the sum inside mean is done in float16 instead
# of float32.
assert_(_mean(np.ones(100000, dtype='float16')) == 1)
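# Editor's note (hedged aside, not from the test suite): float16 has a maximum
# finite value of 65504, so a running sum of 1e5 ones kept in float16 cannot reach
# 1e5 and the resulting mean would land far from 1.0. The assertion above holds
# because np.mean upcasts float16 input to a wider accumulator by default; forcing
# the accumulator, e.g. np.ones(100000, dtype='float16').mean(dtype=np.float16),
# is expected to give a value well away from 1.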
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(object):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(object):
def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon(object):
"""Common tests for '@' operator and numpy.matmul.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_scalar_output(self):
vec1 = np.array([2])
vec2 = np.array([3, 4]).reshape(1, -1)
tgt = np.array([6, 8])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt)
res = self.matmul(v2.T, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?').reshape(1, -1)
res = self.matmul(vec[:, 0], vec)
assert_equal(res, True)
def test_vector_vector_values(self):
vec1 = np.array([1, 2])
vec2 = np.array([3, 4]).reshape(-1, 1)
tgt1 = np.array([11])
tgt2 = np.array([[3, 6], [4, 8]])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt1)
# no broadcast, we must make v1 into a 2d ndarray
res = self.matmul(v2, v1.reshape(1, -1))
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
completion: assert_equal(res, tgt2)
api: numpy.testing.assert_equal
""" Utilities for converting COLIBRI tracks into TRILEGAL tracks """
import glob
import logging
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from ast import literal_eval
from matplotlib import cm, rcParams
from matplotlib.ticker import NullFormatter, MultipleLocator, ScalarFormatter, MaxNLocator
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
rcParams['text.usetex'] = True
rcParams['text.latex.unicode'] = False
rcParams['axes.linewidth'] = 1
rcParams['ytick.labelsize'] = 'large'
rcParams['xtick.labelsize'] = 'large'
rcParams['axes.edgecolor'] = 'black'
nullfmt = NullFormatter() # no labels
def colibri2trilegal(input_file):
'''
This function formats Paola's COLIBRI tracks and creates the files needed to use
them with TRILEGAL, with the option to make diagnostic plots.
input_file is the path to an input file; it is parsed into an InputFile object.
WHAT'S GOING ON:
Here is what it takes to go from COLIBRI agb tracks to TRILEGAL:
1. COLIBRI tracks
2. COLIBRI tracks formatted for TRILEGAL
3. TRILEGAL files that link to COLIBRI re-formatted tracks
1. COLIBRI tracks must have naming scheme
[mix]/[set]/[Trash]_[metallicity]_[Y]/[track_identifier]
ex: agb_caf09_z0.008/S1/agb_*Z*.dat
ex: CAF09/S_SCS/*Z*/agb_*Z*.dat
These can live anywhere, and do not need to be included in TRILEGAL
directories.
2. COLIBRI tracks are formatted for TRILEGAL with no header file.
They only include the quiescent phases and a column with dL/dT
(See AGBTracks)
3. TRILEGAL needs two files to link to COLIBRI re-formatted tracks
a. track file goes here: isotrack/tracce_[mix]_[set].dat
b. cmd_input_file that links to the track file
goes here trilegal_1.3/cmd_input_[mix]_[set].dat
'''
infile = InputFile(input_file, default_dict=agb_input_defaults())
# set up file names and directories, cd to COLIBRI tracks.
agb_setup(infile)
# list of isofiles, zs, and ys to send to tracce file.
isofiles, Zs, Ys = [], [], []
imfr_data = np.array([])
lifetime_data = np.array([])
for metal_dir in infile.metal_dirs:
metallicity, Y = metallicity_from_dir(metal_dir)
logger.info('Z = {}'.format(metallicity))
if infile.diag_plots is True:
if infile.diagnostic_dir0 is not None:
diagnostic_dir = os.path.join(infile.diagnostic_dir0, infile.agb_mix,
infile.set_name, metal_dir) + '/'
ensure_dir(diagnostic_dir)
# update infile class to place plots in this directory
infile.diagnostic_dir = diagnostic_dir
else:
logger.error('Must specify diagnostic_dir0 in infile for diag plots')
sys.exit(2)
agb_tracks = get_files(os.path.join(infile.agbtrack_dir, infile.agb_mix,
infile.set_name, metal_dir),
infile.track_identifier)
agb_tracks.sort()
assert len(agb_tracks) > 0, 'No agb tracks found!'
iso_name_conv = '_'.join(('Z%.4f' % metallicity, infile.name_conv))
isofile = os.path.join(infile.home, infile.isotrack_dir, iso_name_conv)
if infile.over_write is False and os.path.isfile(isofile):
logger.warning('not over writing {}'.format(isofile))
out = None
else:
out = open(isofile, 'w')
isofile_rel_name = os.path.join('isotrack', infile.isotrack_dir,
iso_name_conv)
logger.info('found {} tracks'.format(len(agb_tracks)))
for i, agb_track in enumerate(agb_tracks):
# load track
track = get_numeric_data(agb_track)
if track == -1:
continue
if track.bad_track is True:
continue
assert metallicity == track.metallicity, \
'directory and track metallicity do not match'
# make iso file for trilegal
if out is not None:
if i == 0:
make_iso_file(track, out, write_header=True)
else:
make_iso_file(track, out)
# save information for lifetime file.
lifetime_datum = np.array([metallicity, track.mass, track.tauc,
track.taum])
lifetime_data = np.append(lifetime_data, lifetime_datum)
# make diagnostic plots
if infile.diag_plots is True and infile.diagnostic_dir0 is not None:
assert metallicity_from_dir(infile.diagnostic_dir)[0] == \
track.metallicity, 'diag dir met wrong!'
diag_plots(track, infile)
# save information for imfr
if infile.make_imfr is True:
M_s = track.data_array['M_star']
imfr_datum = np.array([M_s[0], M_s[-1], float(metallicity)])
imfr_data = np.append(imfr_data, imfr_datum)
if out is not None:
out.close()
logger.info('wrote {}'.format(isofile))
# keep information for tracce file
isofiles.append(isofile_rel_name)
Ys.append(Y)
Zs.append(metallicity)
#bigplots(agb_tracks, infile)
# make file to link cmd_input to formatted agb tracks
make_met_file(infile.tracce_file, Zs, Ys, isofiles)
# make cmd_input file
cmd_in_kw = {'cmd_input_file': infile.cmd_input_file,
'file_tpagb': infile.tracce_file_rel,
'mass_loss': infile.mass_loss,
'file_isotrack': infile.file_isotrack}
write_cmd_input_file(**cmd_in_kw)
if infile.make_imfr is True and infile.diagnostic_dir0 is not None:
ifmr_file = os.path.join(infile.diagnostic_dir0, infile.agb_mix,
infile.set_name,
'_'.join(('ifmr', infile.name_conv)))
ncols = 3
nrows = imfr_data.size // ncols
savetxt(ifmr_file, imfr_data.reshape(nrows, ncols),
header='# M_i M_f Z \n')
plot_ifmr(ifmr_file)
if infile.diagnostic_dir0 is not None and infile.diag_plots is True:
lifetime_file = os.path.join(infile.diagnostic_dir0, infile.agb_mix,
infile.set_name,
'_'.join(('tau_cm', infile.name_conv)))
ncols = 4
nrows = lifetime_data.size // ncols
savetxt(lifetime_file, lifetime_data.reshape(nrows, ncols),
header='# z mass tauc taum\n')
plot_cluster_test(lifetime_file)
os.chdir(infile.home)
return infile.cmd_input_file
def agb_setup(infile):
'''set up files and directories for TPAGB parsing.'''
infile.home = os.getcwd()
# Check for Paola's formatted tracks
ensure_dir(infile.isotrack_dir)
# are we making diagnostic plots, check directory.
if infile.diagnostic_dir0:
ensure_dir(os.path.join(infile.diagnostic_dir0, infile.agb_mix,
infile.set_name + '/'))
else:
logger.info('not making diagnostic plots')
# set name convention: [mix]_[set].dat
infile.name_conv = '{}_{}.dat'.format(infile.agb_mix, infile.set_name)
# set track search string
infile.track_identifier = 'agb_*Z*.dat'
# cmd_input_file that links to the track file
cmd_input = '_'.join(('cmd', 'input', infile.name_conv))
ensure_dir(os.path.join(infile.home, infile.trilegal_dir))
infile.cmd_input_file = os.path.join(infile.home, infile.trilegal_dir,
cmd_input)
# track file to link from cmd_input to paola's formatted tracks
tracce_fh = '_'.join(('tracce', infile.name_conv))
infile.tracce_file = os.path.join(infile.home, infile.tracce_dir,
tracce_fh)
infile.tracce_file_rel = os.path.join('isotrack', infile.tracce_dir,
tracce_fh)
# move to the directory with metallicities.
infile.working_dir = os.path.join(infile.agbtrack_dir, infile.agb_mix,
infile.set_name)
os.chdir(infile.working_dir)
metal_dirs = [m for m in os.listdir(infile.working_dir)
if os.path.isdir(m) and 'Z' in m]
if infile.metals_subset is not None:
logger.info('doing a subset of metallicities')
metal_dirs = [m for m in metal_dirs
if metallicity_from_dir(m)[0] in infile.metals_subset]
metals = np.argsort([metallicity_from_dir(m)[0] for m in metal_dirs])
infile.metal_dirs = np.array(metal_dirs)[metals]
logger.info('found {} metallicities'.format(len(infile.metal_dirs)))
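# Worked example (editor's sketch; the values come from the defaults documented in
# agb_input_defaults below): with agb_mix='CAF09', set_name='S_NOV13',
# trilegal_dir='cmd_inputfiles/' and tracce_dir='isotrack_agb/', agb_setup derives
#   name_conv       -> 'CAF09_S_NOV13.dat'
#   cmd_input_file  -> '<home>/cmd_inputfiles/cmd_input_CAF09_S_NOV13.dat'
#   tracce_file     -> '<home>/isotrack_agb/tracce_CAF09_S_NOV13.dat'
#   tracce_file_rel -> 'isotrack/isotrack_agb/tracce_CAF09_S_NOV13.dat'
# and then searches each metallicity directory for tracks matching 'agb_*Z*.dat'.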
def metallicity_from_dir(met):
''' take Z and Y values from string'''
if met.endswith('/'):
met = met[:-1]
if len(os.path.split(met)) > 0:
met = os.path.split(met)[1]
z = float(met.split('_')[1].replace('Z', ''))
y = float(met.split('_')[-1].replace('Y', ''))
return z, y
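def _example_metallicity_from_dir():
    """Editor's usage sketch: the directory names below are hypothetical but follow
    the [Trash]_[metallicity]_[Y] scheme described in colibri2trilegal."""
    assert metallicity_from_dir('S12_Z0.008_Y0.256') == (0.008, 0.256)
    assert metallicity_from_dir('CAF09/S_NOV13/S12_Z0.014_Y0.273/') == (0.014, 0.273)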
class AGBTracks(object):
'''AGB data class'''
def __init__(self, data_array, col_keys, name):
self.data_array = data_array
self.key_dict = dict(zip(col_keys, range(len(col_keys))))
self.name = name
self.firstname = os.path.split(name)[1]
self.mass = float(self.firstname.split('_')[1])
self.metallicity = float(self.firstname.split('_')[2].replace('Z', ''))
# initialize: it's a well formatted track with more than one pulse
self.bad_track = False
# if only one thermal pulse, stop the press.
self.check_ntp()
self.get_TP_inds()
if not self.bad_track:
# force the beginning phase to not look like it's quiescent
self.fix_phi()
# load quiescent tracks
self.get_quiescent_inds()
# load indices of m and c stars
self.m_cstars()
# calculate the lifetimes of m and c stars
self.tauc_m()
# add points to low mass quiescent track for better interpolation
self.addpt = []
if len(self.Qs) <= 9 and self.mass < 3.:
self.add_points_to_q_track()
# find dl/dt of track
self.find_dldt()
else:
logger.error('bad track: {}'.format(name))
def find_dldt(self, order=1):
'''
Finds dL/dt of track object by a poly fit of order = 1 (default)
'''
TPs = self.TPs
qs = list(self.Qs)
status = self.data_array['status']
logl = self.data_array['L_star']
logt = self.data_array['T_star']
phi = self.data_array['PHI_TP']
# if a low mass interpolation point was added it will get
# the same slope as the rest of the thermal pulse.
#dl/dT seems somewhat linear for 0.2 < phi < 0.4 ...
lin_rise, = np.nonzero((status == 7) & (phi < 0.4) & (phi > 0.2))
rising = [list(set(TP) & set(lin_rise)) for TP in TPs]
fits = [np.polyfit(logt[r], logl[r], order) for r in rising if len(r) > 0]
slopes = np.array([])
# first line slope
slopes = np.append(slopes, (logl[2] - logl[0]) / (logt[2] - logt[0]))
# poly fitted slopes
slopes = np.append(slopes, [fits[i][0] for i in range(len(fits))])
# pop in an additional copy of the slope if an interpolation point
# was added.
if len(self.addpt) > 0:
tps_of_addpt = np.array([i for i in range(len(TPs))
if len(set(self.addpt) & set(TPs[i])) > 0])
slopes = np.insert(slopes, tps_of_addpt, slopes[tps_of_addpt])
self.Qs = np.insert(self.Qs, 0, 0)
self.rising = rising
self.slopes = slopes
self.fits = fits
def add_points_to_q_track(self):
'''
Add an extra point to the quiescent track for low masses:
if logt[qs+1] is hotter than logt[qs]
and there is a point in between logt[qs] and logt[qs+1] that is cooler
than logt[qs], add the coolest such point.
'''
addpt = self.addpt
qs = list(self.Qs)
logt = self.data_array['T_star']
tstep = self.data_array['step']
status = self.data_array['status']
Tqs = logt[qs]
# need to use some unique array, not logt, since logt could repeat,
# index would find the first one, not necessarily the correct one.
Sqs = tstep[qs] - 1. # steps start at 1, not zero
# takes the sign of the difference in logt(qs)
# if the sign of the difference is more than 0, we're going from cold to hot
# finds where the logt goes from getting colder to hotter...
ht, = np.nonzero(np.sign(np.diff(Tqs)) > 0)
ht = np.append(ht, ht + 1) # between the first and second
Sqs_ht = Sqs[ht]
# the indices between each hot point.
t_mids = [map(int, tstep[int(Sqs_ht[i]): int(Sqs_ht[i + 1])])
for i in range(len(Sqs_ht) - 1)]
Sqs_ht = Sqs_ht[: -1]
for i in range(len(Sqs_ht) - 1):
hot_inds = np.nonzero(logt[int(Sqs_ht[i])] > logt[t_mids[i]])[0]
if len(hot_inds) > 0:
# index of the min T of the hot index from above.
addpt.append(list(logt).index(np.min(logt[[t_mids[i][hi]
for hi in hot_inds]])))
if len(addpt) > 0:
addpt = np.unique([a for a in addpt if status[a] == 7.])
# hack: if there is more than one point, take the most evolved.
if len(addpt) > 1:
addpt = [np.max(addpt)]
# update Qs with added pts.
self.Qs = np.sort(np.concatenate((addpt, qs)))
# update mins to take the same point as Qs.
self.mins = list(np.sort(np.concatenate((addpt, self.mins))))
self.addpt = addpt
def check_ntp(self):
'''sets self.bad_track = True if only one thermal pulse.'''
ntp = self.data_array['NTP']
if ntp.size == 1:
logger.error('no tracks! {}'.format(self.name))
self.bad_track = True
def fix_phi(self):
'''PHI_TP on the first line of the agb track is 1, but it is not a quiescent stage; reset it to -1.'''
self.data_array['PHI_TP'][0] = -1.
def m_cstars(self, mdot_cond=-5, logl_cond=3.3):
'''
adds mstar and cstar attribute of indices that are true for:
mstar: co <=1 logl >= 3.3 mdot <= -5
cstar: co >=1 mdot <= -5
(by default) adjust mdot with mdot_cond and logl with logl_cond.
'''
data = self.data_array
self.mstar, = np.nonzero((data['CO'] <= 1) &
(data['L_star'] >= logl_cond) &
(data['dMdt'] <= mdot_cond))
self.cstar, = np.nonzero((data['CO'] >= 1) &
(data['dMdt'] <= mdot_cond))
def tauc_m(self):
'''lifetimes of c and m stars'''
try:
tauc = np.sum(self.data_array['dt'][self.cstar]) / 1e6
except IndexError:
tauc = 0.
logger.warning('no tauc')
try:
taum = np.sum(self.data_array['dt'][self.mstar]) / 1e6
except IndexError:
taum = 0.
logger.warning('no taum')
self.taum = taum
self.tauc = tauc
def get_TP_inds(self):
'''find the thermal pulsations of each file'''
self.TPs = []
if self.bad_track:
return
ntp = self.data_array['NTP']
un, iTPs = np.unique(ntp, return_index=True)
if un.size == 1:
logger.warning('only one thermal pulse.')
self.TPs = un
else:
# The indices of each TP are just the values between consecutive iTPs
# (up to the final grid point)
iTPs = np.append(iTPs, len(ntp))
self.TPs = [np.arange(iTPs[i], iTPs[i+1])
for i in range(len(iTPs) - 1)]
if len(self.TPs) == 1:
self.bad_track = True
def get_quiescent_inds(self):
'''
The quiescent phase, Qs, is the max phase in each TP,
i.e., closest to 1.
'''
phi = self.data_array['PHI_TP']
logl = self.data_array['L_star']
self.Qs = np.unique([TP[np.argmax(phi[TP])] for TP in self.TPs])
self.mins = np.unique([TP[np.argmin(logl[TP])] for TP in self.TPs])
def get_numeric_data(filename):
'''
Read one of Paola's COLIBRI tracks, stripping the "lg" (log) prefix from
column names. Returns an AGBTracks object. If there is a problem reading
the data, all data are passed as zeros.
'''
f = open(filename, 'r')
lines = f.readlines()
f.close()
line = lines[0]
if len(lines) == 1:
logger.warning('only one line in {}'.format(filename))
return -1
col_keys = line.replace('#', '').replace('lg', '').replace('*', 'star')
col_keys = col_keys.strip().split()
try:
data = np.genfromtxt(filename, missing_values='************',
names=col_keys)
except ValueError:
logger.error('problem with %s', filename)
data = np.zeros(len(col_keys))
return AGBTracks(data, col_keys, filename)
def calc_c_o(row):
"""
C or O excess
if (C/O>1):
excess = log10 [(YC/YH) - (YO/YH)] + 12
if C/O<1:
excess = log10 [(YO/YH) - (YC/YH)] + 12
where YC = X(C12)/12 + X(C13)/13
YO = X(O16)/16 + X(O17)/17 + X(O18)/18
YH = XH/1.00794
"""
yh = row['H'] / 1.00794
yc = row['C12'] / 12. + row['C13'] / 13.
yo = row['O16'] / 16. + row['O17'] / 17. + row['O18'] / 18.
if row['CO'] > 1:
excess = np.log10((yc / yh) - (yo / yh)) + 12.
else:
excess = np.log10((yo / yh) - (yc / yh)) + 12.
return excess
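def _example_calc_c_o():
    """Editor's worked example of the excess formula above, using made-up surface
    abundances (hypothetical numbers, for illustration only)."""
    row = {'H': 0.7, 'C12': 4.0e-3, 'C13': 5.0e-5,
           'O16': 2.0e-3, 'O17': 1.0e-6, 'O18': 5.0e-6, 'CO': 2.7}
    # YC ~ 3.37e-4, YO ~ 1.25e-4, YH ~ 0.695
    # C-rich branch: excess = log10((YC - YO) / YH) + 12 ~ 8.48
    return calc_c_o(row)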
def make_iso_file(track, isofile, write_header=False):
'''
this only writes the quiescent lines and the first line.
format of this file is:
age : age in yr
logl : logL
logte : logTe
mass : actual mass along track
mcore : core mass
per : period in days
period mode : 1=first overtone, 0=fundamental mode
mlr : mass loss rate in Msun/yr
x_min : X
y_min : Y
X_C : X_C
X_N : X_N
X_O : X_O
slope : dTe/dL
x_min : Xm at log L min of (the following) TP
y_min : Ym at log L min of (the following) TP
X_Cm : X_C at log L min of (the following) TP
X_Nm : X_N at log L min of (the following) TP
X_Om : X_O at log L min of (the following) TP
'''
isofile.write('# age(yr) logL logTe m_act mcore period ip')
isofile.write(' Mdot(Msun/yr) X Y X_C X_N X_O dlogTe/dlogL Xm Ym X_Cm X_Nm X_Om\n')
fmt = '%.4e %.4f %.4f %.5f %.5f %.4e %i %.4e %.6e %.6e %.6e %.6e %.6e %.4f %.6e %.6e %.6e %.6e %.6e\n'
# cull agb track to quiescent
rows = track.Qs
# min of each TP
mins = track.data_array[track.mins]
if len(rows) - len(mins) > 1:
import pdb; pdb.set_trace()
keys = track.key_dict.keys()
vals = track.key_dict.values()
col_keys = np.array(keys)[np.argsort(vals)]
#cno = [key for key in col_keys if (key.startswith('C1') or
# key.startswith('N1') or
# key.startswith('O1'))]
isofile.write(' %.4f %i # %s \n' % (track.mass, len(rows), track.firstname))
for i, r in enumerate(rows):
slope = 999
xcm = 999
xnm = 999
xom = 999
iminh = 999
iminy = 999
row = track.data_array[r]
period = row['P1']
if row['Pmod'] == 0:
period = row['P0']
mdot = 10 ** (row['dMdt'])
# CNO and excess are no longer used
#CNO = np.sum([row[c] for c in cno])
#excess = calc_c_o(row)
xc = np.sum([row[c] for c in col_keys if c.startswith('C1')])
xn = np.sum([row[c] for c in col_keys if c.startswith('N1')])
xo = np.sum([row[c] for c in col_keys if c.startswith('O1')])
if r != rows[-1]:
imin = mins[i]
xcm = np.sum([imin[c] for c in col_keys if c.startswith('C1')])
xnm = np.sum([imin[c] for c in col_keys if c.startswith('N1')])
xom = np.sum([imin[c] for c in col_keys if c.startswith('O1')])
iminh = imin['H']
iminy = imin['Y']
try:
slope = 1. / track.slopes[list(rows).index(r)]
except:
logger.error('bad slope: {}, row: {}'.format(track.firstname, i))
try:
isofile.write(fmt % (row['ageyr'], row['L_star'], row['T_star'],
row['M_star'], row['M_c'], period, row['Pmod'],
mdot, row['H'], row['Y'], xc, xn, xo, slope,
iminh, iminy, xcm, xnm, xom))
except IndexError:
logger.error('this row: {}'.format(list(rows).index(r)))
logger.error('row length: {} slope array length {}'.format(len(rows), len(track.slopes)))
logger.error('slope reciprocal: {}'.format(slope))
return
class InputFile(object):
'''
a class to replace too many kwargs from the input file.
does two things:
1. sets a default dictionary (see input_defaults) as attributes
2. unpacks the dictionary from load_input as attributes
(overwrites defaults).
'''
def __init__(self, filename, default_dict=None):
if default_dict is not None:
self.set_defaults(default_dict)
self.in_dict = load_input(filename)
self.unpack_dict()
def set_defaults(self, in_def):
self.unpack_dict(udict=in_def)
def unpack_dict(self, udict=None):
if udict is None:
udict = self.in_dict
[self.__setattr__(k, v) for k, v in udict.items()]
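# Usage sketch (editor's example; 'agb_input.dat' is a hypothetical file written in
# the key/value format shown in the agb_input_defaults docstring below):
#     infile = InputFile('agb_input.dat', default_dict=agb_input_defaults())
#     infile.agb_mix, infile.set_name, infile.diag_plots   # attributes, not dict keys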
def load_input(filename, comment_char='#', list_sep=','):
'''
read an input file into a dictionary
Ignores all lines that start with #
each line in the file has format key value
True and False are interpreted as bool
converts values to float, string, or list
also accepts dictionary with one key and one val
e.g: inp_dict {'key': val1}
Parameters
----------
filename : string
filename to parse
comment_char : string
skip line if it starts with comment_char
list_sep : string
within a value, if it's a list, split it by this value
if it's numeric, it will make a np.array of floats.
Returns
-------
d : dict
parsed information from filename
'''
d = {}
with open(filename) as f:
# skip comment_char, empty lines, strip out []
lines = [l.strip().translate(None, '[]') for l in f.readlines()
if not l.startswith(comment_char) and len(l.strip()) > 0]
# fill the dict
for line in lines:
key, val = line.partition(' ')[0::2]
d[key] = is_numeric(val.replace(' ', ''))
# check the values
for key in d.keys():
# is_numeric already got the floats and ints
if type(d[key]) == float or type(d[key]) == int:
continue
# check for a comma separated list
temp = d[key].split(list_sep)
if len(temp) > 1:
try:
# assume list of floats.
d[key] = [is_numeric(t) for t in temp]
except:
d[key] = temp
# check for a dictionary
elif len(d[key].split(':')) > 1:
temp1 = d[key].split(':')
d[key] = {is_numeric(temp1[0]): is_numeric(temp1[1])}
else:
val = temp[0]
# check bool
true = val.upper().startswith('TRUE')
false = val.upper().startswith('FALSE')
none = val.title().startswith('None')
if true or false or none:
val = literal_eval(val)
d[key] = val
return d
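# Editor's sketch of what load_input returns for a few representative lines
# (hypothetical file contents, mirroring the template in agb_input_defaults):
#     agb_mix CAF09               -> d['agb_mix'] == 'CAF09'
#     mass_loss Reimers: 0.2      -> d['mass_loss'] == {'Reimers': 0.2}
#     make_imfr True              -> d['make_imfr'] is True
#     metals_subset 0.001, 0.004  -> d['metals_subset'] == [0.001, 0.004]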
def agb_input_defaults(profile=None):
'''
# COLIBRI directory structure must be [agbtrack_dir]/[agb_mix]/[set_name]
agbtrack_dir /home/rosenfield/research/TP-AGBcalib/AGBTracks
agb_mix CAF09
set_name S_NOV13
### Parameters for TRILEGAL INPUT:
# Where input files will go for TRILEGAL
trilegal_dir cmd_inputfiles/
# Non-TP-AGB tracks to use
file_isotrack isotrack/parsec/CAF09_V1.2S_M36_S12D_NS_NAS.dat
# mass_loss parameter (for RGB stars) so far only "Reimers: [float]"
mass_loss Reimers: 0.2
# TRILEGAL formatted TP-AGB tracks will go here:
# structure: [isotrack_dir]/[agb_mix]/[set_name]
isotrack_dir isotrack_agb/
# File to link TRILEGAL formatted TP-AGB tracks to cmd_input:
tracce_dir isotrack_agb/
### Diag plots, extra files:
# make initial and final mass relation (and also lifetimes c and m)?
# (Need more than one metallicity)
make_imfr True
# Diagnostic plots base, this will have directory structure:
# diagnostic_dir/[agb_mix]_[metallicity]/[set]/
diagnostic_dir0 /home/rosenfield/research/TP-AGBcalib/diagnostics/
# If True, will make HRD or age vs C/O, log L, log Te (takes time!)
diag_plots True
### Misc Options:
# overwrite TRILEGAL formatted TP-AGB tracks and the output diag plots?
over_write True
# only do these metallicities (if commented out, do all metallicities)
# NB: this functionality has not been tested recently
#metals_subset 0.001, 0.004, 0.0005, 0.006, 0.008, 0.01, 0.017
'''
if profile is None:
keys = ['over_write',
'agbtrack_dir',
'agb_mix',
'set_name',
'trilegal_dir',
'isotrack_dir',
'tracce_dir',
'diagnostic_dir0',
'make_imfr',
'mass_loss',
'metals_subset']
else:
logger.error('only default profile set... must code')
in_def = {}
for k in keys:
in_def[k] = None
return in_def
def write_cmd_input_file(**kwargs):
'''
make a TRILEGAL cmd_input file based on default.
Send each parameter that is different than default by:
kwargs = { 'kind_tpagb': 4, 'file_tpagb': 'isotrack/tracce_CAF09_S0.dat'}
cmd_input_file = write_cmd_input_file(**kwargs)
To make the default file:
cmd_input_file = write_cmd_input_file()
if you don't specify cmd_input_file, output goes to cmd_input_TEMP.dat
'''
kind_tracks = kwargs.get('kind_tracks', 2)
file_isotrack = kwargs.get('file_isotrack', 'isotrack/parsec/CAF09.dat')
file_lowzams = kwargs.get('file_lowzams', 'isotrack/bassazams_fasulla.dat')
kind_tpagb = kwargs.get('kind_tpagb', 4)
file_tpagb = kwargs.get('file_tpagb')
if not file_tpagb:
file_tpagb = 'isotrack/isotrack_agb/tracce_CAF09_AFEP02_I1_S1.dat'
kind_postagb = kwargs.get('kind_postagb', 0)
file_postagb = kwargs.get('file_postagb', 'isotrack/final/pne_wd_test.dat')
mass_loss = kwargs.get('mass_loss')
if mass_loss:
kind_rgbmloss = 1
law_mass_loss, = mass_loss.keys()
efficiency_mass_loss, = mass_loss.values()
# these are for using cmd2.2:
kind_mag = kwargs.get('kind_mag', None)
photsys = kwargs.get('photsys', 'wfpc2')
file_mag = 'tab_mag_odfnew/tab_mag_%s.dat' % photsys
kind_imf = kwargs.get('kind_imf', None)
file_imf = kwargs.get('file_imf', 'tab_imf/imf_chabrier_lognormal.dat')
# if not using cmd2.2:
if kind_imf is None:
kind_imfr = kwargs.get('kind_imfr', 0)
file_imfr = kwargs.get('file_imfr', 'tab_ifmr/weidemann.dat')
track_comments = '# kind_tracks, file_isotrack, file_lowzams'
tpagb_comments = '# kind_tpagb, file_tpagb'
pagb_comments = '# kind_postagb, file_postagb DA VERIFICARE file_postagb'
mag_comments = '# kind_mag, file_mag'
imf_comments = '# kind_imf, file_imf'
imfr_comments = '# ifmr_kind, file with ifmr'
mass_loss_comments = '# RGB mass loss: kind_rgbmloss, law, and efficiency'
footer = (
'################################explanation######################',
'kind_tracks: 1= normal file',
'file_isotrack: tracks for low+int mass',
'file_lowzams: tracks for low-ZAMS',
'kind_tpagb:',
' 0 = none',
' 1 = Girardi et al., synthetic on the flight, no dredge up',
' 2 = Marigo & Girardi 2001, from file, includes mcore and C/O',
' 3 = Marigo & Girardi 2007, from file, includes per, mode and mloss',
' 4 = Marigo et al. 2011, from file, includes slope',
'file_tpagb: tracks for TP-AGB',
'kind_postagb:',
' 0 = none',
' 1 = from file',
'file_postagb: PN+WD tracks',
'kind_ifmr:',
' 0 = default',
' 1 = from file\n')
cmd_input_file = kwargs.get('cmd_input_file', 'cmd_input_TEMP.dat')
fh = open(cmd_input_file, 'w')
formatter = ' %i %s %s \n'
fh.write(' %i %s %s %s \n' % (kind_tracks, file_isotrack, file_lowzams,
track_comments))
fh.write(formatter % (kind_tpagb, file_tpagb, tpagb_comments))
fh.write(formatter % (kind_postagb, file_postagb, pagb_comments))
if kind_mag is not None:
fh.write(formatter % (kind_mag, file_mag, mag_comments))
if kind_imf is None:
fh.write(formatter % (kind_imfr, file_imfr, imfr_comments))
else:
fh.write(formatter % (kind_imf, file_imf, imf_comments))
if mass_loss:
fh.write(' %i %s %.3f \n' % (kind_rgbmloss, law_mass_loss,
efficiency_mass_loss))
fh.write('\n'.join(footer))
fh.close()
logger.info('wrote {}'.format(cmd_input_file))
return cmd_input_file
def make_met_file(tracce, Zs, Ys, isofiles):
with open(tracce, 'w') as t:
t.write(' %i\n' % len(isofiles))
[t.write(' %.4f\t%.3f\t%s\n' % (Zs[i], Ys[i], isofiles[i]))
for i in np.argsort(Zs)]
logger.info('wrote {}'.format(tracce))
return
def get_files(src, search_string):
'''
returns a list of files, similar to ls src/search_string
'''
if not src.endswith('/'):
src += '/'
try:
files = glob.glob1(src, search_string)
except IndexError:
logging.error("Can't find %s in %s" % (search_string, src))
sys.exit(2)
files = [os.path.join(src, f)
for f in files if ensure_file(os.path.join(src, f), mad=False)]
return files
def ensure_file(f, mad=True):
'''
Parameters
----------
f : string
if f is not a file will log warning.
mad : bool [True]
if mad is True, will exit if f is not a file
'''
test = os.path.isfile(f)
if test is False:
logging.warning('there is no file %s', f)
if mad:
sys.exit(2)
return test
def load_input(filename):
'''
reads an input file into a dictionary.
file must have key first then value(s)
Will make 'True' into a boolean True
Will understand if a value is a float, string, or list, etc.
Ignores all lines that start with #, but not with # on the same line as
key, value.
'''
try:
literal_eval
except NameError:
from ast import literal_eval
d = {}
with open(filename) as f:
for line in f.readlines():
if line.startswith('#'):
continue
if len(line.strip()) == 0:
continue
key, val = line.strip().partition(' ')[0::2]
d[key] = is_numeric(val.replace(' ', ''))
# do we have a list?
for key in d.keys():
# float
if type(d[key]) == float:
continue
# list:
temp = d[key].split(',')
if len(temp) > 1:
try:
d[key] = map(float, temp)
except:
d[key] = temp
# dict:
elif len(d[key].split(':')) > 1:
temp1 = d[key].split(':')
d[key] = {is_numeric(temp1[0]): is_numeric(temp1[1])}
else:
val = temp[0]
# boolean
true = val.upper().startswith('TR')
false = val.upper().startswith('FA')
if true or false:
val = literal_eval(val)
# string
d[key] = val
return d
def ensure_dir(f):
'''
will make all dirs necessary for input to be an existing directory.
if input does not end with '/' it will add it, and then make a directory.
'''
if not f.endswith('/'):
f += '/'
d = os.path.dirname(f)
if not os.path.isdir(d):
os.makedirs(d)
logging.info('made dirs: {}'.format(d))
def savetxt(filename, data, fmt='%.4f', header=None):
'''
np.savetxt wrapper that adds header. Some versions of savetxt
already allow this...
'''
with open(filename, 'w') as f:
if header is not None:
f.write(header)
np.savetxt(f, data, fmt=fmt)
logger.info('wrote {}'.format(filename))
def is_numeric(lit):
"""
Return the numeric value of a literal string (int, float, complex, hex, binary);
if it cannot be parsed as a number, return the string unchanged.
From http://rosettacode.org/wiki/Determine_if_a_string_is_numeric#Python
"""
# Empty String
if len(lit) <= 0:
return lit
# Handle '0'
if lit == '0':
return 0
# Hex/Binary
if len(lit) > 1: # sometimes just '-' means no data...
litneg = lit[1:] if lit[0] == '-' else lit
if litneg[0] == '0':
if litneg[1] in 'xX':
return int(lit, 16)
elif litneg[1] in 'bB':
return int(lit, 2)
else:
try:
return int(lit, 8)
except ValueError:
pass
# Int/Float/Complex
try:
return int(lit)
except ValueError:
pass
try:
return float(lit)
except ValueError:
pass
try:
return complex(lit)
except ValueError:
pass
return lit
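# Quick usage sketch (editor's examples):
#     is_numeric('42')      -> 42 (int)
#     is_numeric('0x1A')    -> 26 (hex)
#     is_numeric('0b101')   -> 5 (binary)
#     is_numeric('3.5e-2')  -> 0.035 (float)
#     is_numeric('CAF09')   -> 'CAF09' (returned unchanged)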
def setup_multiplot(nplots, xlabel=None, ylabel=None, title=None,
subplots_kwargs={}):
'''
fyi subplots args:
nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, **fig_kw
'''
nx = np.round(
completion: np.sqrt(nplots)
api: numpy.sqrt
# Exercise 8 (7)
import numpy as np
import seaborn as sns
import pandas
import matplotlib.pyplot as plt
from matplotlib.figure import figaspect
from matplotlib.gridspec import GridSpec
import mcmc_tools
from scipy.stats import norm
from scipy.stats import gaussian_kde
# id: individual ID
# pot: flower pot ID (A-J)
# f: treatment (C or T)
# y: number of seeds
d1 = pandas.read_csv('d1.csv')
print(d1.head())
print(d1.describe())
# Modeling
# Only the treatment effect enters the linear predictor; everything else is treated as noise.
y = d1['y']
N = len(y)
# A pandas Series can be used to remap IDs
F2int = pandas.Series([0, 1], index=('C', 'T'))
F = F2int[d1['f']]
print(F)
# This also remaps IDs, so use a Series as well
pots = d1['pot'].unique()
N_Pot = pots.size
Pot2int = pandas.Series(
completion: np.arange(N_Pot)
api: numpy.arange
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 27 10:17:43 2021
@author: Hatlab_3
"""
import numpy as np
import time
from scipy.signal import butter, sosfilt, sosfreqz
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.optimize import curve_fit
from matplotlib.patches import Ellipse
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib.colors import Normalize as Norm
#DO NOT USE, only here for backwards compatibility
def demod(signal_data, reference_data, mod_freq = 50e6, sampling_rate = 1e9):
'''
#TODO:
Parameters
----------
signal_data : np 1D array - float64
signal datapoints
reference_data : np 1D array - float64
reference datapoints
mod_freq : float64, optional
Modulation frequency in Hz. The default is 50e6.
sampling_rate : float64, optional
sampling rate in samples per second. The default is 1e9.
Returns
-------
sig_I_summed : np 1D array - float64
Signal multiplied by Sine and integrated over each period.
sig_Q_summed : np 1D array - float64
Signal multiplied by Cosine and integrated over each period.
ref_I_summed : np 1D array - float64
reference multiplied by sine and integrated over each period.
ref_Q_summed : np 1D array - float64
Reference multiplied by Cosine and integrated over each period.
'''
'''first pad the arrays to get a multiple of the number of samples in a
demodulation period, this will make the last record technically inaccurate
but there are thousands being
averaged so who cares
'''
#first demodulate both channels
# print("Signal Data Shape: ",np.shape(signal_data))
# print("Reference Data Shape: ",np.shape(reference_data))
period = int(sampling_rate/mod_freq)
print("Integrating over a period of: ", period)
signal_data = np.pad(signal_data, (0,int(period-np.size(signal_data)%period)))
reference_data = np.pad(reference_data, (0,int(period-np.size(reference_data)%period)))
# print("Signal Data Shape: ",np.shape(signal_data))
# print("Reference Data Shape: ",np.shape(reference_data))
point_number = np.arange(np.size(reference_data))
# print('Modulation period: ', period)
SinArray = np.sin(2*np.pi/period*point_number)
CosArray = np.cos(2*np.pi/period*point_number)
sig_I = signal_data*SinArray
sig_Q = signal_data*CosArray
ref_I = reference_data*SinArray
ref_Q = reference_data*CosArray
#now you cut the array up into periods of the sin and cosine modulation, then sum within one period
#the sqrt 2 is the RMS value of sin and cosine squared, period is to get rid of units of time
sig_I_summed = np.sum(sig_I.reshape(np.size(sig_I)//period, period), axis = 1)*(np.sqrt(2)/period)
sig_Q_summed = np.sum(sig_Q.reshape(np.size(sig_I)//period, period), axis = 1)*(np.sqrt(2)/period)
ref_I_summed = np.sum(ref_I.reshape(np.size(sig_I)//period, period), axis = 1)*(np.sqrt(2)/period)
ref_Q_summed = np.sum(ref_Q.reshape(np.size(sig_I)//period, period), axis = 1)*(np.sqrt(2)/period)
return (sig_I_summed, sig_Q_summed, ref_I_summed, ref_Q_summed)
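# Minimal usage sketch (editor's example with a synthetic 50 MHz tone; the array
# lengths and amplitudes are made up for illustration):
def _example_demod():
    t = np.arange(10000) / 1e9                    # 10 us of samples at 1 GS/s
    sig = 0.5 * np.sin(2 * np.pi * 50e6 * t)      # synthetic signal channel
    ref = 1.0 * np.sin(2 * np.pi * 50e6 * t)      # synthetic reference channel
    sI, sQ, rI, rQ = demod(sig, ref, mod_freq=50e6, sampling_rate=1e9)
    # one (I, Q) point per 20-sample period, so roughly 500 points for 10 us
    return sI, sQ, rI, rQ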
def demod_period(signal_data, reference_data, period = 20, sampling_rate = 1e9, debug = False):
'''
#TODO:
Parameters
----------
signal_data : np 1D array - float64
signal datapoints
reference_data : np 1D array - float64
reference datapoints
period : int, optional
Demodulation period in samples. The default is 20 (i.e. 50 MHz at 1 GS/s).
sampling_rate : float64, optional
sampling rate in samples per second. The default is 1e9.
debug : bool, optional
If True, print the integration period and the implied demodulation frequency.
Returns
-------
sig_I_summed : np 1D array - float64
Signal multiplied by Sine and integrated over each period.
sig_Q_summed : np 1D array - float64
Signal multiplied by Cosine and integrated over each period.
ref_I_summed : np 1D array - float64
reference multiplied by sine and integrated over each period.
ref_Q_summed : np 1D array - float64
Reference multiplied by Cosine and integrated over each period.
'''
'''first pad the arrays to get a multiple of the number of samples in a
demodulation period, this will make the last record technically inaccurate
but there are thousands being
averaged so who cares
'''
#first demodulate both channels
# print("Signal Data Shape: ",np.shape(signal_data))
# print("Reference Data Shape: ",np.shape(reference_data))
if debug:
print("Integrating over a period of: ", period)
print("Implies a demodulation frequency of: ", sampling_rate/period/1e6, "MHz")
signal_data = np.pad(signal_data, (0,int(period-np.size(signal_data)%period)))
reference_data = np.pad(reference_data, (0,int(period-np.size(reference_data)%period)))
# print("Signal Data Shape: ",np.shape(signal_data))
# print("Reference Data Shape: ",np.shape(reference_data))
point_number = np.arange(np.size(reference_data))
# print('Modulation period: ', period)
SinArray = np.sin(2*np.pi/period*point_number)
CosArray = np.cos(2*np.pi/period*point_number)
sig_I = signal_data*SinArray
sig_Q = signal_data*CosArray
ref_I = reference_data*SinArray
ref_Q = reference_data*CosArray
#now you cut the array up into periods of the sin and cosine modulation, then sum within one period
#the sqrt 2 is the RMS value of sin and cosine squared, period is to get rid of units of time
sig_I_summed = np.sum(sig_I.reshape(np.size(sig_I)//period, period), axis = 1)*(np.sqrt(2)/period)
sig_Q_summed = np.sum(sig_Q.reshape(np.size(sig_I)//period, period), axis = 1)*(np.sqrt(2)/period)
ref_I_summed = np.sum(ref_I.reshape(np.size(sig_I)//period, period), axis = 1)*(np.sqrt(2)/period)
ref_Q_summed = np.sum(ref_Q.reshape(np.size(sig_I)//period, period), axis = 1)*(np.sqrt(2)/period)
return (sig_I_summed, sig_Q_summed, ref_I_summed, ref_Q_summed)
def demod_all_records(s_array: np.ndarray, r_array: np.ndarray, period = 20):
'''
Parameters
----------
s_array : np.ndarray
array of signal data [R, t] where R is records, t is time
r_array : np.ndarray
array of reference data [R, t] where R is records, t is time
period : int, optional
window over which to integrate for demodulation, with 1GS/s the unit is ns. The default is 20ns == 50MHz.
Returns
-------
[sI_arr, sQ_arr] : list of np.ndarray
demodulated signal I and Q, one row per record
[rI_arr, rQ_arr] : list of np.ndarray
demodulated reference I and Q, one row per record
'''
sI_arr = []
sQ_arr = []
rI_arr = []
rQ_arr = []
#demodulate each record in windows of (period) width
for rec_sig, rec_ref in zip(s_array, r_array):
sI, sQ, rI, rQ = demod_period(rec_sig, rec_ref, period = period)
sI_arr.append(sI)
sQ_arr.append(sQ)
rI_arr.append(rI)
rQ_arr.append(rQ)
# turn everything into numpy arrays (re-binding the loop variable would be a no-op)
sI_arr, sQ_arr, rI_arr, rQ_arr = [np.array(a) for a in (sI_arr, sQ_arr, rI_arr, rQ_arr)]
return [sI_arr, sQ_arr], [rI_arr, rQ_arr]
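# Usage sketch (editor's example; record count and length are hypothetical):
#     s_records = np.random.randn(100, 2000)   # 100 records, 2000 samples each
#     r_records = np.random.randn(100, 2000)
#     (sI, sQ), (rI, rQ) = demod_all_records(s_records, r_records, period=20)
#     np.shape(sI)   # -> (100, 101): one demodulated point per 20-sample window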
bpf_func = lambda cfreq, BW, order: butter(order, [cfreq-BW/2, cfreq+BW/2], fs = 1e9, output = 'sos', btype = 'bandpass')
def filter_all_records(s_array, r_array, filt):
s_filt_arr = []
r_filt_arr = []
#demodulate each record in windows of (period) width
for rec_sig, rec_ref in zip(s_array, r_array):
s_filt = sosfilt(filt, rec_sig)
r_filt = sosfilt(filt, rec_ref)
s_filt_arr.append(s_filt)
r_filt_arr.append(r_filt)
# turn everything into numpy arrays (re-binding the loop variable would be a no-op)
s_filt_arr, r_filt_arr = np.array(s_filt_arr), np.array(r_filt_arr)
return s_filt_arr, r_filt_arr
def remove_offset(I, Q, window = [0, 40]):
#remove an IQ offset from the data
offset_data = I-np.average(I[:, window[0]: window[1]]), Q-np.average(Q[:, window[0]: window[1]])
return offset_data
def phase_correction(sigI, sigQ, refI, refQ):
'''
Parameters
----------
sigI : np 2D array - (records,samples) float64
demodulated signal - In-phase
sigQ : np 2D array - (records,samples) float64
demodulated signal - Quadrature phase
refI : np 2D array - (records,samples) float64
demodulated reference - In-phase
refQ : np 2D array - (records,samples) float64
demodulated reference - quadrature-phase
Note: reference and signal arrays must all be of the same length
Returns
-------
sigI_corrected : np 2D array - float64
Signal I rotated by reference phase averaged over each record
sigQ_corrected : np 2D array - float64
Signal Q rotated by reference phase averaged over each record
'''
sigI_corrected = np.zeros(np.shape(sigI))
sigQ_corrected = np.zeros(np.shape(sigQ))
rI_trace = np.zeros(np.shape(sigI)[0])
rQ_trace = np.zeros(np.shape(sigI)[0])
for i, (sI_rec, sQ_rec, rI_rec, rQ_rec) in enumerate(zip(sigI,sigQ,refI,refQ)):
rI_avg, rQ_avg = np.average(rI_rec), np.average(rQ_rec)
rI_trace[i], rQ_trace[i] = rI_avg, rQ_avg
Ref_mag = np.sum(np.sqrt(rI_avg**2 + rQ_avg**2))
sigI_corrected[i] = (sI_rec*rI_avg + sQ_rec*rQ_avg)/Ref_mag
sigQ_corrected[i] = (-sI_rec*rQ_avg + sQ_rec*rI_avg)/Ref_mag
return sigI_corrected, sigQ_corrected, rI_trace, rQ_trace
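# Editor's note: the loop above computes the real and imaginary parts of
# (sigI + i*sigQ) * conj(rI_avg + i*rQ_avg) / |r_avg|, i.e. each record is rotated
# by minus the record-averaged reference phase. A compact equivalent using complex
# arithmetic (a sketch, same inputs and outputs as phase_correction):
def _phase_correction_complex(sigI, sigQ, refI, refQ):
    ref = np.average(refI, axis=1) + 1j * np.average(refQ, axis=1)  # per-record average
    rotor = np.conj(ref) / np.abs(ref)                              # unit rotor: -phase(ref)
    corrected = (np.asarray(sigI) + 1j * np.asarray(sigQ)) * rotor[:, None]
    return corrected.real, corrected.imag, ref.real, ref.imag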
def generate_matched_weight_funcs(data1, data2, bc = False, bc_window = [50, 150]):
d1I, d1Q = data1
d2I, d2Q = data2
if bc == False:
WF_I = np.average(d1I, axis = 0)-np.average(d2I, axis = 0)
WF_Q = np.average(d1Q, axis = 0)-np.average(d2Q, axis = 0)
else:
WF_I = np.zeros(
completion: np.shape(d1I)
api: numpy.shape
import sys
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
p0 = np.load("prob_run0.npy")
p1 = np.load("prob_run1.npy")
p2 = np.load("prob_run2.npy")
p3 = np.load("prob_run3.npy")
p4 = np.load("prob_run4.npy")
p5 = np.load("prob_run5.npy")
y_test = np.load("../data/audio/ESC-10/esc10_spect_test_labels.npy")
prob = (p0+p1+p2+p3+p4+p5)/6.0
p = np.argmax(prob, axis=1)
cc = np.zeros((10,10))
for i in range(len(y_test)):
cc[y_test[i],p[i]] += 1
print()
print("Ensemble average:")
print()
if (len(sys.argv) > 1):
print(np.array2string(cc.astype("uint32")))
print()
cp = 100.0 * cc / cc.sum(axis=1, keepdims=True)  # normalize each row by its true-class count
print(np.array2string(cp, precision=1))
print()
print("Overall accuracy = %0.2f%%" % (100.0*np.diag(cc).sum()/cc.sum(),))
print()
print()
print("Ensemble max:")
print()
p = np.zeros(len(y_test), dtype="uint8")
for i in range(len(y_test)):
    # stack the six per-run probability vectors and pick the class with the
    # single highest probability across all runs (reshape to 60 = 6 runs x 10 classes)
    t = np.array([p0[i],p1[i],p2[i],p3[i],p4[i],p5[i]])
    p[i] = np.argmax(t.reshape(60)) % 10
cc = np.zeros((10,10))
for i in range(len(y_test)):
cc[y_test[i],p[i]] += 1
if (len(sys.argv) > 1):
print(np.array2string(cc.astype("uint32")))
print()
cp = 100.0 * cc / cc.sum(axis=1, keepdims=True)  # normalize each row by its true-class count
print(np.array2string(cp, precision=1))
print()
print("Overall accuracy = %0.2f%%" % (100.0*np.diag(cc).sum()/cc.sum(),))
print()
# Voting
print()
print("Ensemble voting:")
print()
t = np.zeros((6,len(y_test)), dtype="uint32")
t[0,:] = np.argmax(p0, axis=1)
t[1,:] =
|
np.argmax(p1, axis=1)
|
numpy.argmax
|
import os
import sys
import json
import torch
import pickle
import argparse
import numpy as np
from tqdm import tqdm
from copy import deepcopy
from torch.utils.data import DataLoader
from numpy.linalg import inv
#sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder
#sys.path.append(os.path.join(os.getcwd(), os.pardir, "openks/models/pytorch/mmd_modules/ThreeDJCG")) # HACK add the lib folder
import openks.models.pytorch.mmd_modules.ThreeDJCG.lib.capeval.bleu.bleu as capblue
import openks.models.pytorch.mmd_modules.ThreeDJCG.lib.capeval.cider.cider as capcider
import openks.models.pytorch.mmd_modules.ThreeDJCG.lib.capeval.rouge.rouge as caprouge
import openks.models.pytorch.mmd_modules.ThreeDJCG.lib.capeval.meteor.meteor as capmeteor
from openks.models.pytorch.mmd_modules.ThreeDJCG.data.scannet.model_util_scannet import ScannetDatasetConfig
from openks.models.pytorch.mmd_modules.ThreeDJCG.lib.config_joint import CONF
from openks.models.pytorch.mmd_modules.ThreeDJCG.lib.ap_helper import parse_predictions
from openks.models.pytorch.mmd_modules.ThreeDJCG.lib.loss_helper.loss_captioning import get_scene_cap_loss, get_object_cap_loss
from openks.models.pytorch.mmd_modules.ThreeDJCG.utils.box_util import box3d_iou_batch_tensor
# constants
DC = ScannetDatasetConfig()
SCANREFER_ORGANIZED = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_organized.json")))
def prepare_corpus(raw_data, max_len=CONF.TRAIN.MAX_DES_LEN):
corpus = {}
for data in raw_data:
scene_id = data["scene_id"]
object_id = data["object_id"]
object_name = data["object_name"]
token = data["token"][:max_len]
description = " ".join(token)
# add start and end token
description = "sos " + description
description += " eos"
key = "{}|{}|{}".format(scene_id, object_id, object_name)
# key = "{}|{}".format(scene_id, object_id)
if key not in corpus:
corpus[key] = []
corpus[key].append(description)
return corpus
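# Illustrative corpus entry (scene/object ids are hypothetical): an annotation such
# as "the brown wooden chair next to the desk", truncated to max_len tokens, becomes
#   corpus["scene0000_00|12|chair"] = ["sos the brown wooden chair next to the desk eos"]
# with one list entry appended per annotation of the same object.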
def decode_caption(raw_caption, idx2word):
decoded = ["sos"]
for token_idx in raw_caption:
token_idx = token_idx.item()
token = idx2word[str(token_idx)]
decoded.append(token)
if token == "eos": break
if "eos" not in decoded: decoded.append("eos")
decoded = " ".join(decoded)
return decoded
def check_candidates(corpus, candidates):
placeholder = "sos eos"
corpus_keys = list(corpus.keys())
candidate_keys = list(candidates.keys())
missing_keys = [key for key in corpus_keys if key not in candidate_keys]
if len(missing_keys) != 0:
for key in missing_keys:
candidates[key] = [placeholder]
return candidates
def organize_candidates(corpus, candidates):
new_candidates = {}
for key in corpus.keys():
new_candidates[key] = candidates[key]
return new_candidates
def feed_scene_cap(model, device, dataset, dataloader, phase, folder,
is_eval=True, max_len=CONF.TRAIN.MAX_DES_LEN, save_interm=False, min_iou=CONF.EVAL.MIN_IOU_THRESHOLD, organized=SCANREFER_ORGANIZED):
candidates = {}
intermediates = {}
for data_dict in tqdm(dataloader):
# move to cuda
for key in data_dict:
data_dict[key] = data_dict[key].cuda()
with torch.no_grad():
data_dict = model(data_dict, is_eval)
data_dict = get_scene_cap_loss(data_dict, device, DC, weights=dataset.weights, detection=True, caption=False)
# unpack
captions = data_dict["lang_cap"].argmax(-1) # batch_size, num_proposals, max_len - 1
dataset_ids = data_dict["dataset_idx"]
batch_size, num_proposals, _ = captions.shape
# post-process
# config
POST_DICT = {
"remove_empty_box": True,
"use_3d_nms": True,
"nms_iou": 0.25,
"use_old_type_nms": False,
"cls_nms": True,
"per_class_proposal": True,
"conf_thresh": 0.05,
"dataset_config": DC
}
# nms mask
_ = parse_predictions(data_dict, POST_DICT)
nms_masks = torch.LongTensor(data_dict["pred_mask"]).cuda()
# objectness mask
obj_masks = torch.argmax(data_dict["objectness_scores"], 2).long()
# final mask
nms_masks = nms_masks * obj_masks
# pick out object ids of detected objects
detected_object_ids = torch.gather(data_dict["scene_object_ids"], 1, data_dict["object_assignment"])
# bbox corners
assigned_target_bbox_corners = torch.gather(
data_dict["gt_box_corner_label"].float(),
1,
data_dict["object_assignment"].view(batch_size, num_proposals, 1, 1).repeat(1, 1, 8, 3)
) # batch_size, num_proposals, 8, 3
detected_bbox_corners = data_dict["pred_bbox_corner"] # batch_size, num_proposals, 8, 3
# compute IoU between each detected box and each ground truth box
ious = box3d_iou_batch_tensor(
assigned_target_bbox_corners.view(-1, 8, 3), # batch_size * num_proposals, 8, 3
detected_bbox_corners.view(-1, 8, 3) # batch_size * num_proposals, 8, 3
).view(batch_size, num_proposals)
# find good boxes (IoU > threshold)
good_bbox_masks = ious > min_iou # batch_size, num_proposals
# dump generated captions
object_attn_masks = {}
for batch_id in range(batch_size):
dataset_idx = dataset_ids[batch_id].item()
scene_id = dataset.scanrefer[dataset_idx]["scene_id"]
object_attn_masks[scene_id] = np.zeros((num_proposals, num_proposals))
for prop_id in range(num_proposals):
if nms_masks[batch_id, prop_id] == 1 and good_bbox_masks[batch_id, prop_id] == 1:
object_id = str(detected_object_ids[batch_id, prop_id].item())
caption_decoded = decode_caption(captions[batch_id, prop_id], dataset.vocabulary["idx2word"])
# print(scene_id, object_id)
try:
ann_list = list(organized[scene_id][object_id].keys())
object_name = organized[scene_id][object_id][ann_list[0]]["object_name"]
# store
key = "{}|{}|{}".format(scene_id, object_id, object_name)
# key = "{}|{}".format(scene_id, object_id)
candidates[key] = [caption_decoded]
if save_interm:
if scene_id not in intermediates: intermediates[scene_id] = {}
if object_id not in intermediates[scene_id]: intermediates[scene_id][object_id] = {}
intermediates[scene_id][object_id]["object_name"] = object_name
intermediates[scene_id][object_id]["box_corner"] = detected_bbox_corners[batch_id, prop_id].cpu().numpy().tolist()
intermediates[scene_id][object_id]["description"] = caption_decoded
intermediates[scene_id][object_id]["token"] = caption_decoded.split(" ")
# attention context
# extract attention masks for each object
object_attn_weights = data_dict["topdown_attn"][:, :, :num_proposals] # NOTE only consider attention on objects
valid_context_masks = data_dict["valid_masks"][:, :, :num_proposals] # NOTE only consider attention on objects
cur_valid_context_masks = valid_context_masks[batch_id, prop_id] # num_proposals
cur_context_box_corners = detected_bbox_corners[batch_id, cur_valid_context_masks == 1] # X, 8, 3
cur_object_attn_weights = object_attn_weights[batch_id, prop_id, cur_valid_context_masks == 1] # X
intermediates[scene_id][object_id]["object_attn_weight"] = cur_object_attn_weights.cpu().numpy().T.tolist()
intermediates[scene_id][object_id]["object_attn_context"] = cur_context_box_corners.cpu().numpy().tolist()
# cache
object_attn_masks[scene_id][prop_id, prop_id] = 1
except KeyError:
continue
# detected boxes
if save_interm:
print("saving intermediate results...")
interm_path = os.path.join(CONF.PATH.OUTPUT, folder, "interm.json")
with open(interm_path, "w") as f:
json.dump(intermediates, f, indent=4)
return candidates
def update_interm(interm, candidates, bleu, cider, rouge, meteor):
for i, (key, value) in enumerate(candidates.items()):
scene_id, object_id, object_name = key.split("|")
if scene_id in interm:
if object_id in interm[scene_id]:
interm[scene_id][object_id]["bleu_1"] = bleu[1][0][i]
interm[scene_id][object_id]["bleu_2"] = bleu[1][1][i]
interm[scene_id][object_id]["bleu_3"] = bleu[1][2][i]
interm[scene_id][object_id]["bleu_4"] = bleu[1][3][i]
interm[scene_id][object_id]["cider"] = cider[1][i]
interm[scene_id][object_id]["rouge"] = rouge[1][i]
interm[scene_id][object_id]["meteor"] = meteor[1][i]
return interm
def eval_cap(model, device, dataset, dataloader, phase, folder,
is_eval=True, max_len=CONF.TRAIN.MAX_DES_LEN, force=False,
mode="scene", save_interm=False, no_caption=False, no_classify=False, min_iou=CONF.EVAL.MIN_IOU_THRESHOLD):
if no_caption:
bleu = 0
cider = 0
rouge = 0
meteor = 0
if no_classify:
cls_acc = 0
else:
print("evaluating classification accuracy...")
cls_acc = []
for data_dict in tqdm(dataloader):
# move to cuda
for key in data_dict:
data_dict[key] = data_dict[key].to(device)
with torch.no_grad():
data_dict = model(data_dict, is_eval)
# unpack
preds = data_dict["enc_preds"] # (B, num_cls)
targets = data_dict["object_cat"] # (B,)
# classification acc
preds = preds.argmax(-1) # (B,)
acc = (preds == targets).sum().float() / targets.shape[0]
# dump
cls_acc.append(acc.item())
cls_acc =
|
np.mean(cls_acc)
|
numpy.mean
|
"""
Large-scale Point Cloud Semantic Segmentation with Superpoint Graphs
http://arxiv.org/abs/1711.09869
2017 <NAME>, <NAME>
functions for writing and reading features and superpoint graph
"""
import os
import sys
import random
import glob
from plyfile import PlyData, PlyElement
import numpy as np
#from numpy import genfromtxt
import pandas as pd
import h5py
#import laspy
from sklearn.neighbors import NearestNeighbors
import laspy
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(DIR_PATH, '..'))
from partition.ply_c import libply_c
import colorsys
from sklearn.decomposition import PCA
def partition2ply(filename, xyz, components):
"""write a ply with random colors for each components"""
random_color = random.randint
color = np.zeros(xyz.shape)
for i_com in range(0, len(components)):
color[components[i_com], :] = [random_color(0, 255), random_color(0, 255), random_color(0, 255)]
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i in range(0, 3):
vertex_all[prop[i][0]] = xyz[:, i]
for i in range(0, 3):
vertex_all[prop[i+3][0]] = color[:, i]
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def geof2ply(filename, xyz, geof):
"""write a ply with colors corresponding to geometric features"""
color = np.array(255 * geof[:, [0, 1, 3]], dtype='uint8')
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i in range(0, 3):
vertex_all[prop[i][0]] = xyz[:, i]
for i in range(0, 3):
vertex_all[prop[i+3][0]] = color[:, i]
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def prediction2ply(filename, xyz, prediction, n_label, dataset):
"""write a ply with colors for each class"""
if len(prediction.shape) > 1 and prediction.shape[1] > 1:
prediction = np.argmax(prediction, axis=1)
color = np.zeros(xyz.shape)
for i_label in range(0, n_label + 1):
color[np.where(prediction == i_label), :] = get_color_from_label(i_label, dataset)
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i in range(0, 3):
vertex_all[prop[i][0]] = xyz[:, i]
for i in range(0, 3):
vertex_all[prop[i+3][0]] = color[:, i]
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def error2ply(filename, xyz, rgb, labels, prediction):
"""write a ply with green hue for correct classifcation and red for error"""
if len(prediction.shape) > 1 and prediction.shape[1] > 1:
prediction = np.argmax(prediction, axis=1)
if len(labels.shape) > 1 and labels.shape[1] > 1:
labels = np.argmax(labels, axis=1)
color_rgb = rgb/255
for i_ver in range(0, len(labels)):
color_hsv = list(colorsys.rgb_to_hsv(color_rgb[i_ver, 0], color_rgb[i_ver, 1], color_rgb[i_ver, 2]))
if (labels[i_ver] == prediction[i_ver]) or (labels[i_ver] == 0):
color_hsv[0] = 0.333333
else:
color_hsv[0] = 0
color_hsv[1] = min(1, color_hsv[1] + 0.3)
color_hsv[2] = min(1, color_hsv[2] + 0.1)
color_rgb[i_ver, :] = list(colorsys.hsv_to_rgb(color_hsv[0], color_hsv[1], color_hsv[2]))
color_rgb = np.array(color_rgb*255, dtype='u1')
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i in range(0, 3):
vertex_all[prop[i][0]] = xyz[:, i]
for i in range(0, 3):
vertex_all[prop[i+3][0]] = color_rgb[:, i]
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def spg2ply(filename, spg_graph):
"""write a ply displaying the SPG by adding edges between its centroid"""
vertex_prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
vertex_val = np.empty((spg_graph['sp_centroids']).shape[0], dtype=vertex_prop)
for i in range(0, 3):
vertex_val[vertex_prop[i][0]] = spg_graph['sp_centroids'][:, i]
edges_prop = [('vertex1', 'int32'), ('vertex2', 'int32')]
edges_val = np.empty((spg_graph['source']).shape[0], dtype=edges_prop)
edges_val[edges_prop[0][0]] = spg_graph['source'].flatten()
edges_val[edges_prop[1][0]] = spg_graph['target'].flatten()
ply = PlyData([PlyElement.describe(vertex_val, 'vertex'), PlyElement.describe(edges_val, 'edge')], text=True)
ply.write(filename)
def scalar2ply(filename, xyz, scalar):
"""write a ply with an unisgned integer scalar field"""
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('scalar', 'f4')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i in range(0, 3):
vertex_all[prop[i][0]] = xyz[:, i]
vertex_all[prop[3][0]] = scalar
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def get_color_from_label(object_label, dataset):
"""associate the color corresponding to the class"""
if dataset == 's3dis': # S3DIS
object_label = {
0: [0, 0, 0], # unlabelled .->. black
1: [233, 229, 107], # 'ceiling' .-> .yellow
2: [95, 156, 196], # 'floor' .-> . blue
3: [179, 116, 81], # 'wall' -> brown
4: [81, 163, 148], # 'column' -> bluegreen
5: [241, 149, 131], # 'beam' -> salmon
6: [77, 174, 84], # 'window' -> bright green
7: [108, 135, 75], # 'door' -> dark green
8: [79, 79, 76], # 'table' -> dark grey
9: [41, 49, 101], # 'chair' -> darkblue
10: [223, 52, 52], # 'bookcase' -> red
11: [89, 47, 95], # 'sofa' -> purple
12: [81, 109, 114], # 'board' -> grey
13: [233, 233, 229], # 'clutter' -> light grey
}.get(object_label, -1)
elif dataset == 'airborne_lidar': # Custom set
object_label = {
1: [0, 0, 0], # unlabelled .->. black
2: [255, 0, 0], # Building -> red
3: [0, 255, 0], # Water -> green
4: [0, 0, 255] # Ground -> Blue
}.get(object_label, -1)
else:
raise ValueError(f"Unknown dataset: {dataset}")
if object_label == -1:
raise ValueError(f"Type not recognized: {object_label}")
return object_label
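# e.g. get_color_from_label(2, 's3dis') -> [95, 156, 196] (floor, blue) while
# get_color_from_label(2, 'airborne_lidar') -> [255, 0, 0] (building, red);
# labels missing from the mapping raise a ValueError.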
def read_s3dis_format(raw_path, label_out=True):
"""extract data from a room folder.
    S3DIS specific"""
# room_ver = genfromtxt(raw_path, delimiter=' ')
room_ver = pd.read_csv(raw_path, sep=' ', header=None).values
xyz = np.ascontiguousarray(room_ver[:, 0:3], dtype='float32')
try:
rgb = np.ascontiguousarray(room_ver[:, 3:6], dtype='uint8')
except ValueError:
rgb = np.zeros((room_ver.shape[0], 3), dtype='uint8')
print('WARN - corrupted rgb data for file %s' % raw_path)
if not label_out:
return xyz, rgb
n_ver = len(room_ver)
del room_ver
    nn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(xyz)
room_labels = np.zeros((n_ver,), dtype='uint8')
room_object_indices = np.zeros((n_ver,), dtype='uint32')
objects = glob.glob(os.path.dirname(raw_path) + "/Annotations/*.txt")
i_object = 1
for single_object in objects:
object_name = os.path.splitext(os.path.basename(single_object))[0]
print(" adding object " + str(i_object) + " : " + object_name)
object_class = object_name.split('_')[0]
object_label = object_name_to_label(object_class)
# obj_ver = genfromtxt(single_object, delimiter=' ')
obj_ver = pd.read_csv(single_object, sep=' ', header=None).values
distances, obj_ind = nn.kneighbors(obj_ver[:, 0:3])
room_labels[obj_ind] = object_label
room_object_indices[obj_ind] = i_object
i_object = i_object + 1
return xyz, rgb, room_labels, room_object_indices
def read_airborne_lidar_format(raw_path):
"""Extract data from a .las file."""
in_file = laspy.file.File(raw_path, mode='r')
n_points = len(in_file)
x = np.reshape(in_file.x, (n_points, 1))
y = np.reshape(in_file.y, (n_points, 1))
z = np.reshape(in_file.z, (n_points, 1))
intensity = np.reshape(in_file.intensity, (n_points, 1))
nb_return = np.reshape(in_file.num_returns, (n_points, 1))
labels = np.reshape(in_file.classification, (n_points, 1))
labels = format_classes(labels)
xyz = np.hstack((x, y, z)).astype('f4')
return xyz, nb_return, intensity, labels
def format_classes(labels):
"""Format labels array to match the classes of interest.
Specific to airborne_lidar dataset."""
    # coi = classes of interest.
# Dict containing the mapping of input (from the .las file) and the output classes (for the training part).
# 6: Building, 9: water, 2: ground.
# All other values have to be set to 1. Details here: https://github.com/loicland/superpoint_graph/issues/83
coi = {'6': 2, '9': 3, '2': 4}
labels2 = np.full(shape=labels.shape, fill_value=1, dtype=int)
for key, value in coi.items():
labels2[labels == int(key)] = value
return labels2
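# Example: an input classification array [2, 6, 9, 5] is remapped to [4, 2, 3, 1]
# (ground -> 4, building -> 2, water -> 3, everything else -> 1).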
def object_name_to_label(object_class):
"""convert from object name in S3DIS to an int"""
object_label = {
'ceiling': 1,
'floor': 2,
'wall': 3,
'column': 4,
'beam': 5,
'window': 6,
'door': 7,
'table': 8,
'chair': 9,
'bookcase': 10,
'sofa': 11,
'board': 12,
'clutter': 13,
'stairs': 0,
}.get(object_class, 0)
return object_label
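# e.g. object_name_to_label('chair') -> 9; 'stairs' and any unknown class map to 0.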
def read_ply(filename):
"""convert from a ply file. include the label and the object number"""
# ---read the ply file--------
plydata = PlyData.read(filename)
    xyz = np.stack([plydata['vertex'][n] for n in ['x', 'y', 'z']], axis=1)
try:
rgb = np.stack([plydata['vertex'][n] for n in ['red', 'green', 'blue']], axis=1).astype(np.uint8)
except ValueError:
rgb = np.stack([plydata['vertex'][n] for n in ['r', 'g', 'b']], axis=1).astype(np.float32)
if np.max(rgb) > 1:
rgb = rgb
try:
object_indices = plydata['vertex']['object_index']
labels = plydata['vertex']['label']
return xyz, rgb, labels, object_indices
except ValueError:
try:
labels = plydata['vertex']['label']
return xyz, rgb, labels
except ValueError:
return xyz, rgb
def read_las(filename):
"""convert from a las file with no rgb"""
in_file = laspy.file.File(filename, mode='r')
n_points = len(in_file)
x = np.reshape(in_file.x, (n_points, 1))
y = np.reshape(in_file.y, (n_points, 1))
z = np.reshape(in_file.z, (n_points, 1))
xyz = np.hstack((x, y, z)).astype('f4')
return xyz
def write_ply_obj(filename, xyz, rgb, labels, object_indices):
"""write into a ply file. include the label and the object number"""
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), ('label', 'u1'), ('object_index', 'uint32')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i_prop in range(0, 3):
vertex_all[prop[i_prop][0]] = xyz[:, i_prop]
for i_prop in range(0, 3):
vertex_all[prop[i_prop+3][0]] = rgb[:, i_prop]
vertex_all[prop[6][0]] = labels
vertex_all[prop[7][0]] = object_indices
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def embedding2ply(filename, xyz, embeddings):
"""write a ply with colors corresponding to geometric features"""
if embeddings.shape[1] > 3:
pca = PCA(n_components=3)
# pca.fit(np.eye(embeddings.shape[1]))
pca.fit(np.vstack((np.zeros((embeddings.shape[1],)), np.eye(embeddings.shape[1]))))
embeddings = pca.transform(embeddings)
# value = (embeddings-embeddings.mean(axis=0))/(2*embeddings.std())+0.5
# value = np.minimum(np.maximum(value,0),1)
# value = (embeddings)/(3 * embeddings.std())+0.5
value = np.minimum(np.maximum((embeddings+1)/2, 0), 1)
color = np.array(255 * value, dtype='uint8')
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i in range(0, 3):
vertex_all[prop[i][0]] = xyz[:, i]
for i in range(0, 3):
vertex_all[prop[i+3][0]] = color[:, i]
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def edge_class2ply2(filename, edg_class, xyz, edg_source, edg_target):
"""write a ply with edge weight color coded into the midway point"""
n_edg = len(edg_target)
midpoint = (xyz[edg_source, ]+xyz[edg_target, ])/2
color = np.zeros((edg_source.shape[0], 3), dtype='uint8')
color[edg_class == 0, ] = [0, 0, 0]
color[(edg_class == 1).nonzero(), ] = [255, 0, 0]
color[(edg_class == 2).nonzero(), ] = [125, 255, 0]
color[(edg_class == 3).nonzero(), ] = [0, 125, 255]
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertex_all = np.empty(n_edg, dtype=prop)
for i in range(0, 3):
vertex_all[prop[i][0]] = np.hstack(midpoint[:, i])
for i in range(3, 6):
vertex_all[prop[i][0]] = color[:, i-3]
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def write_ply_labels(filename, xyz, rgb, labels):
"""write into a ply file. include the label"""
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), ('label', 'u1')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i_prop in range(0, 3):
vertex_all[prop[i_prop][0]] = xyz[:, i_prop]
for i_prop in range(0, 3):
vertex_all[prop[i_prop+3][0]] = rgb[:, i_prop]
vertex_all[prop[6][0]] = labels
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
def write_ply(filename, xyz, rgb):
"""write into a ply file"""
prop = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertex_all = np.empty(len(xyz), dtype=prop)
for i_prop in range(0, 3):
vertex_all[prop[i_prop][0]] = xyz[:, i_prop]
for i_prop in range(0, 3):
vertex_all[prop[i_prop+3][0]] = rgb[:, i_prop]
ply = PlyData([PlyElement.describe(vertex_all, 'vertex')], text=True)
ply.write(filename)
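# Minimal usage sketch (hypothetical file name and synthetic data, not part of the
# partition pipeline): write 1000 random points with random uint8 colors.
def _example_write_random_ply(filename='example_cloud.ply'):
    xyz = np.random.rand(1000, 3).astype('float32')
    rgb = np.random.randint(0, 256, size=(1000, 3), dtype='uint8')
    write_ply(filename, xyz, rgb)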
def write_features(file_name, geof, xyz, rgb, graph_nn, labels, intensity, nb_return):
"""write the geometric features, labels and clouds in a h5 file"""
if os.path.isfile(file_name):
os.remove(file_name)
data_file = h5py.File(file_name, 'w')
data_file.create_dataset('geof', data=geof, dtype='float32')
data_file.create_dataset('source', data=graph_nn["source"], dtype='uint32')
data_file.create_dataset('target', data=graph_nn["target"], dtype='uint32')
data_file.create_dataset('distances', data=graph_nn["distances"], dtype='float32')
data_file.create_dataset('xyz', data=xyz, dtype='float32')
if len(rgb) > 0:
data_file.create_dataset('rgb', data=rgb, dtype='uint8')
if len(labels) > 0 and len(labels.shape) > 1 and labels.shape[1] > 1:
data_file.create_dataset('labels', data=labels, dtype='uint32')
else:
data_file.create_dataset('labels', data=labels, dtype='uint8')
# Enables the use of intensity
if len(intensity) > 0:
data_file.create_dataset('intensity', data=intensity, dtype='float32')
# Enables the use of the number of return.
if len(nb_return) > 0:
data_file.create_dataset('nb_return', data=nb_return, dtype='uint8')
data_file.close()
def read_features(file_name):
"""read the geometric features, clouds and labels from a h5 file"""
data_file = h5py.File(file_name, 'r')
    # first get the number of vertices
# n_ver = len(data_file["geof"][:, 0])
has_labels = len(data_file["labels"])
# the labels can be empty in the case of a test set
if has_labels:
labels = np.array(data_file["labels"])
else:
labels = []
# ---fill the arrays---
geof = data_file["geof"][:]
xyz = data_file["xyz"][:]
if 'rgb' in data_file:
rgb = data_file["rgb"][:]
else:
rgb = []
source = data_file["source"][:]
target = data_file["target"][:]
distance = data_file["distances"][:]
# Manage intensity and number of returns
if 'intensity' in data_file:
intensity = data_file["intensity"][:]
else:
intensity = []
if 'nb_return' in data_file:
nb_return = data_file["nb_return"][:]
else:
nb_return = []
# ---set the graph---
graph_nn = dict([("is_nn", True)])
graph_nn["source"] = source
graph_nn["target"] = target
graph_nn["distances"] = distance
return geof, xyz, rgb, graph_nn, labels, intensity, nb_return
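# Round-trip note: everything written by write_features() above can be read back
# with read_features(); rgb, intensity and nb_return come back as empty lists when
# the corresponding optional datasets were not written.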
def write_spg(file_name, graph_sp, components, in_component):
"""save the partition and spg information"""
if os.path.isfile(file_name):
os.remove(file_name)
data_file = h5py.File(file_name, 'w')
grp = data_file.create_group('components')
n_com = len(components)
for i_com in range(0, n_com):
grp.create_dataset(str(i_com), data=components[i_com], dtype='uint32')
data_file.create_dataset('in_component', data=in_component, dtype='uint32')
data_file.create_dataset('sp_labels', data=graph_sp["sp_labels"], dtype='uint32')
data_file.create_dataset('sp_centroids', data=graph_sp["sp_centroids"], dtype='float32')
data_file.create_dataset('sp_length', data=graph_sp["sp_length"], dtype='float32')
data_file.create_dataset('sp_surface', data=graph_sp["sp_surface"], dtype='float32')
data_file.create_dataset('sp_volume', data=graph_sp["sp_volume"], dtype='float32')
data_file.create_dataset('sp_point_count', data=graph_sp["sp_point_count"], dtype='uint64')
data_file.create_dataset('source', data=graph_sp["source"], dtype='uint32')
data_file.create_dataset('target', data=graph_sp["target"], dtype='uint32')
data_file.create_dataset('se_delta_mean', data=graph_sp["se_delta_mean"], dtype='float32')
data_file.create_dataset('se_delta_std', data=graph_sp["se_delta_std"], dtype='float32')
data_file.create_dataset('se_delta_norm', data=graph_sp["se_delta_norm"], dtype='float32')
data_file.create_dataset('se_delta_centroid', data=graph_sp["se_delta_centroid"], dtype='float32')
data_file.create_dataset('se_length_ratio', data=graph_sp["se_length_ratio"], dtype='float32')
data_file.create_dataset('se_surface_ratio', data=graph_sp["se_surface_ratio"], dtype='float32')
data_file.create_dataset('se_volume_ratio', data=graph_sp["se_volume_ratio"], dtype='float32')
data_file.create_dataset('se_point_count_ratio', data=graph_sp["se_point_count_ratio"], dtype='float32')
def read_spg(file_name):
"""read the partition and spg information"""
data_file = h5py.File(file_name, 'r')
graph = dict([("is_nn", False)])
graph["source"] = np.array(data_file["source"], dtype='uint32')
graph["target"] = np.array(data_file["target"], dtype='uint32')
graph["sp_centroids"] = np.array(data_file["sp_centroids"], dtype='float32')
graph["sp_length"] = np.array(data_file["sp_length"], dtype='float32')
graph["sp_surface"] = np.array(data_file["sp_surface"], dtype='float32')
graph["sp_volume"] = np.array(data_file["sp_volume"], dtype='float32')
graph["sp_point_count"] = np.array(data_file["sp_point_count"], dtype='uint64')
graph["se_delta_mean"] = np.array(data_file["se_delta_mean"], dtype='float32')
graph["se_delta_std"] =
|
np.array(data_file["se_delta_std"], dtype='float32')
|
numpy.array
|
'''
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
Created on Apr 9, 2019
@author: <NAME> (<EMAIL>)
'''
import os
import glob
import argparse
import time
import pickle
import platform
import shutil
from random import shuffle
import json
import numpy as np
import pandas as pd
import cv2 as cv
from skimage.io import imread, imsave
from scipy.linalg import norm
import h5py
import matplotlib.pyplot as plt
import ipyparallel as ipp
from keras.models import Model, load_model
from keras.layers import Input, Dense, Lambda, ZeroPadding2D
from keras.layers import LeakyReLU, Flatten, Concatenate, Reshape, ReLU
from keras.layers import Conv2DTranspose, BatchNormalization
from keras.layers.merge import add, subtract
from keras.utils import multi_gpu_model
from keras.utils.data_utils import Sequence
import keras.backend as K
from keras import optimizers
from keras.engine.input_layer import InputLayer
from yolov3_detect import make_yolov3_model, BoundBox, WeightReader, draw_boxes_v3
from face_detection import FaceDetector
# Constants.
DEBUG = True
ALPHA = 0.2
RESOURCE_TYPE_UCCS = 'uccs'
RESOURCE_TYPE_VGGFACE2 = 'vggface2'
def triplet_loss(y_true, y_pred):
    # Compute the triplet margin loss from the concatenated anchor/positive/negative embeddings.
x = y_pred
return K.mean(K.maximum(K.sqrt(K.sum(K.pow(x[:, 0:64] - x[:, 64:128], 2.0), axis=-1)) \
- K.sqrt(K.sum(K.pow(x[:, 0:64] - x[:, 128:192], 2.0), axis=-1)) + ALPHA, 0.))
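# i.e. the standard triplet margin loss on 64-dimensional embeddings packed along the
# feature axis as [anchor | positive | negative]:
#   L = mean( max( ||a - p||_2 - ||a - n||_2 + ALPHA, 0 ) )
# so anchors end up at least ALPHA (= 0.2) closer to positives than to negatives.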
def create_db_fi(conf):
"""Create db for face identifier."""
conf = conf['fi_conf']
if conf['resource_type'] == RESOURCE_TYPE_UCCS:
raw_data_path = conf['raw_data_path']
nn_arch = conf['nn_arch']
if not os.path.isdir(os.path.join(raw_data_path, 'subject_faces')):
os.mkdir(os.path.join(raw_data_path, 'subject_faces'))
else:
shutil.rmtree(os.path.join(raw_data_path, 'subject_faces'))
os.mkdir(os.path.join(os.path.join(raw_data_path, 'subject_faces')))
gt_df = pd.read_csv(os.path.join(raw_data_path, 'training', 'training.csv'))
gt_df_g = gt_df.groupby('SUBJECT_ID')
# Collect face region images and create db, by subject ids.
db = pd.DataFrame(columns=['subject_id', 'face_file', 'w', 'h'])
for k in gt_df_g.groups.keys():
if k == -1: continue
df = gt_df_g.get_group(k)
for i in range(df.shape[0]):
file_name = df.iloc[i, 1]
# Load an image.
image = imread(os.path.join(raw_data_path, 'training', file_name))
# Check exception.
res = df.iloc[i, 3:] > 0
if res.all() == False:
continue
# Crop a face region.
l, t, r, b = (int(df.iloc[i, 3])
, int(df.iloc[i, 4])
, int((df.iloc[i, 3] + df.iloc[i, 5] - 1))
, int((df.iloc[i, 4] + df.iloc[i, 6] - 1)))
image = image[(t - 1):(b - 1), (l - 1):(r - 1), :]
# Adjust the original image size into the normalized image size according to the ratio of width, height.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
if w >= h:
w_p = nn_arch['image_size']
h_p = int(h / w * nn_arch['image_size'])
pad = nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = nn_arch['image_size']
w_p = int(w / h * nn_arch['image_size'])
pad = nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
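                # Worked example of the letterboxing above (sizes are illustrative):
                # with nn_arch['image_size'] == 416 and a 640x480 (w x h) crop, the
                # crop is resized to 416x312 and padded with 104 rows split as
                # pad_t = pad_b = 52, giving a 416x416 image; portrait crops are
                # padded on the left/right instead.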
# Write a face region image.
face_file_name = file_name[:-4] + '_' + str(k) + '_' \
+ str(int(df.iloc[i, 3])) + '_' + str(int(df.iloc[i, 4])) + file_name[-4:]
print('Save ' + face_file_name)
imsave(os.path.join(raw_data_path, 'subject_faces', face_file_name), (image).astype('uint8'))
# Add subject face information into db.
db = pd.concat([db, pd.DataFrame({'subject_id': [k]
, 'face_file': [face_file_name]
, 'w': [w]
, 'h': [h]})])
# Save db.
db.to_csv('subject_image_db.csv')
elif conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
raw_data_path = conf['raw_data_path']
nn_arch = conf['nn_arch']
# Collect face region images and create db, by subject ids.
pClient = ipp.Client()
pView = pClient[:]
pView.push({'raw_data_path': raw_data_path, 'nn_arch': nn_arch})
with pView.sync_imports():
import numpy as np
import pandas as pd
import cv2 as cv
from skimage.io import imread, imsave
if not os.path.isdir(os.path.join(raw_data_path, 'subject_faces_vggface2')):
os.mkdir(os.path.join(raw_data_path, 'subject_faces_vggface2'))
else:
shutil.rmtree(os.path.join(raw_data_path, 'subject_faces_vggface2'))
os.mkdir(os.path.join(os.path.join(raw_data_path, 'subject_faces_vggface2')))
df = pd.read_csv(os.path.join(raw_data_path, 'loose_bb_train.csv'))
db = pd.DataFrame(columns=['subject_id', 'face_file', 'w', 'h'])
dfs = [df.iloc[i] for i in range(df.shape[0])]
#dfs = [df.iloc[i] for i in range(100)]
res = pView.map_sync(save_extracted_face, dfs)
        # drop records that were skipped because of invalid bounding boxes
        res = [r for r in res if r is not None]
        db = pd.concat(res)
# Save db.
db.to_csv('subject_image_vggface2_db.csv')
else:
raise ValueError('resource type is not valid.')
def save_extracted_face(df):
global raw_data_path, nn_arch
import os
cv = cv2
pd = pandas
np = numpy
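    # cv2, pandas and numpy are assumed to be available in the ipyparallel engine
    # namespace (set up by the sync_imports() block and pView.push() call in
    # create_db_fi()); the assignments above just restore the usual short aliases.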
id_filename = df.iloc[0].split('/')
identity = id_filename[0]
file_name = id_filename[1] + '.jpg'
x = df.iloc[1]
y = df.iloc[2]
w = df.iloc[3]
h = df.iloc[4]
if x < 0 or y < 0 or w <=0 or h <=0:
return None
# Load an image.
image = imread(os.path.join(raw_data_path, 'train', identity, file_name))
# Get a face region.
image = image[y:(y + h), x:(x + w), :]
# Adjust the original image size into the normalized image size according to the ratio of width, height.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
if w >= h:
w_p = nn_arch['image_size']
h_p = int(h / w * nn_arch['image_size'])
pad = nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = nn_arch['image_size']
w_p = int(w / h * nn_arch['image_size'])
pad = nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
# Write a face region image.
face_file_name = identity + '_' + file_name
print('Save ' + face_file_name)
imsave(os.path.join(raw_data_path, 'subject_faces_vggface2', face_file_name), (image).astype('uint8'))
# Add subject face information into db.
return pd.DataFrame({'subject_id': [identity]
, 'face_file': [face_file_name]
, 'w': [w]
, 'h': [h]})
class FaceIdentifier(object):
"""Face identifier to use yolov3."""
# Constants.
MODEL_PATH = 'face_identifier.h5'
def __init__(self, conf):
"""
Parameters
----------
conf: dictionary
Face detector configuration dictionary.
"""
# Initialize.
self.conf = conf['fi_conf']
self.raw_data_path = self.conf['raw_data_path']
self.hps = self.conf['hps']
self.nn_arch = self.conf['nn_arch']
self.model_loading = self.conf['model_loading']
if self.model_loading:
if self.conf['multi_gpu']:
self.model = load_model(self.MODEL_PATH, custom_objects={'triplet_loss': triplet_loss})
self.parallel_model = multi_gpu_model(self.model, gpus=self.conf['num_gpus'])
opt = optimizers.Adam(lr=self.hps['lr']
, beta_1=self.hps['beta_1']
, beta_2=self.hps['beta_2']
, decay=self.hps['decay'])
self.parallel_model.compile(optimizer=opt, loss=triplet_loss)
else:
self.model = load_model(self.MODEL_PATH, custom_objects={'triplet_loss': triplet_loss})
else:
# Design the face identification model.
# Inputs.
input_a = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input_a')
input_p = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input_p')
input_n = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input_n')
# Load yolov3 as the base model.
base = self.YOLOV3Base
base.name = 'base'
# Get triplet facial ids.
xa = base(input_a) # Non-linear.
xa = Flatten()(xa)
c_dense_layer = Dense(self.nn_arch['dense1_dim'], activation='relu', name='dense1')
l2_norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=-1), name='l2_norm_layer')
xa = c_dense_layer(xa)
xa = l2_norm_layer(xa)
xp = base(input_p)
xp = Flatten()(xp)
xp = c_dense_layer(xp)
xp = l2_norm_layer(xp)
xn = base(input_n)
xn = Flatten()(xn)
xn = c_dense_layer(xn)
xn = l2_norm_layer(xn)
output = Concatenate(name='output')([xa, xp, xn]) #?
if self.conf['multi_gpu']:
self.model = Model(inputs=[input_a, input_p, input_n], outputs=[output])
opt = optimizers.Adam(lr=self.hps['lr']
, beta_1=self.hps['beta_1']
, beta_2=self.hps['beta_2']
, decay=self.hps['decay'])
self.model.compile(optimizer=opt, loss=triplet_loss)
self.model.summary()
self.parallel_model = multi_gpu_model(Model(inputs=[input_a, input_p, input_n], outputs=[output])
, gpus=self.conf['num_gpus'])
self.parallel_model.compile(optimizer=opt, loss=triplet_loss)
self.parallel_model.summary()
else:
self.model = Model(inputs=[input_a, input_p, input_n], outputs=[output])
opt = optimizers.Adam(lr=self.hps['lr']
, beta_1=self.hps['beta_1']
, beta_2=self.hps['beta_2']
, decay=self.hps['decay'])
self.model.compile(optimizer=opt, loss=triplet_loss)
self.model.summary()
# Create face detector.
self.fd = FaceDetector(conf['fd_conf'])
# Make fid extractor and face identifier.
self._make_fid_extractor()
def _make_fid_extractor(self):
"""Make facial id extractor."""
# Design the face identification model.
# Inputs.
input1 = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input1')
# Load yolov3 as the base model.
base = self.model.get_layer('base')
# Get facial id.
x = base(input1) # Non-linear.
x = Flatten()(x)
x = self.model.get_layer('dense1')(x)
x = self.model.get_layer('l2_norm_layer')(x)
facial_id = x
self.fid_extractor = Model(inputs=[input1], outputs=[facial_id])
@property
def YOLOV3Base(self):
"""Get yolov3 as a base model.
Returns
-------
Model of Keras
Partial yolo3 model from the input layer to the add_23 layer
"""
if self.conf['yolov3_base_model_load']:
base = load_model('yolov3_base.h5')
base.trainable = True
return base
yolov3 = make_yolov3_model()
# Load the weights.
weight_reader = WeightReader('yolov3.weights')
weight_reader.load_weights(yolov3)
# Make a base model.
input1 = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input1')
# 0 ~ 1.
conv_layer = yolov3.get_layer('conv_' + str(0))
x = ZeroPadding2D(1)(input1) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(0))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(1))
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 2 ~ 3.
for i in range(2, 4, 2):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
# 5.
conv_layer = yolov3.get_layer('conv_' + str(5))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(5))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 6 ~ 10.
for i in range(6, 10, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
# 12.
conv_layer = yolov3.get_layer('conv_' + str(12))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(12))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 13 ~ 35.
for i in range(13, 35, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
# 37.
conv_layer = yolov3.get_layer('conv_' + str(37))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(37))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 38 ~ 60.
for i in range(38, 60, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
# 62.
conv_layer = yolov3.get_layer('conv_' + str(62))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(62))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 63 ~ 73.
for i in range(63, 73, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
output = x
base = Model(inputs=[input1], outputs=[output])
base.trainable = True
base.save('yolov3_base.h5')
return base
def train(self):
"""Train face detector."""
if self.conf['resource_type'] == RESOURCE_TYPE_UCCS:
trGen = self.TrainingSequence(self.raw_data_path, self.hps, self.nn_arch, load_flag=False)
elif self.conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
trGen = self.TrainingSequenceVGGFace2(self.raw_data_path, self.hps, self.nn_arch, load_flag=False)
else:
raise ValueError('resource type is not valid.')
if self.conf['multi_gpu']:
self.parallel_model.fit_generator(trGen
, steps_per_epoch=self.hps['step'] #?
, epochs=self.hps['epochs']
, verbose=1
, max_queue_size=400
, workers=8
, use_multiprocessing=True)
else:
self.model.fit_generator(trGen
, steps_per_epoch=self.hps['step']
, epochs=self.hps['epochs']
, verbose=1
, max_queue_size=100
, workers=4
, use_multiprocessing=True)
print('Save the model.')
self.model.save(self.MODEL_PATH)
def make_facial_ids_db(self):
"""Make facial ids database."""
if self.conf['resource_type'] == RESOURCE_TYPE_UCCS:
db = pd.read_csv('subject_image_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
with h5py.File('subject_facial_ids.h5', 'w') as f:
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces', ff))
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
for k, ff in enumerate(list(df.iloc[:, 1])):
f[ff] = facial_ids[k]
f[ff].attrs['subject_id'] = subject_id
elif self.conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
db = pd.read_csv('subject_image_vggface2_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
with h5py.File('subject_facial_vggface2_ids.h5', 'w') as f:
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces_vggface2', ff)) #?
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
for k, ff in enumerate(list(df.iloc[:, 1])):
f[ff] = facial_ids[k]
f[ff].attrs['subject_id'] = subject_id
else:
raise ValueError('resource type is not valid.')
def register_facial_ids(self):
"""Register facial ids."""
if self.conf['resource_type'] == RESOURCE_TYPE_UCCS:
db = pd.read_csv('subject_image_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
db_facial_id = pd.DataFrame(columns=['subject_id', 'facial_id'])
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces', ff))
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
facial_id = np.asarray(pd.DataFrame(facial_ids).mean())
db_facial_id = pd.concat([db_facial_id, pd.DataFrame({'subject_id': [subject_id]
, 'facial_id': [facial_id]})])
# Save db.
db_facial_id.index = db_facial_id.subject_id
db_facial_id = db_facial_id.to_dict()['facial_id']
with open('ref_facial_id_db.pickle', 'wb') as f:
pickle.dump(db_facial_id, f)
elif self.conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
"""Register facial ids."""
db = pd.read_csv('subject_image_vggface2_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
db_facial_id = pd.DataFrame(columns=['subject_id', 'facial_id'])
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces_vggface2', ff))
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
facial_id = np.asarray(pd.DataFrame(facial_ids).mean())
db_facial_id = pd.concat([db_facial_id, pd.DataFrame({'subject_id': [subject_id]
, 'facial_id': [facial_id]})])
# Save db.
db_facial_id.index = db_facial_id.subject_id
db_facial_id = db_facial_id.to_dict()['facial_id']
with open('ref_facial_id_vggface2_db.pickle', 'wb') as f:
pickle.dump(db_facial_id, f)
def evaluate(self):
"""Evaluate."""
test_path = self.conf['test_path']
output_file_path = self.conf['output_file_path']
if not os.path.isdir(os.path.join(test_path, 'results_fi')):
os.mkdir(os.path.join(test_path, 'results_fi'))
else:
shutil.rmtree(os.path.join(test_path, 'results_fi'))
os.mkdir(os.path.join(test_path, 'results_fi'))
gt_df = pd.read_csv(os.path.join(test_path, 'validation.csv'))
gt_df_g = gt_df.groupby('FILE')
file_names = glob.glob(os.path.join(test_path, '*.jpg'))
with open('ref_facial_id_db.pickle', 'rb') as f:
db_facial_id = pickle.load(f)
# Get registered facial id data.
subject_ids = list(db_facial_id.keys())
facial_ids = []
for subject_id in subject_ids:
facial_ids.append(db_facial_id[subject_id])
reg_facial_ids = np.asarray(facial_ids)
# Detect faces, identify faces and save results.
count1 = 1
with open(output_file_path, 'w') as f:
for file_name in file_names:
if DEBUG: print(count1, '/', len(file_names), file_name)
count1 += 1
# Load an image.
                image = imread(file_name)  # file_name already includes test_path (it comes from glob)
image_o = image.copy()
image = image/255
# Adjust the original image size into the normalized image size according to the ratio of width, height.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
if w >= h:
w_p = self.nn_arch['image_size']
h_p = int(h / w * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = self.nn_arch['image_size']
w_p = int(w / h * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
image = image[np.newaxis, :]
# Detect faces.
boxes = self.fd.detect(image)
# correct the sizes of the bounding boxes
for box in boxes:
if w >= h:
box.xmin = np.min([box.xmin * w / self.nn_arch['image_size'], w])
box.xmax = np.min([box.xmax * w / self.nn_arch['image_size'], w])
box.ymin = np.min([np.max([box.ymin - pad_t, 0]) * w / self.nn_arch['image_size'], h])
box.ymax = np.min([np.max([box.ymax - pad_t, 0]) * w / self.nn_arch['image_size'], h])
else:
box.xmin = np.min([np.max([box.xmin - pad_l, 0]) * h / self.nn_arch['image_size'], w])
box.xmax = np.min([np.max([box.xmax - pad_l, 0]) * h / self.nn_arch['image_size'], w])
box.ymin = np.min([box.ymin * h / self.nn_arch['image_size'], h])
box.ymax = np.min([box.ymax * h / self.nn_arch['image_size'], h])
count = 1
for box in boxes:
if count > 60:
break
# Search for id from registered facial ids.
# Crop a face region.
l, t, r, b = int(box.xmin), int(box.ymin), int(box.xmax), int(box.ymax)
image = image_o[(t - 1):(b - 1), (l - 1):(r - 1), :]
image = image/255
# Adjust the original image size into the normalized image size according to the ratio of width, height.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
# Check exception.
if w == 0 or h == 0:
continue
if w >= h:
w_p = self.nn_arch['image_size']
h_p = int(h / w * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = self.nn_arch['image_size']
w_p = int(w / h * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
# Create anchor facial ids.
anchor_facial_id = self.fid_extractor.predict(image[np.newaxis, ...])
anchor_facial_id = np.squeeze(anchor_facial_id)
# Calculate similarity distances for each registered face ids.
sim_dists = []
for i in range(len(subject_ids)):
sim_dists.append(norm(anchor_facial_id - reg_facial_ids[i]))
sim_dists = np.asarray(sim_dists)
cand = np.argmin(sim_dists)
if sim_dists[cand] > self.hps['sim_th']:
continue
subject_id = subject_ids[cand]
box.subject_id = subject_id
if platform.system() == 'Windows':
f.write(file_name.split('\\')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',')
print(file_name.split('\\')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',', end=' ')
else:
f.write(file_name.split('/')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',')
print(file_name.split('/')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',', end=' ')
f.write(str(box.xmax - box.xmin) + ',' + str(box.ymax - box.ymin) + ',' + str(box.get_score()) + '\n')
print(str(box.xmax - box.xmin) + ',' + str(box.ymax - box.ymin) + ',' + str(box.get_score()))
count +=1
#boxes = [box for box in boxes if box.subject_id != -1]
# Draw bounding boxes of ground truth.
if platform.system() == 'Windows':
file_new_name = file_name.split('\\')[-1]
else:
file_new_name = file_name.split('/')[-1]
try:
df = gt_df_g.get_group(file_new_name)
except KeyError:
continue
gt_boxes = []
for i in range(df.shape[0]):
# Check exception.
res = df.iloc[i, 3:] > 0 #?
if res.all() == False: #or df.iloc[i, 2] == -1:
continue
xmin = int(df.iloc[i, 3])
xmax = int(xmin + df.iloc[i, 5] - 1)
ymin = int(df.iloc[i, 4])
ymax = int(ymin + df.iloc[i, 6] - 1)
gt_box = BoundBox(xmin, ymin, xmax, ymax, objness=1., classes=[1.0], subject_id=df.iloc[i, 2])
gt_boxes.append(gt_box)
# Check exception.
if len(gt_boxes) == 0 or len(boxes) == 0: #?
continue
image1 = draw_boxes_v3(image_o, gt_boxes, self.hps['face_conf_th'], color=(255, 0, 0))
del image_o
# Draw bounding boxes on the image using labels.
image = draw_boxes_v3(image1, boxes, self.hps['face_conf_th'], color=(0, 255, 0))
del image1
# Write the image with bounding boxes to file.
# Draw bounding boxes of ground truth.
if platform.system() == 'Windows':
file_new_name = file_name.split('\\')[-1]
else:
file_new_name = file_name.split('/')[-1]
file_new_name = file_new_name[:-4] + '_detected' + file_new_name[-4:]
print(file_new_name)
imsave(os.path.join(test_path, 'results_fi', file_new_name), (image).astype('uint8'))
def test(self):
"""Test."""
test_path = self.conf['test_path']
output_file_path = self.conf['output_file_path']
file_names = glob.glob(os.path.join(test_path, '*.jpg'))
with open('ref_facial_id_db.pickle', 'rb') as f:
db_facial_id = pickle.load(f)
# Get registered facial id data.
subject_ids = list(db_facial_id.keys())
facial_ids = []
for subject_id in subject_ids:
facial_ids.append(db_facial_id[subject_id])
reg_facial_ids = np.asarray(facial_ids)
# Detect faces, identify faces and save results.
count1 = 1
with open(output_file_path, 'w') as f:
for file_name in file_names:
if DEBUG: print(count1, '/', len(file_names), file_name)
count1 += 1
# Load an image.
                image = imread(file_name)  # file_name already includes test_path (it comes from glob)
image_o = image.copy()
image = image/255
# Adjust the original image size into the normalized image size according to the ratio of width, height.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
if w >= h:
w_p = self.nn_arch['image_size']
h_p = int(h / w * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = self.nn_arch['image_size']
w_p = int(w / h * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
image = image[np.newaxis, :]
# Detect faces.
boxes = self.fd.detect(image)
# correct the sizes of the bounding boxes
for box in boxes:
if w >= h:
box.xmin = np.min([box.xmin * w / self.nn_arch['image_size'], w])
box.xmax = np.min([box.xmax * w / self.nn_arch['image_size'], w])
box.ymin = np.min([np.max([box.ymin - pad_t, 0]) * w / self.nn_arch['image_size'], h])
box.ymax = np.min([np.max([box.ymax - pad_t, 0]) * w / self.nn_arch['image_size'], h])
else:
box.xmin = np.min([np.max([box.xmin - pad_l, 0]) * h / self.nn_arch['image_size'], w])
box.xmax = np.min([np.max([box.xmax - pad_l, 0]) * h / self.nn_arch['image_size'], w])
box.ymin = np.min([box.ymin * h / self.nn_arch['image_size'], h])
box.ymax = np.min([box.ymax * h / self.nn_arch['image_size'], h])
count = 1
for box in boxes:
if count > 60:
break
# Search for id from registered facial ids.
# Crop a face region.
l, t, r, b = int(box.xmin), int(box.ymin), int(box.xmax), int(box.ymax)
image = image_o[(t - 1):(b - 1), (l - 1):(r - 1), :]
image = image/255
# Adjust the original image size into the normalized image size according to the ratio of width, height.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
# Check exception.
if w == 0 or h == 0:
continue
if w >= h:
w_p = self.nn_arch['image_size']
h_p = int(h / w * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = self.nn_arch['image_size']
w_p = int(w / h * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
# Create anchor facial ids.
anchor_facial_id = self.fid_extractor.predict(image[np.newaxis, ...])
anchor_facial_id = np.squeeze(anchor_facial_id)
anchor_facial_ids = np.asarray([anchor_facial_id for _ in range(len(subject_ids))])
# Calculate similarity distances for each registered face ids.
sim_dists = []
for i in range(len(subject_ids)):
sim_dists.append(norm(anchor_facial_ids[i] - reg_facial_ids[i]))
sim_dists = np.asarray(sim_dists)
cand = np.argmin(sim_dists)
if sim_dists[cand] > self.hps['sim_th']:
continue
subject_id = subject_ids[cand]
if platform.system() == 'Windows':
f.write(file_name.split('\\')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',')
else:
f.write(file_name.split('/')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',')
f.write(str(box.xmax - box.xmin) + ',' + str(box.ymax - box.ymin) + ',' + str(box.get_score()) + '\n')
count +=1
# Check exception.
if len(boxes) == 0:
continue
def create_face_reconst_model(self):
"""Create the face reconstruction model."""
if not hasattr(self, 'model') or not isinstance(self.model, Model):
raise ValueError('A valid model instance doesn\'t exist.')
if self.conf['face_vijana_recon_load']:
self.recon_model = load_model('face_vijnana_recon.h5')
return
# Get all layers and extract input layers and output layers.
layers = self.model.layers
input_layers = [layer for layer in layers if isinstance(layer, InputLayer)]
output_layer_names = [t.name.split('/')[0] for t in self.model.outputs]
output_layers = [layer for layer in layers if layer.name in output_layer_names]
# Input.
input1 = Input(shape=(int(output_layers[0].output_shape[1]/3), ), name='input1')
x = Lambda(lambda x: K.l2_normalize(x, axis=-1), name='l2_norm_layer')(input1) #?
x = ReLU()(x)
dense_layer = Dense(self.model.get_layer('dense1').input_shape[1]
, activation='linear'
, name='dense1')
x = dense_layer(x)
dense_layer.set_weights((self.model.get_layer('dense1').get_weights()[0].T
, np.random.rand(self.model.get_layer('dense1').get_weights()[0].shape[0])))
# Yolov3.
yolov3 = self.model.get_layer('base')
x = Reshape(yolov3.output_shape[1:])(x)
skip = x #?
# 73 ~ 63.
for i in range(73, 63, -3):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #?
# 62.
conv_layer = yolov3.get_layer('conv_' + str(62))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(62))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
skip = x
# 60 ~ 38.
for i in range(60, 38, -3):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #??
# 37.
conv_layer = yolov3.get_layer('conv_' + str(37))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(37))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
skip = x
# 35 ~ 13.
for i in range(35, 13, -3):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #?
# 12.
conv_layer = yolov3.get_layer('conv_' + str(12))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(12))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
skip = x
# 10 ~ 6.
for i in range(10, 6, -3):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #?
# 5.
conv_layer = yolov3.get_layer('conv_' + str(5))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(5))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
skip = x
# 4 ~ 2.
for i in range(3, 1, -2):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #?
# 1 ~ 0.
conv_layer = yolov3.get_layer('conv_' + str(1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(0))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same'
, use_bias=False
, name='output') #?
norm_layer = yolov3.get_layer('bnorm_' + str(0))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
output = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
self.recon_model = Model(inputs=[input1], outputs=[output])
self.recon_model.trainable = True
self.recon_model.save('face_vijnana_recon.h5')
class TrainingSequence(Sequence):
"""Training data set sequence."""
def __init__(self, raw_data_path, hps, nn_arch, load_flag=True):
if load_flag:
with open('img_triplet_pairs.pickle', 'rb') as f:
self.img_triplet_pairs = pickle.load(f)
self.img_triplet_pairs = self.img_triplet_pairs
# Create indexing data of positive and negative cases.
self.raw_data_path = raw_data_path
self.hps = hps
self.nn_arch = nn_arch
self.db = pd.read_csv('subject_image_db.csv')
self.db = self.db.iloc[:, 1:]
self.batch_size = self.hps['batch_size']
self.hps['step'] = len(self.img_triplet_pairs) // self.batch_size
if len(self.img_triplet_pairs) % self.batch_size != 0:
self.hps['step'] +=1
else:
# Create indexing data of positive and negative cases.
self.raw_data_path = raw_data_path
self.hps = hps
self.db = pd.read_csv('subject_image_db.csv')
self.db = self.db.iloc[:, 1:]
self.t_indexes = np.asarray(self.db.index)
self.db_g = self.db.groupby('subject_id')
self.img_triplet_pairs = []
valid_indexes = self.t_indexes
for i in self.db_g.groups.keys():
df = self.db_g.get_group(i)
ex_indexes2 = np.asarray(df.index)
ex_inv_idxes = []
for v in valid_indexes:
if (ex_indexes2 == v).any():
ex_inv_idxes.append(False)
else:
ex_inv_idxes.append(True)
ex_inv_idxes = np.asarray(ex_inv_idxes)
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace as dc
import numpy as np
@dc.program
def indirection_scalar(A: dc.float32[10]):
i = 0
return A[i]
def test_indirection_scalar():
A = np.random.randn(10).astype(np.float32)
res = indirection_scalar(A)[0]
assert (res == A[0])
@dc.program
def indirection_scalar_assign(A: dc.float32[10]):
i = 2
A[i] = 5
return A[i]
def test_indirection_scalar_assign():
A = np.random.randn(10).astype(np.float32)
res = indirection_scalar_assign(A)[0]
assert (res == 5)
@dc.program
def indirection_scalar_augassign(A: dc.float32[10]):
i = 2
j = 3
A[i] += A[j]
return A[i]
def test_indirection_scalar_augassign():
A = np.random.randn(10).astype(np.float32)
res = indirection_scalar_augassign(np.copy(A))[0]
assert (np.allclose(res, A[2] + A[3]))
@dc.program
def indirection_scalar_nsdfg(A: dc.float32[10], x: dc.int32[10]):
B = np.empty_like(A)
# TODO: This doesn't work with 0:A.shape[0]
for i in dc.map[0:10]:
a = x[i]
B[i] = A[a]
return B
def test_indirection_scalar_nsdfg():
A = np.random.randn(10).astype(np.float32)
x = np.random.randint(0, 10, size=(10,), dtype=np.int32)
res = indirection_scalar_nsdfg(A, x)
assert (np.allclose(res, A[x]))
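# Editorial sketch (not part of the original DaCe test file): the _nsdfg tests
# here exercise the indirection pattern B[i] = A[x[i]] inside a dace.map; in
# plain NumPy the same gather is simply fancy indexing, which is what the
# assertions compare against.
def _numpy_indirection_reference():
    A = np.arange(10, dtype=np.float32)
    x = np.array([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], dtype=np.int32)
    B = np.empty_like(A)
    for i in range(10):  # element-by-element indirection
        B[i] = A[x[i]]
    assert np.allclose(B, A[x])  # identical to a single fancy-indexing gather
    return B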
@dc.program
def indirection_scalar_assign_nsdfg(A: dc.float32[10], x: dc.int32[10]):
B = np.empty_like(A)
# TODO: This doesn't work with 0:A.shape[0]
for i in dc.map[0:10]:
a = x[i]
B[a] = A[a]
return B
def test_indirection_scalar_assign_nsdfg():
A = np.random.randn(10).astype(np.float32)
x = np.random.randint(0, 10, size=(10,), dtype=np.int32)
res = indirection_scalar_assign_nsdfg(A, x)
assert (np.allclose(res[x], A[x]))
@dc.program
def indirection_scalar_augassign_nsdfg(A: dc.float32[10], x: dc.int32[10]):
B = np.full_like(A, 5)
# TODO: This doesn't work with 0:A.shape[0]
for i in dc.map[0:10]:
a = x[i]
B[a] += A[a]
return B
def test_indirection_scalar_augassign_nsdfg():
A = np.random.randn(10).astype(np.float32)
x = np.random.randint(0, 10, size=(10,), dtype=np.int32)
# -*- coding: utf-8 -*-
"""Copyright 2019 DScribe developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import unittest
import numpy as np
import sparse
import scipy.linalg
from dscribe.descriptors import ACSF
from testbaseclass import TestBaseClass
from ase import Atoms
from ase.build import molecule
from ase.build import bulk
H2O = Atoms(
cell=[[15.0, 0.0, 0.0], [0.0, 15.0, 0.0], [0.0, 0.0, 15.0]],
positions=[
[0, 0, 0],
[0.95, 0, 0],
[
0.95 * (1 + math.cos(76 / 180 * math.pi)),
0.95 * math.sin(76 / 180 * math.pi),
0.0,
],
],
symbols=["H", "O", "H"],
)
H = Atoms(
cell=[[15.0, 0.0, 0.0], [0.0, 15.0, 0.0], [0.0, 0.0, 15.0]],
positions=[
[0, 0, 0],
],
symbols=["H"],
)
default_desc = ACSF(
rcut=6.0,
species=[1, 8],
g2_params=[[1, 2], [4, 5]],
g3_params=[1, 2, 3, 4],
g4_params=[[1, 2, 3], [3, 1, 4], [4, 5, 6], [7, 8, 9]],
g5_params=[[1, 2, 3], [3, 1, 4], [4, 5, 6], [7, 8, 9]],
)
def cutoff(R, rcut):
return 0.5 * (np.cos(np.pi * R / rcut) + 1)
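# Editorial sketch (not part of the original test suite): quick spot check of
# the cosine cutoff defined above -- it equals 1 at R = 0, 0.5 at R = rcut/2,
# and decays smoothly to 0 at R = rcut.
def _demo_cutoff():
    rcut = 6.0
    assert np.isclose(cutoff(0.0, rcut), 1.0)
    assert np.isclose(cutoff(rcut / 2.0, rcut), 0.5)
    assert np.isclose(cutoff(rcut, rcut), 0.0)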
class ACSFTests(TestBaseClass, unittest.TestCase):
def test_exceptions(self):
"""Tests different invalid parameters that should raise an
exception.
"""
# Invalid species
with self.assertRaises(ValueError):
ACSF(rcut=6.0, species=None)
# Invalid bond_params
with self.assertRaises(ValueError):
ACSF(rcut=6.0, species=[1, 6, 8], g2_params=[1, 2, 3])
# Invalid bond_cos_params
with self.assertRaises(ValueError):
ACSF(rcut=6.0, species=[1, 6, 8], g3_params=[[1, 2], [3, 1]])
# Invalid bond_cos_params
with self.assertRaises(ValueError):
ACSF(rcut=6.0, species=[1, 6, 8], g3_params=[[1, 2, 4], [3, 1]])
# Invalid ang4_params
with self.assertRaises(ValueError):
ACSF(rcut=6.0, species=[1, 6, 8], g4_params=[[1, 2], [3, 1]])
# Invalid ang5_params
with self.assertRaises(ValueError):
ACSF(rcut=6.0, species=[1, 6, 8], g5_params=[[1, 2], [3, 1]])
def test_properties(self):
"""Used to test that changing the setup through properties works as
intended.
"""
# Test changing species
a = ACSF(
rcut=6.0,
species=[1, 8],
g2_params=[[1, 2]],
sparse=False,
)
nfeat1 = a.get_number_of_features()
vec1 = a.create(H2O)
a.species = ["C", "H", "O"]
nfeat2 = a.get_number_of_features()
vec2 = a.create(molecule("CH3OH"))
self.assertTrue(nfeat1 != nfeat2)
self.assertTrue(vec1.shape[1] != vec2.shape[1])
def test_number_of_features(self):
"""Tests that the reported number of features is correct."""
species = [1, 8]
n_elem = len(species)
desc = ACSF(rcut=6.0, species=species)
n_features = desc.get_number_of_features()
self.assertEqual(n_features, n_elem)
desc = ACSF(rcut=6.0, species=species, g2_params=[[1, 2], [4, 5]])
n_features = desc.get_number_of_features()
self.assertEqual(n_features, n_elem * (2 + 1))
desc = ACSF(rcut=6.0, species=[1, 8], g3_params=[1, 2, 3, 4])
n_features = desc.get_number_of_features()
self.assertEqual(n_features, n_elem * (4 + 1))
desc = ACSF(
rcut=6.0,
species=[1, 8],
g4_params=[[1, 2, 3], [3, 1, 4], [4, 5, 6], [7, 8, 9]],
)
n_features = desc.get_number_of_features()
self.assertEqual(n_features, n_elem + 4 * 3)
desc = ACSF(
rcut=6.0,
species=[1, 8],
g2_params=[[1, 2], [4, 5]],
g3_params=[1, 2, 3, 4],
g4_params=[[1, 2, 3], [3, 1, 4], [4, 5, 6], [7, 8, 9]],
)
n_features = desc.get_number_of_features()
self.assertEqual(n_features, n_elem * (1 + 2 + 4) + 4 * 3)
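    # Editorial helper (hypothetical, not part of the original DScribe tests):
    # the assertions above all follow one closed form -- the G1..G3 terms scale
    # with the number of species, the G4/G5 terms with the number of unordered
    # species pairs.
    @staticmethod
    def _expected_feature_count(n_elem, n_g2=0, n_g3=0, n_g4=0, n_g5=0):
        n_pairs = n_elem * (n_elem + 1) // 2
        return n_elem * (1 + n_g2 + n_g3) + n_pairs * (n_g4 + n_g5)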
def test_sparse(self):
"""Tests the sparse matrix creation."""
# Sparse
default_desc._sparse = True
vec = default_desc.create(H2O)
self.assertTrue(type(vec) == sparse.COO)
# Dense
default_desc._sparse = False
vec = default_desc.create(H2O)
self.assertTrue(type(vec) == np.ndarray)
def test_parallel_dense(self):
"""Tests creating dense output parallelly."""
samples = [molecule("CO"), molecule("NO")]
desc = ACSF(
rcut=6.0,
species=[6, 7, 8],
g2_params=[[1, 2], [4, 5]],
g3_params=[1, 2, 3, 4],
g4_params=[[1, 2, 3], [3, 1, 4], [4, 5, 6], [7, 8, 9]],
g5_params=[[1, 2, 3], [3, 1, 4], [4, 5, 6], [7, 8, 9]],
)
n_features = desc.get_number_of_features()
# Determining number of jobs based on the amount of CPUs
desc.create(system=samples, n_jobs=-1, only_physical_cores=False)
desc.create(system=samples, n_jobs=-1, only_physical_cores=True)
# Multiple systems, serial job, fixed size
output = desc.create(
system=samples,
positions=[[0, 1], [0, 1]],
n_jobs=1,
)
assumed = np.empty((2, 2, n_features))
assumed[0, 0] = desc.create(samples[0], [0])
assumed[0, 1] = desc.create(samples[0], [1])
assumed[1, 0] = desc.create(samples[1], [0])
assumed[1, 1] = desc.create(samples[1], [1])
self.assertTrue(
|
np.allclose(output, assumed)
|
numpy.allclose
|
##############################################################
# COCO dataset info loader. Removes invalid items.
##############################################################
import os
import numpy as np
import scipy.sparse
import cv2
import copy
import math
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
def annToMask(segm, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
def _annToRLE(segm, height, width):
"""
Convert an annotation, which can be polygons or uncompressed RLE, to RLE.
:return: RLE (COCO run-length encoding)
"""
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = mask_util.frPyObjects(segm, height, width)
rle = mask_util.merge(rles)
elif isinstance(segm['counts'], list):
# uncompressed RLE
rle = mask_util.frPyObjects(segm, height, width)
else:
# rle
rle = segm
return rle
rle = _annToRLE(segm, height, width)
mask = mask_util.decode(rle)
return mask
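# Editorial usage sketch (not part of the original loader): a single triangular
# polygon, given as a flat [x1, y1, x2, y2, x3, y3] list, decodes to a small
# binary mask. The coordinates here are made up for illustration and assume
# pycocotools is installed.
def _demo_annToMask():
    poly = [[1.0, 1.0, 8.0, 1.0, 4.0, 8.0]]
    mask = annToMask(poly, height=10, width=10)
    return mask  # uint8 array of shape (10, 10) with 1s inside the triangle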
class CocoDatasetInfo():
def __init__(self, ImageRoot, AnnoFile, onlyperson=False, loadimg=True):
''' Load the COCO dataset **only**, applying the necessary pre-processing:
1. obj['segmentation'] polygons should have >= 3 points, so require >= 6 coordinates
2. obj['area'] should >= GT_MIN_AREA
3. ignore objs with obj['ignore']==1
4. IOU(bbox, img) should > 0, Area(bbox) should > 0
Attributes:
self.category_to_id_map
self.classes
self.num_classes : 81
self.json_category_id_to_contiguous_id
self.contiguous_category_id_to_json_id
self.image_ids : <class 'list'>
self.keypoints
self.keypoint_flip_map
self.keypoints_to_id_map
self.num_keypoints : 17
Tools:
rawdata = self.flip_rawdata(rawdata)
'''
self.GT_MIN_AREA = 0
self.loadimg = loadimg
self.imgroot = ImageRoot
self.COCO = COCO(AnnoFile)
# Set up dataset classes
if onlyperson:
self.category_ids = [1]
else:
self.category_ids = self.COCO.getCatIds()
categories = [c['name'] for c in self.COCO.loadCats(self.category_ids)]
self.category_to_id_map = dict(zip(categories, self.category_ids))
self.classes = ['__background__'] + categories
self.num_classes = len(self.classes)
self.json_category_id_to_contiguous_id = {
v: i + 1
for i, v in enumerate(self.category_ids)
}
self.contiguous_category_id_to_json_id = {
v: k
for k, v in self.json_category_id_to_contiguous_id.items()
}
# self.__len__() reference to self.image_ids
self.image_ids = self.COCO.getImgIds(catIds=self.category_ids)
self.image_ids.sort()
# self.image_ids = self.image_ids[0:200] # for debug.
# self.image_ids = [9,9,9,9,9]
# Initialize COCO keypoint information.
self.keypoints = None
self.keypoint_flip_map = None
self.keypoints_to_id_map = None
self.num_keypoints = 0
# Thus far only the 'person' category has keypoints
if 'person' in self.category_to_id_map:
cat_info = self.COCO.loadCats([self.category_to_id_map['person']])
# Check if the annotations contain keypoint data or not
if 'keypoints' in cat_info[0]:
keypoints = cat_info[0]['keypoints']
self.keypoints_to_id_map = dict(
zip(keypoints, range(len(keypoints))))
self.keypoints = keypoints
self.num_keypoints = len(keypoints)
self.keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'}
self.roidb = None # Pre-load
def __len__(self):
return len(self.image_ids)
def __getitem__(self, idx):
if self.roidb is None:
return self.getitem(idx)
else:
return self.roidb[idx]
def getitem(self, idx):
''' Load the COCO dataset **only**, applying the necessary pre-processing:
1. obj['segmentation'] polygons should have >= 3 points, so require >= 6 coordinates
2. obj['area'] should >= GT_MIN_AREA
3. ignore objs with obj['ignore']==1
4. IOU(bbox, img) should > 0, Area(bbox) should > 0
Return:
rawdata {
dataset': self,
'id': image_id,
'image': os.path.join(self.imgroot, datainfo['file_name']),
'width': datainfo['width'],
'height': datainfo['height'],
'flipped': False,
'has_visible_keypoints': False/True,
'boxes': np.empty((GtN, 4), dtype=np.float32),
'segms': [GtN,],
'gt_classes': np.empty((GtN), dtype=np.int32),
'seg_areas': np.empty((GtN), dtype=np.float32),
'gt_overlaps': scipy.sparse.csr_matrix(
np.empty((GtN, 81), dtype=np.float32)
),
'is_crowd': np.empty((GtN), dtype=np.bool),
'box_to_gt_ind_map': np.empty((GtN), dtype=np.int32)
if self.keypoints is not None:
'gt_keypoints': np.empty((GtN, 3, self.num_keypoints), dtype=np.int32)
}
'''
# ---------------------------
# _prep_roidb_entry()
# ---------------------------
image_id = self.image_ids[idx]
datainfo = self.COCO.loadImgs(image_id)[0]
rawdata = {
#'dataset': self,
#'flickr_url': datainfo['flickr_url'],
'id': image_id,
#'coco_url': datainfo['coco_url'],
'image': os.path.join(self.imgroot, datainfo['file_name']),
'data': cv2.imread(os.path.join(self.imgroot, datainfo['file_name'])) if self.loadimg else None,
'width': datainfo['width'],
'height': datainfo['height'],
'flipped': False,
'has_visible_keypoints': False,
'boxes': np.empty((0, 4), dtype=np.float32),
'segms': [],
'gt_classes': np.empty((0), dtype=np.int32),
'seg_areas': np.empty((0), dtype=np.float32),
'gt_overlaps': scipy.sparse.csr_matrix(
np.empty((0, self.num_classes), dtype=np.float32)
),
'is_crowd': np.empty((0), dtype=bool),  # np.bool is removed in newer NumPy
# 'box_to_gt_ind_map': Shape is (#rois). Maps from each roi to the index
# in the list of rois that satisfy np.where(entry['gt_classes'] > 0)
'box_to_gt_ind_map': np.empty((0), dtype=np.int32),
# The only difference between gt_classes v.s. max_classes is about 'crowd' objs.
'max_classes': np.empty((0), dtype=np.int32),
'max_overlaps': np.empty((0), dtype=np.float32),
}
if self.keypoints is not None:
rawdata['gt_keypoints'] = np.empty((0, 3, self.num_keypoints), dtype=np.int32)
# ---------------------------
# _add_gt_annotations()
# ---------------------------
# Include ground-truth object annotations
"""Add ground truth annotation metadata to an roidb entry."""
ann_ids = self.COCO.getAnnIds(imgIds=rawdata['id'], catIds=self.category_ids, iscrowd=None)
objs = self.COCO.loadAnns(ann_ids)
# Sanitize bboxes -- some are invalid
valid_objs = []
valid_segms = []
width = rawdata['width']
height = rawdata['height']
for obj in objs:
# crowd regions are RLE encoded and stored as dicts
if isinstance(obj['segmentation'], list):
# Valid polygons have >= 3 points, so require >= 6 coordinates
obj['segmentation'] = [
p for p in obj['segmentation'] if len(p) >= 6
]
if obj['area'] < self.GT_MIN_AREA:
continue
if 'ignore' in obj and obj['ignore'] == 1:
continue
# Convert from (x1, y1, w, h) to (x1, y1, x2, y2)
x1, y1, bboxw, bboxh = obj['bbox']
x1, y1, x2, y2 = [x1, y1, x1 + bboxw - 1, y1 + bboxh - 1] # Note: -1 for h and w
x1 = min(width - 1., max(0., x1))
y1 = min(height - 1., max(0., y1))
x2 = min(width - 1., max(0., x2))
y2 = min(height - 1., max(0., y2))
# Require non-zero seg area and more than 1x1 box size
# print(obj['area'])
# print(str([x1,x2,y1,y2]))
if obj['area'] > 0 and x2 > x1 and y2 > y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
valid_segms.append(obj['segmentation'])
num_valid_objs = len(valid_objs)
if num_valid_objs==0: ## is_valid
# print ('ignore %d'%idx)
return self.getitem(idx+1)
boxes = np.zeros((num_valid_objs, 4), dtype=rawdata['boxes'].dtype)
gt_classes = np.zeros((num_valid_objs), dtype=rawdata['gt_classes'].dtype)
gt_overlaps = np.zeros(
(num_valid_objs, self.num_classes),
dtype=rawdata['gt_overlaps'].dtype
)
seg_areas = np.zeros((num_valid_objs), dtype=rawdata['seg_areas'].dtype)
is_crowd = np.zeros((num_valid_objs), dtype=rawdata['is_crowd'].dtype)
box_to_gt_ind_map = np.zeros(
(num_valid_objs), dtype=rawdata['box_to_gt_ind_map'].dtype
)
if self.keypoints is not None:
gt_keypoints = np.zeros(
(num_valid_objs, 3, self.num_keypoints),
dtype=rawdata['gt_keypoints'].dtype
)
im_has_visible_keypoints = False
for ix, obj in enumerate(valid_objs):
cls = self.json_category_id_to_contiguous_id[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
seg_areas[ix] = obj['area']
is_crowd[ix] = obj['iscrowd']
box_to_gt_ind_map[ix] = ix
if self.keypoints is not None:
gt_keypoints[ix, :, :] = self._get_gt_keypoints(obj)
if np.sum(gt_keypoints[ix, 2, :]) > 0:
im_has_visible_keypoints = True
if obj['iscrowd']:
# Set overlap to -1 for all classes for crowd objects
# so they will be excluded during training
gt_overlaps[ix, :] = -1.0
else:
gt_overlaps[ix, cls] = 1.0
rawdata['boxes'] = np.append(rawdata['boxes'], boxes, axis=0)
rawdata['segms'].extend(valid_segms)
# To match the original implementation:
# rawdata['boxes'] = np.append(
# rawdata['boxes'], boxes.astype(np.int).astype(np.float), axis=0)
rawdata['gt_classes'] = np.append(rawdata['gt_classes'], gt_classes)
rawdata['seg_areas'] = np.append(rawdata['seg_areas'], seg_areas)
rawdata['gt_overlaps'] = np.append(
rawdata['gt_overlaps'].toarray(), gt_overlaps, axis=0
)
rawdata['gt_overlaps'] = scipy.sparse.csr_matrix(rawdata['gt_overlaps'])
rawdata['is_crowd'] = np.append(rawdata['is_crowd'], is_crowd)
rawdata['box_to_gt_ind_map'] = np.append(
rawdata['box_to_gt_ind_map'], box_to_gt_ind_map
)
if self.keypoints is not None:
rawdata['gt_keypoints'] = np.append(
rawdata['gt_keypoints'], gt_keypoints, axis=0
)
rawdata['has_visible_keypoints'] = im_has_visible_keypoints
'''
The only difference between gt_classes vs. max_classes concerns 'crowd' objs.
In max_classes, crowd objs are assigned to the background (bg) class.
              bg    cls1  cls2  cls3 | gt_classes | max_overlaps | max_classes
obj1          0.0   1.0   0.0   0.0  | 1 (cls1)   | 1.0          | 1 (cls1)
obj2 (crowd) -1.0  -1.0  -1.0  -1.0  | 3 (cls3)   | -1.0         | 0 (bg)
obj3          0.0   0.0   1.0   0.0  | 2 (cls2)   | 1.0          | 2 (cls2)
'''
gt_overlaps = rawdata['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
rawdata['max_classes'] = max_classes
rawdata['max_overlaps'] = max_overlaps
return rawdata
def _get_gt_keypoints(self, obj):
"""Return ground truth keypoints."""
if 'keypoints' not in obj:
return None
kp = np.array(obj['keypoints'])
x = kp[0::3] # 0-indexed x coordinates
y = kp[1::3] # 0-indexed y coordinates
# 0: not labeled; 1: labeled, not inside mask;
# 2: labeled and inside mask
v = kp[2::3]
num_keypoints = len(obj['keypoints']) // 3
assert num_keypoints == self.num_keypoints
gt_kps = np.ones((3, self.num_keypoints), dtype=np.int32)
for i in range(self.num_keypoints):
gt_kps[0, i] = x[i]
gt_kps[1, i] = y[i]
gt_kps[2, i] = v[i]
return gt_kps
def transform_rawdata(self, rawdata, matrix, dstwidth, dstheight):
'''
See `get_affine_matrix` about the document of `matrix`.
Note that the padding value for both image and segms is (0,0,0). It is recommended to subtract the MEAN before
this operation. If you need different behaviour, override this function. (warning)
size_related_keys = ['width', 'height', 'seg_areas', 'data', 'boxes', 'segms', 'gt_keypoints']
'''
assert matrix.shape == (2,3)
# image
rawdata['data'] = cv2.warpAffine(rawdata['data'], matrix, (dstwidth, dstheight), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
GtN = len(rawdata['segms'])
# segm
for i in range(GtN):
if isinstance(rawdata['segms'][i], dict):
mask = annToMask(rawdata['segms'][i], rawdata['height'], rawdata['width'])
mask = cv2.warpAffine(mask, matrix, (dstwidth, dstheight), flags=cv2.INTER_NEAREST,
borderMode=cv2.BORDER_CONSTANT, borderValue=0) # or 255
rawdata['segms'][i] = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
elif isinstance(rawdata['segms'][i], list):
for poly_id, poly in enumerate(rawdata['segms'][i]):
cors = np.array(poly).reshape(-1, 2)
cors_new = np.hstack((cors, np.ones((len(cors), 1), np.float32))).dot(matrix.T)
cors_new[:, 0] = np.clip(cors_new[:, 0], 0, dstwidth)
cors_new[:, 1] = np.clip(cors_new[:, 1], 0, dstheight)
rawdata['segms'][i][poly_id] = cors_new.flatten().tolist()  # keep the full flat polygon, not just its first coordinate
else:
print ('segm type error!')
# box: (GtN,2) -> (GtN,3)(dot)(3,2) -> (GtN,2)
rawdata['boxes'][:, 0:2] = np.hstack((rawdata['boxes'][:, 0:2], np.ones((GtN, 1), np.float32))).dot(matrix.T)
rawdata['boxes'][:, 2:4] = np.hstack((rawdata['boxes'][:, 2:4], np.ones((GtN, 1), np.float32))).dot(matrix.T)
rawdata['boxes'][:, 0::2] = np.clip(rawdata['boxes'][:, 0::2], 0, dstwidth) # -1 ?
rawdata['boxes'][:, 1::2] = np.clip(rawdata['boxes'][:, 1::2], 0, dstheight)
if self.keypoints is not None:
# gt_keypoint: (GtN,2,NumKpt) -> (GtN,NumKpt,3)(dot)(3,2) -> (GtN,NumKpt,2) -> (GtN,2,NumKpt)
rawdata['gt_keypoints'][:, 0:2, :] = \
np.concatenate((rawdata['gt_keypoints'][:, 0:2, :].transpose((0, 2, 1)),
np.ones((GtN, self.num_keypoints, 1), np.float32)), axis=2).dot(matrix.T).transpose((0, 2, 1))
inds = np.where(rawdata['gt_keypoints'][:, 2, :] == 0)
rawdata['gt_keypoints'][inds[0], 0, inds[1]] = 0
rawdata['gt_keypoints'][:, 0, :] = np.clip(rawdata['gt_keypoints'][:, 0, :], 0, dstwidth)
from itertools import product
import numpy as np
import scipy.sparse
import regreg.api as rr
from regreg.identity_quadratic import identity_quadratic as sq
import nose.tools as nt
def test_centering():
"""
This test verifies that the normalized transform
of affine correctly implements the linear
transform that multiplies first by X, then centers.
"""
# N - number of data points
# P - number of columns in design == number of betas
N, P = 40, 30
# an arbitrary positive offset for data and design
offset = 50
# design - with ones as last column
X = np.ones((N,P))
X[:,:-1] = np.random.normal(size=(N,P-1)) + offset
X2 = X - X.mean(axis=0)[np.newaxis,:]
L = rr.normalize(X, center=True, scale=False)
# coef for loss
for _ in range(10):
beta = np.random.normal(size=(P,))
v = L.linear_map(beta)
v2 = np.dot(X, beta)
v2 -= v2.mean()
v3 = np.dot(X2, beta)
v4 = L.affine_map(beta)
np.testing.assert_almost_equal(v, v3)
np.testing.assert_almost_equal(v, v2)
np.testing.assert_almost_equal(v, v4)
y = np.random.standard_normal(N)
u1 = L.adjoint_map(y)
y2 = y - y.mean()
u2 = np.dot(X.T, y2)
np.testing.assert_almost_equal(u1, u2)
def test_scaling():
"""
This test verifies that the normalized transform
of affine correctly implements the linear
transform that multiplies first by X, then centers.
"""
# N - number of data points
# P - number of columns in design == number of betas
N, P = 40, 30
# an arbitrary positive offset for data and design
offset = 50
# design - with ones as last column
X = np.ones((N,P))
X[:,:-1] = np.random.normal(size=(N,P-1)) + offset
L = rr.normalize(X, center=False, scale=True)
# coef for loss
scalings = np.sqrt((X**2).sum(0) / N)
scaling_matrix = np.diag(1./scalings)
for _ in range(10):
beta = np.random.normal(size=(P,))
v = L.linear_map(beta)
v2 = np.dot(X, np.dot(scaling_matrix, beta))
v3 = L.affine_map(beta)
np.testing.assert_almost_equal(v, v2)
np.testing.assert_almost_equal(v, v3)
y = np.random.standard_normal(N)
u1 = L.adjoint_map(y)
u2 = np.dot(scaling_matrix, np.dot(X.T, y))
np.testing.assert_almost_equal(u1, u2)
def test_scaling_and_centering():
"""
This test verifies that the normalized transform
of affine correctly implements the linear
transform that multiplies first by X, then centers.
"""
# N - number of data points
# P - number of columns in design == number of betas
N, P = 40, 30
# an arbitrary positive offset for data and design
offset = 50
# design - with no colum of ones!
X = np.random.normal(size=(N,P)) + offset
L = rr.normalize(X, center=True, scale=True) # the default
# coef for loss
scalings = np.std(X, 0)
scaling_matrix = np.diag(1./scalings)
for _ in range(10):
beta = np.random.normal(size=(P,))
v = L.linear_map(beta)
v2 = np.dot(X, np.dot(scaling_matrix, beta))
v2 -= v2.mean()
np.testing.assert_almost_equal(v, v2)
y = np.random.standard_normal(N)
u1 = L.adjoint_map(y)
y2 = y - y.mean()
u2 = np.dot(scaling_matrix, np.dot(X.T, y2))
np.testing.assert_almost_equal(u1, u2)
def test_centering_fit(debug=False):
# N - number of data points
# P - number of columns in design == number of betas
N, P = 40, 30
# an arbitrary positive offset for data and design
offset = 50
# design - with ones as last column
X = np.ones((N,P))
X = np.random.normal(size=(N,P)) + offset
X2 = X - X.mean(axis=0)[np.newaxis,:]
# the normalizer
L = rr.normalize(X, center=True, scale=False)
# data
Y = np.random.normal(size=(N,)) + offset
# coef for loss
coef = 0.5
# lagrange for penalty
lagrange = .1
# Loss function (squared difference between fitted and actual data)
loss = rr.quadratic.affine(L, -Y, coef=coef)
penalties = [rr.constrained_positive_part(25, lagrange=lagrange),
rr.nonnegative(5)]
groups = [slice(0,25), slice(25,30)]
penalty = rr.separable((P,), penalties,
groups)
initial = np.random.standard_normal(P)
composite_form = rr.separable_problem.fromatom(penalty, loss)
solver = rr.FISTA(composite_form)
solver.debug = debug
solver.fit(tol=1.0e-12, min_its=200)
coefs = solver.composite.coefs
# Solve the problem with X2
loss2 = rr.quadratic.affine(X2, -Y, coef=coef)
initial2 = np.random.standard_normal(P)
composite_form2 = rr.separable_problem.fromatom(penalty, loss2)
for _ in range(10):
beta = np.random.standard_normal(P)
g1 = loss.smooth_objective(beta, mode='grad')
g2 = loss2.smooth_objective(beta, mode='grad')
np.testing.assert_almost_equal(g1, g2)
b1 = penalty.proximal(sq(1, beta, g1, 0))
b2 = penalty.proximal(sq(1, beta, g2, 0))
np.testing.assert_almost_equal(b1, b2)
f1 = composite_form.objective(beta)
f2 = composite_form2.objective(beta)
np.testing.assert_almost_equal(f1, f2)
solver2 = rr.FISTA(composite_form2)
solver2.debug = debug
solver2.fit(tol=1.0e-12, min_its=200)
coefs2 = solver2.composite.coefs
np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))
np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))
nt.assert_true(np.linalg.norm(coefs - coefs2) / max(np.linalg.norm(coefs),1) < 1.0e-04)
def test_scaling_fit(debug=False):
# N - number of data points
# P - number of columns in design == number of betas
N, P = 40, 30
# an arbitrary positive offset for data and design
offset = 2
# design - with ones as last column
X = np.ones((N,P))
X[:,:-1] = np.random.normal(size=(N,P-1)) + offset
X2 = X / (np.sqrt((X**2).sum(0) / N))[np.newaxis,:]
L = rr.normalize(X, center=False, scale=True)
# data
Y = np.random.normal(size=(N,)) + offset
# coef for loss
coef = 0.5
# lagrange for penalty
lagrange = .1
# Loss function (squared difference between fitted and actual data)
loss = rr.quadratic.affine(L, -Y, coef=coef)
penalties = [rr.constrained_positive_part(25, lagrange=lagrange),
rr.nonnegative(5)]
groups = [slice(0,25), slice(25,30)]
penalty = rr.separable((P,), penalties,
groups)
initial = np.random.standard_normal(P)
composite_form = rr.separable_problem.fromatom(penalty, loss)
solver = rr.FISTA(composite_form)
solver.debug = debug
solver.fit(tol=1.0e-12, min_its=200)
coefs = solver.composite.coefs
# Solve the problem with X2
loss2 = rr.quadratic.affine(X2, -Y, coef=coef)
initial2 = np.random.standard_normal(P)
composite_form2 = rr.separable_problem.fromatom(penalty, loss2)
solver2 = rr.FISTA(composite_form2)
solver2.debug = debug
solver2.fit(tol=1.0e-12, min_its=200)
coefs2 = solver2.composite.coefs
for _ in range(10):
beta = np.random.standard_normal(P)
g1 = loss.smooth_objective(beta, mode='grad')
g2 = loss2.smooth_objective(beta, mode='grad')
np.testing.assert_almost_equal(g1, g2)
b1 = penalty.proximal(sq(1, beta, g1, 0))
b2 = penalty.proximal(sq(1, beta, g2, 0))
np.testing.assert_almost_equal(b1, b2)
f1 = composite_form.objective(beta)
f2 = composite_form2.objective(beta)
np.testing.assert_almost_equal(f1, f2)
np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))
np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))
nt.assert_true(np.linalg.norm(coefs - coefs2) / max(np.linalg.norm(coefs),1) < 1.0e-04)
def test_scaling_and_centering_fit(debug=False):
# N - number of data points
# P - number of columns in design == number of betas
N, P = 40, 30
# an arbitrary positive offset for data and design
offset = 2
# design - with ones as last column
X = np.random.normal(size=(N,P)) + offset
X2 = X - X.mean(0)[np.newaxis,:]
X2 = X2 / np.std(X2,0)[np.newaxis,:]
L = rr.normalize(X, center=True, scale=True)
# data
Y = np.random.normal(size=(N,)) + offset
# coef for loss
coef = 0.5
# lagrange for penalty
lagrange = .1
# Loss function (squared difference between fitted and actual data)
loss = rr.quadratic.affine(L, -Y, coef=coef)
penalties = [rr.constrained_positive_part(25, lagrange=lagrange),
rr.nonnegative(5)]
groups = [slice(0,25), slice(25,30)]
penalty = rr.separable((P,), penalties,
groups)
initial = np.random.standard_normal(P)
composite_form = rr.separable_problem.fromatom(penalty, loss)
solver = rr.FISTA(composite_form)
solver.debug = debug
solver.fit(tol=1.0e-12, min_its=200)
coefs = solver.composite.coefs
# Solve the problem with X2
loss2 = rr.quadratic.affine(X2, -Y, coef=coef)
initial2 = np.random.standard_normal(P)
composite_form2 = rr.separable_problem.fromatom(penalty, loss2)
solver2 = rr.FISTA(composite_form2)
solver2.debug = debug
solver2.fit(tol=1.0e-12, min_its=200)
coefs2 = solver2.composite.coefs
for _ in range(10):
beta = np.random.standard_normal(P)
g1 = loss.smooth_objective(beta, mode='grad')
g2 = loss2.smooth_objective(beta, mode='grad')
np.testing.assert_almost_equal(g1, g2)
b1 = penalty.proximal(sq(1, beta, g1, 0))
b2 = penalty.proximal(sq(1, beta, g2, 0))
np.testing.assert_almost_equal(b1, b2)
f1 = composite_form.objective(beta)
f2 = composite_form2.objective(beta)
np.testing.assert_almost_equal(f1, f2)
np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))
np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))
nt.assert_true(np.linalg.norm(coefs - coefs2) / max(np.linalg.norm(coefs),1) < 1.0e-04)
def test_scaling_and_centering_fit_inplace(debug=False):
# N - number of data points
# P - number of columns in design == number of betas
N, P = 40, 30
# an arbitrary positive offset for data and design
offset = 2
# design
X = np.random.normal(size=(N,P))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Summarizers for Score Predictors
"""
from __future__ import (
print_function,
division,
absolute_import,
unicode_literals)
from six.moves import xrange
# =============================================================================
# Imports
# =============================================================================
import sys
import logging
import json
import numpy as np
from sklearn.utils import (
as_float_array, assert_all_finite, check_consistent_length)
import sklearn.metrics as skm
from . import mean_absolute_error, mean_squared_error, score_histogram
# =============================================================================
# Metadata variables
# =============================================================================
# =============================================================================
# Public symbols
# =============================================================================
__all__ = []
# =============================================================================
# Constants
# =============================================================================
# =============================================================================
# Variables
# =============================================================================
# =============================================================================
# Functions
# =============================================================================
def score_predictor_report(y_true, y_pred, disp=True):
"""
Report brief summary of prediction performance
* mean absolute error
* root mean squared error
* number of data
* mean and standard dev. of true scores
* mean and standard dev. of predicted scores
Parameters
----------
y_true : array, shape(n_samples,)
Ground truth scores
y_pred : array, shape(n_samples,)
Predicted scores
disp : bool, optional, default=True
if True, print report
Returns
-------
stats : dict
brief summary of prediction performance
"""
# check inputs
assert_all_finite(y_true)
y_true = as_float_array(y_true)
assert_all_finite(y_pred)
y_pred = as_float_array(y_pred)
check_consistent_length(y_true, y_pred)
# calc statistics
stats = {
'mean absolute error': skm.mean_absolute_error(y_true, y_pred),
'root mean squared error':
np.sqrt(np.maximum(skm.mean_squared_error(y_true, y_pred), 0.)),
'n_samples': y_true.size,
'true': {'mean': np.mean(y_true), 'stdev': np.std(y_true)},
# coding: utf-8
""" Misc utility functions """
from __future__ import (division, print_function, absolute_import,
unicode_literals)
from six import string_types
import numpy as np
from .robust_polyfit import gaussfit
from scipy import interpolate, signal, stats
import emcee
import time
from astropy import units
from astropy.stats.biweight import biweight_location, biweight_scale
from astropy import coordinates as coord
def struct2array(x):
""" Convert numpy structured array of simple type to normal numpy array """
Ncol = len(x.dtype)
type = x.dtype[0].type
assert np.all([x.dtype[i].type == type for i in range(Ncol)])
return x.view(type).reshape((-1,Ncol))
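# Editorial sketch (not part of the original module): a structured array whose
# fields all share one dtype flattens to a plain (N, Ncol) array.
def _demo_struct2array():
    rec = np.zeros(3, dtype=[('a', 'f8'), ('b', 'f8')])
    rec['a'] = [1.0, 2.0, 3.0]
    rec['b'] = [4.0, 5.0, 6.0]
    arr = struct2array(rec)
    assert arr.shape == (3, 2)
    return arr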
def vac2air(lamvac):
"""
http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
Morton 2000
"""
s2 = (1e4/lamvac)**2
n = 1 + 0.0000834254 + 0.02406147 / (130 - s2) + 0.00015998 / (38.9 - s2)
return lamvac/n
def air2vac(lamair):
"""
http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
Piskunov
"""
s2 = (1e4/lamair)**2
n = 1 + 0.00008336624212083 + 0.02408926869968 / (130.1065924522 - s2) + 0.0001599740894897 / (38.92568793293 - s2)
return lamair*n
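# Editorial sketch (not part of the original module): vac2air and air2vac use
# slightly different standard formulae (Morton 2000 vs. Piskunov), so a round
# trip is only approximate. The 5000 Angstrom test value is arbitrary.
def _demo_air_vac_roundtrip():
    lam_vac = 5000.0
    lam_air = vac2air(lam_vac)
    lam_back = air2vac(lam_air)
    return lam_air, lam_back  # expect lam_back to be very close to lam_vac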
##def find_distribution_peak(x, x0, s0=None, bins='auto'):
## """
## Take a histogram of the data and find the location of the peak closest to x0
## x: data (no nan's allowed)
## x0: peak location guess
## s0: width of peak guess (default std(x))
## """
## h, x = np.histogram(x, bins=bins)
## x = (x[1:]+x[:-1])/2.
## # find positive peak locations based on derivative
## dh = np.gradient(h)
## peaklocs = np.where((dh[:-1] > 0) & (dh[1:] < 0))[0]
## if len(peaklocs)==0:
## raise ValueError("No peaks found!")
## # get the best peak
## xpeaks = x[peaklocs+1]
## bestix = np.argmin(np.abs(xpeaks - x0))
## xbest = x[bestix]
## ybest = h[bestix]
## if s0 is None:
## s_est = np.std(x)
## else:
## s_est = s0
## # fit a Gaussian and get the peak
## A, xfit, s = gaussfit(x, h, [ybest, xbest, s_est], maxfev=99999)
## return xfit
def get_cdf_raw(x):
"""
Take a set of points x and find the CDF.
Returns xcdf, ycdf, where ycdf = p(x < xcdf)
Defined such that p(min(x)) = 0 and p(max(x)) = (len(x) - 1)/len(x)
"""
xcdf = np.sort(x)
ycdf = np.arange(len(x)).astype(float)/float(len(x))
return xcdf, ycdf
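# Editorial sketch (not part of the original module): for a handful of points
# the raw CDF is just the sorted sample paired with a uniform grid whose step
# is 1/len(x).
def _demo_get_cdf_raw():
    xcdf, ycdf = get_cdf_raw(np.array([3.0, 1.0, 2.0, 4.0]))
    assert np.allclose(xcdf, [1.0, 2.0, 3.0, 4.0])
    assert np.allclose(np.diff(ycdf), 0.25)
    return xcdf, ycdf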
def find_distribution_mode(x, percentile=5., full_output=False):
"""
Find the mode of the PDF of a set of elements.
Algorithm inspired by <NAME>
"""
xcdf, ycdf = get_cdf_raw(x)
xval = xcdf[1:]
pdf = np.diff(ycdf)
# Take the lowest percentile differences, these bunch up near the mode
pdfcut = np.percentile(pdf, percentile)
# Take the median of the x's where they bunch up
return np.median(xval[pdf < pdfcut])
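# Editorial sketch (not part of the original module): on a unimodal synthetic
# sample the spacing-based mode estimate should land near the true peak. Seed
# and sample size are arbitrary; no hard assertion since the estimate is
# stochastic.
def _demo_find_distribution_mode():
    rng = np.random.default_rng(0)
    sample = rng.normal(loc=2.0, scale=0.5, size=5000)
    return find_distribution_mode(sample)  # expect a value near 2.0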
def get_cdf(x, smoothiter=3, window=None, order=1, **kwargs):
"""
Take a set of points x and find the CDF.
Use get_cdf_raw, fit and return an interpolating function.
By default we use scipy.interpolate.Akima1DInterpolator
kwargs are passed to the interpolating function.
(We do not fit a perfectly interpolating spline right now, because if two points have identical x's,
you get infinity for the derivative.)
Note: scipy v1 has a bug in all splines right now that rejects any distribution of points with exactly equal x's.
You can obtain the PDF by taking the first derivative.
"""
xcdf, ycdf = get_cdf_raw(x)
# Smooth the CDF
if window is None:
window = int(len(xcdf)/100.)
else:
window = int(window)
if window % 2 == 0: window += 1
F = interpolate.PchipInterpolator(xcdf,ycdf,extrapolate=False)#**kwargs)
for i in range(smoothiter):
ycdf = signal.savgol_filter(F(xcdf), window, order)
F = interpolate.PchipInterpolator(xcdf,ycdf,extrapolate=False)#**kwargs)
#if "ext" not in kwargs:
# kwargs["ext"] = 3 #return the boundary value rather than extrapolating
#F = interpolate.UnivariateSpline(xcdf, ycdf, **kwargs)
#F = interpolate.Akima1DInterpolator(xcdf,ycdf,**kwargs)
return F
def find_distribution_peak(x, xtol, full_output=False):
"""
Finds largest peak from an array of real numbers (x).
Algorithm:
- Compute the CDF by fitting cubic smoothing spline
- find PDF with derivative (2nd order piecewise poly)
- Sample the PDF at xtol/2 points
- Find peaks in the PDF as the maximum points
- Return the biggest PDF peak
"""
Fcdf = get_cdf(x)
fpdf = Fcdf.derivative()
xsamp = np.arange(np.min(x), np.max(x)+xtol, xtol/2.)
pdf = np.nan_to_num(fpdf(xsamp))  # xsamp can step just past max(x), where the non-extrapolating spline returns NaN
ix = np.argmax(pdf)
xpeak = xsamp[ix]
if full_output:
return xpeak, xsamp, pdf
else:
return xpeak
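# Editorial sketch (not part of the original module): the CDF-spline peak
# finder on a unimodal synthetic sample; xtol sets the sampling resolution of
# the reconstructed PDF. Seed and sample size are arbitrary.
def _demo_find_distribution_peak():
    rng = np.random.default_rng(1)
    sample = rng.normal(loc=-1.0, scale=0.3, size=4000)
    return find_distribution_peak(sample, xtol=0.01)  # expect a value near -1.0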
def find_confidence_region(x, p, xtol, full_output=False):
"""
Finds smallest confidence region from an array of real numbers (x).
Algorithm:
- use find_distribution_peak to get the peak value and pdf
The pdf is uniformly sampled at xtol/2 from min(x) to max(x)
- initialize two tracers x1 and x2 on either side of the peak value
- step x1 and x2 down (by xtol/2), picking which one locally adds more to the enclosed probability
Note this does not work so well with multimodal things.
It is also pretty slow, and not so accurate (does not interpolate the PDF or do adaptive stepping)
"""
assert (p > 0) and (p < 1)
xpeak, xsamp, pdf = find_distribution_peak(x, xtol, full_output=True)
pdf = pdf / np.sum(pdf) # normalize this to 1
## Initialize the interval
ipeak = np.argmin(np.abs(xsamp-xpeak))
i1 = ipeak-1; i2 = ipeak+1
p1 = pdf[i1]; p2 = pdf[i2]
current_prob = pdf[ipeak]
def step_left():
    # nonlocal is required so the helper can update the enclosing-scope state
    nonlocal current_prob, i1, p1
    current_prob += p1
    i1 -= 1
    p1 = pdf[i1]
def step_right():
    nonlocal current_prob, i2, p2
    current_prob += p2
    i2 += 1
    p2 = pdf[i2]
## Expand the interval until you get enough probability
while current_prob < p:
# If you reached the end, expand the left/right until you're done
if i1 <= 0:
while current_prob < p:
step_right()
break
# If you reached the end, expand the left until you're done
if i2 >= len(pdf):
while current_prob < p:
step_left()
break
# Step in the direction
if p1 > p2:
step_left()
elif p1 < p2:
step_right()
else: # Pick a direction at random if exactly equal
if np.random.random() > 0.5:
step_right()
else:
step_left()
if full_output:
return xsamp[i1], xpeak, xsamp[i2], current_prob, xsamp, pdf
return xsamp[i1], xpeak, xsamp[i2]
def box_select(x,y,topleft,topright,botleft,botright):
"""
Select x, y within a box defined by the corner points
I think this fails if the box is not convex.
"""
assert len(x) == len(y)
x = np.ravel(x)
y = np.ravel(y)
selection = np.ones_like(x, dtype=bool)
# Check the directions all make sense
# I think I assume the box is convex
assert botleft[1] <= topleft[1], (botleft, topleft)
assert botright[1] <= topright[1], (botright, topright)
assert topleft[0] <= topright[0], (topleft, topright)
assert botleft[0] <= botright[0], (botleft, botright)
# left boundary
(x1,y1), (x2,y2) = botleft, topleft
m = (x2-x1)/(y2-y1)
selection[x < m*(y-y1) + x1] = False
# right boundary
(x1,y1), (x2,y2) = botright, topright
m = (x2-x1)/(y2-y1)
selection[x > m*(y-y1) + x1] = False
# top boundary
(x1,y1), (x2,y2) = topleft, topright
m = (y2-y1)/(x2-x1)
selection[y > m*(x-x1) + y1] = False
# bottom boundary
(x1,y1), (x2,y2) = botleft, botright
m = (y2-y1)/(x2-x1)
selection[y < m*(x-x1) + y1] = False
return selection
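# Editorial sketch (not part of the original module): select grid points that
# fall inside an axis-aligned unit box; corners are passed as (x, y) pairs,
# matching the argument order used by box_select.
def _demo_box_select():
    xs, ys = np.meshgrid(np.linspace(0.0, 2.0, 5), np.linspace(0.0, 2.0, 5))
    sel = box_select(xs, ys,
                     topleft=(0.0, 1.0), topright=(1.0, 1.0),
                     botleft=(0.0, 0.0), botright=(1.0, 0.0))
    return sel  # flat boolean mask over the raveled grid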
def linefit_2d(x, y, ex, ey, fit_outliers=False, full_output=False,
nwalkers=20, Nburn=200, Nrun=1000, percentiles=[5,16,50,84,95]):
"""
Fits a line to a set of data (x, y) with independent gaussian errors (ex, ey) using MCMC.
Based on Hogg et al. 2010
Returns simple estimate for m, b, and uncertainties.
If fit_outliers=True, fits a background model that is a very flat/wide gaussian.
Then also returns estimates for
If full_output=True, return the full MCMC sampler
y = m x + b
"""
assert len(x)==len(y)==len(ex)==len(ey)
assert np.all(np.isfinite(x))
assert np.all(np.isfinite(y))
X = np.vstack([x,y]).T
ex2 = ex**2
ey2 = ey**2
# m = tan(theta)
# bt = b cos(theta)
if fit_outliers:
raise NotImplementedError
else:
def lnprior(params):
theta, bt = params
if theta < -np.pi/2 or theta >= np.pi/2: return -np.inf
return 0
def lnlkhd(params):
theta, bt = params
v = np.array([-np.sin(theta), np.cos(theta)])
Delta = X.dot(v) - bt
Sigma2 = v[0]*v[0]*ex2 + v[1]*v[1]*ey2
return lnprior(params) - 0.5 * np.sum(Delta**2/Sigma2)
# Initialize walkers
ymin, ymax = np.min(y), np.max(y)
bt0 = np.random.uniform(ymin,ymax,nwalkers)
theta0 = np.random.uniform(-np.pi/2,np.pi/2,nwalkers)
p0 = np.vstack([theta0,bt0]).T
ndim = 2
sampler = emcee.EnsembleSampler(nwalkers,ndim,lnlkhd)
sampler.run_mcmc(p0,Nburn)
pos = sampler.chain[:,-1,:]
sampler.reset()
sampler.run_mcmc(pos,Nrun)
theta, bt = sampler.flatchain.T
m = np.tan(theta)
b = bt/np.cos(theta)
m_out = np.nanpercentile(m, percentiles)
b_out = np.nanpercentile(b, percentiles)
if full_output:
return m_out, b_out, m, b, sampler
return m_out, b_out
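# Editorial sketch (not part of the original module): recover a known slope and
# intercept from synthetic data with small, equal errors. Walker and step
# counts are reduced from the defaults only to keep the demo quick; all
# arguments follow the linefit_2d signature above.
def _demo_linefit_2d():
    rng = np.random.default_rng(2)
    x = np.linspace(0.0, 10.0, 50)
    y = 2.0 * x + 1.0 + rng.normal(0.0, 0.1, 50)
    ex = np.full_like(x, 0.1)
    ey = np.full_like(y, 0.1)
    m_out, b_out = linefit_2d(x, y, ex, ey, Nburn=100, Nrun=300)
    return m_out, b_out  # percentile summaries; the medians should sit near 2 and 1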
def parse_m2fs_fibermap(fname):
import pandas as pd
from astropy.coordinates import SkyCoord
def mungehms(hmsstr,sep=':'):
h,m,s = hmsstr.split(sep)
return h+'h'+m+'m'+s+'s'
def mungedms(dmsstr,sep=':'):
d,m,s = dmsstr.split(sep)
return d+'d'+m+'m'+s+'s'
def parse_assignments(cols, assignments):
colnames = cols.split()
data = []
for line in assignments:
data.append(line.split())
return pd.DataFrame(data, columns=colnames)
with open(fname,"r") as fp:
lines = fp.readlines()
center_radec = lines[3]
center_radec = center_radec.split()
center_ra = mungehms(center_radec[2])
center_dec= mungedms(center_radec[3])
center = SkyCoord(ra=center_ra, dec=center_dec)
for i, line in enumerate(lines):
if "[assignments]" in line: break
lines = lines[i+1:]
line = lines[0]
cols_assignments = lines[0]
assignments = []
for i,line in enumerate(lines[1:]):
if "]" in line: break
assignments.append(line)
lines = lines[i+2:]
cols_guides = lines[0]
guides = []
for i,line in enumerate(lines[1:]):
if "]" in line: break
guides.append(line)
df1 = parse_assignments(cols_assignments, assignments)
df2 = parse_assignments(cols_guides, guides)
return df1, df2
def quick_healpix(coo, nside, galactic=False):
import healpy as hp
from astropy import units as u
npix = hp.nside2npix(nside)
area = hp.nside2pixarea(nside, degrees=True)
hpmap = np.zeros(npix)
if galactic:
theta, phi = np.pi/2 - coo.b.radian, coo.l.wrap_at(180*u.deg).radian
else:
theta, phi = np.pi/2 - coo.dec.radian, coo.ra.wrap_at(180*u.deg).radian
pixels = hp.ang2pix(nside, theta, phi)
np.add.at(hpmap, pixels, 1)
hp.mollview(hpmap)
return hpmap, area
def xbin_yscat(x, y, xbins, q=[2.5,16,50,84,97.5], Nmin=1):
"""
Take x,y pairs. Bin in x, find percentiles in y.
Input: x and y, xbins
q : default [2.5, 16, 50, 84, 97.5]
percentiles to compute (passed to np.percentile)
Nmin : default 1
minimum number of points per bin to be used (otherwise nan)
Return: xbins centers, ydata percentiles (Nbin x Npercentile)
"""
assert len(x) == len(y)
xp = (xbins[1:]+xbins[:-1])/2
Nbins = len(xp)
ydat = np.zeros((Nbins, len(q))) + np.nan
bin_nums = np.digitize(x, xbins)
for ibin in range(Nbins):
bin_num = ibin + 1
vals = y[bin_nums == bin_num]
if len(vals) < Nmin: continue
ydat[ibin, :] = np.percentile(vals, q)
return xp, ydat
def xbin_ybwt(x, y, xbins, Nmin=1):
"""
Take x,y pairs. Bin in x, find biweight location and scale
Input: x and y, xbins
Nmin : default 1
minimum number of points per bin to be used (otherwise nan)
Return: xbins centers, yloc, yscale
"""
assert len(x) == len(y)
xp = (xbins[1:]+xbins[:-1])/2
Nbins = len(xp)
yloc = np.zeros(Nbins) + np.nan
yscale = np.zeros(Nbins) + np.nan
bin_nums = np.digitize(x, xbins)
for ibin in range(Nbins):
bin_num = ibin + 1
vals = y[bin_nums == bin_num]
if len(vals) < Nmin: continue
yloc[ibin] = biweight_location(vals)
yscale[ibin] = biweight_scale(vals)
return xp, yloc, yscale
def xbin_ymean(x, y, xbins, Nmin=1):
"""
Take x,y pairs. Bin in x, find mean and stdev
Input: x and y, xbins
Nmin : default 1
minimum number of points per bin to be used (otherwise nan)
Return: xbins centers, ymeans, ystdevs
"""
assert len(x) == len(y)
xp = (xbins[1:]+xbins[:-1])/2
Nbins = len(xp)
ymeans = np.zeros(Nbins) + np.nan
ystdevs = np.zeros(Nbins) + np.nan
bin_nums = np.digitize(x, xbins)
for ibin in range(Nbins):
bin_num = ibin + 1
vals = y[bin_nums == bin_num]
if len(vals) < Nmin: continue
ymeans[ibin] = np.nanmean(vals)
ystdevs[ibin] = np.nanstd(vals)
return xp, ymeans, ystdevs
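# Usage sketch for the three xbin_* helpers above (illustrative data; they all
# share the same calling convention and return the bin centers first):
#   >>> xbins = np.linspace(0, 10, 11)
#   >>> xc, yq = xbin_yscat(x, y, xbins)            # yq has shape (10, len(q))
#   >>> xc, yloc, yscale = xbin_ybwt(x, y, xbins)   # biweight location/scale per bin
#   >>> xc, ymean, ystd = xbin_ymean(x, y, xbins)   # mean/stdev per bin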
def plot_gaussian_distrs(ax, df, xcol, ecol, xplot,
plot_individual_stars=True,
color='k', lw=5, ls='-',
scale_ind=1.0, ls_ind='-', lw_ind=0.5, label=None):
"""
Plots the distribution assuming all data is sums of individual little Gaussians
ax: where to plot
df: data frame
xcol, ecol: which columns to use for the mean and stdev of each individual datapoint
xplot: range of x to compute the total
plot_individual_stars: if True, plots little gaussians for everything
Other plotting kws:
label, color, lw, ls
ls_ind, lw_ind (for individual stars)
"""
x = df[xcol].values
err = df[ecol].values
finite = np.isfinite(x) & np.isfinite(err)
if np.sum(finite) != len(finite):
print("Dropping {} stars".format(len(finite)-np.sum(finite)))
print(df.index[~finite])
xs, errs = x[finite], err[finite]
N = len(xs)
all_yplot = np.zeros((len(xplot),N))
print(len(xplot), all_yplot.shape)
for i,(x,err) in enumerate(zip(xs,errs)):
all_yplot[:,i] = stats.norm.pdf(xplot,loc=x,scale=err)
total_yplot = np.ravel(np.nansum(all_yplot,axis=1))/N
if plot_individual_stars:
for i in range(N):
ax.plot(xplot,scale_ind*all_yplot[:,i],'-',ls=ls_ind,lw=lw_ind,color=color,zorder=-9)
ax.plot(xplot,total_yplot,'-',ls=ls,lw=lw,color=color,zorder=9,label=label)
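# Usage sketch (illustrative; the column names are hypothetical and this assumes
# matplotlib.pyplot as plt and scipy.stats as stats, as used above):
#   >>> fig, ax = plt.subplots()
#   >>> plot_gaussian_distrs(ax, df, "FeH", "e_FeH", np.linspace(-4, 0, 401),
#   ...                      color="k", label="sample")
# Each star contributes a unit-area Gaussian N(x_i, err_i); the thick curve is
# their average, so it integrates to ~1 over the plotted range.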
def get_position_angle(coo1, coo2):
"""
Based on https://idlastro.gsfc.nasa.gov/ftp/pro/astro/posang.pro
"""
dRA = coo2.ra - coo1.ra
numer = np.sin(dRA)
denom = np.cos(coo1.dec)*np.tan(coo2.dec) - np.sin(coo1.dec)*np.cos(dRA)
PA = np.arctan2(numer,denom)
#print(PA)
if PA < 0: PA += 2*np.pi*units.rad
return PA
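# Usage sketch (illustrative): position angle of coo2 as seen from coo1,
# measured east of north following the IDL astrolib posang convention.
#   >>> pa = get_position_angle(coo1, coo2)   # astropy Quantity in radians, wrapped to [0, 2*pi)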
def rv_to_gsr(c, v_sun=None):
"""
Accessed 2020-12-09
https://docs.astropy.org/en/stable/generated/examples/coordinates/rv-to-gsr.html
Transform a barycentric radial velocity to the Galactic Standard of Rest
(GSR).
    The input radial velocity must be attached to a coordinate object
    (e.g. a `~astropy.coordinates.SkyCoord` carrying a ``radial_velocity``
    component).
    Parameters
    ----------
    c : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
        The radial velocity, associated with a sky coordinate, to be
        transformed.
v_sun : `~astropy.units.Quantity`, optional
The 3D velocity of the solar system barycenter in the GSR frame.
Defaults to the same solar motion as in the
`~astropy.coordinates.Galactocentric` frame.
Returns
-------
v_gsr : `~astropy.units.Quantity`
The input radial velocity transformed to a GSR frame.
"""
if v_sun is None:
v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian()
gal = c.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
v_proj = v_sun.dot(unit_vector)
return c.radial_velocity + v_proj
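# Usage sketch (illustrative values; assumes astropy.coordinates as coord and
# astropy.units as u, as used above):
#   >>> icrs = coord.SkyCoord(ra=150.0*u.deg, dec=-30.0*u.deg,
#   ...                       radial_velocity=100.0*u.km/u.s, frame="icrs")
#   >>> v_gsr = rv_to_gsr(icrs)   # barycentric RV shifted into the GSR frame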
def reflex_correct(coords):
""" https://gala-astro.readthedocs.io/en/latest/_modules/gala/coordinates/reflex.html#reflex_correct """
galactocentric_frame = coord.Galactocentric()
c = coord.SkyCoord(coords)
v_sun = galactocentric_frame.galcen_v_sun
observed = c.transform_to(galactocentric_frame)
rep = observed.cartesian.without_differentials()
rep = rep.with_differentials(observed.cartesian.differentials['s'] + v_sun)
fr = galactocentric_frame.realize_frame(rep).transform_to(c.frame)
return coord.SkyCoord(fr)
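# Usage sketch (illustrative values): reflex_correct needs full 6D phase-space
# information, i.e. a SkyCoord with distance, proper motions, and radial velocity;
# it returns a new SkyCoord with the solar reflex motion removed from the velocities.
#   >>> c = coord.SkyCoord(ra=150*u.deg, dec=-30*u.deg, distance=10*u.kpc,
#   ...                    pm_ra_cosdec=1*u.mas/u.yr, pm_dec=-1*u.mas/u.yr,
#   ...                    radial_velocity=100*u.km/u.s)
#   >>> c_corr = reflex_correct(c)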
def medscat(x):
med = np.median(x)
scat = 0.5 * np.sum(np.diff(np.percentile(x, [16, 50, 84])))
return med, scat
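# medscat returns the median and half of the 16th-84th percentile span, a robust
# "1-sigma-like" scatter estimate:
#   >>> medscat(np.random.default_rng(0).normal(5.0, 2.0, 100000))   # ~(5.0, 2.0)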
"""
This next part from <NAME>
"""
import astropy.coordinates as acoo
import astropy.units as auni
vlsr0 = 232.8 # from mcmillan 2017
def correct_pm(ra, dec, pmra, pmdec, dist, vlsr=vlsr0, vz=0, split=None):
if split is None:
        return correct_pm0(ra, dec, pmra, pmdec, dist, vlsr=vlsr, vz=vz)
else:
N = len(ra)
n1 = N // split
ra1 = np.array_split(ra, n1)
        dec1 = np.array_split(dec, n1)
#!/bin/python
# This Python template is used to plot data from ??????.dat files
# produced by test_build.sh (Process accuracy test).
# It is executed automatically by the test_build.sh script.
#
# You can also launch it manually; see readme_python_matplotlib_script.
# Header
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.offsetbox as offsetbox
import sys
import glob
# Update default matplotlib settings
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['mathtext.rm'] = 'serif'
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.size'] = '22.325'
plt.rcParams['xtick.minor.visible'] = True
plt.rcParams['ytick.minor.visible'] = True
plt.rcParams['xtick.major.pad'] = 15 # distance from axis to Mticks label
plt.rcParams['ytick.major.pad'] = 15 # distance from axis to Mticks label
plt.rcParams['xtick.major.size'] = 24
plt.rcParams['xtick.minor.size'] = 16
plt.rcParams['ytick.major.size'] = 24
plt.rcParams['ytick.minor.size'] = 16
plt.rcParams['xtick.major.width'] = 1.5
plt.rcParams['xtick.minor.width'] = 1.0
plt.rcParams['ytick.major.width'] = 1.5
plt.rcParams['ytick.minor.width'] = 1.0
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['figure.figsize'] = [3508./300, 2480./300] # A4 at 300 dpi
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.dpi'] = 100
plt.rcParams['image.cmap'] = 'jet'
plt.rcParams['legend.frameon'] = True
plt.rcParams['legend.fancybox'] = False
plt.rcParams['legend.framealpha'] = 1
plt.rcParams['legend.edgecolor'] = 'k'
plt.rcParams['legend.labelspacing'] = 0
plt.rcParams['legend.handlelength'] = 1.5
plt.rcParams['legend.handletextpad'] = 0.5
plt.rcParams['legend.columnspacing'] = 0.1
plt.rcParams['legend.borderpad'] = 0.1
plt.rcParams['lines.linewidth'] = 2.
plt.rcParams['lines.markeredgewidth'] = 2.0
plt.rcParams['lines.markersize'] = 15
plt.rcParams['legend.numpoints'] = 2
# Analytic profile: y_profile(y, H) = 3.0*(y/(0.5*H))*(1.0 - y/H) = 6*(y/H)*(1 - y/H),
# i.e. a parabolic (plane-Poiseuille-like) channel profile with unit mean velocity.
y_profile = lambda y, H: np.array(3.0*(y/(0.5*H))*(1.0 - y/H))
files = np.sort(glob.glob('??????.dat'))
# Layout
fig, ax = plt.subplots(1, 1)
plt.subplots_adjust(left=0.10, right=1.0, top=0.97, bottom=0.09, \
hspace=0.15, wspace=0.15)
x_data = np.array([], dtype=np.int64)
y_data = np.array([], dtype=np.double)
z_data = np.array([], dtype=np.double)
# print('{0:<1}{1:>17}{2:>18}{3:>18}'.format('#', 'Ny', 'L2(u)', 'LInf(u)'))
for fi in range(len(files)):
#-Input data
y, u = np.loadtxt(files[fi], unpack=True)
N_y = np.shape(y)[0]
l2_error = np.sqrt(np.sum((u - y_profile(y, 1.0))**2)/N_y)
linf_error = np.max(np.abs(u - y_profile(y, 1.0)))
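    # Discrete error norms against the analytic profile (what the two lines
    # above compute):
    #   L2   = sqrt( (1/N_y) * sum_i (u_i - y_profile(y_i, 1.0))^2 )
    #   Linf = max_i |u_i - y_profile(y_i, 1.0)|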
    # print('{0: 18d}{1: 18.8E}{2: 18.8E}'.format(N_y, l2_error, linf_error))
    x_data = np.append(x_data, N_y)
import fractions
import logging
import networkx as nx
import numpy as np
import psyneulink as pnl
import pytest
import types
import graph_scheduler
from graph_scheduler import Scheduler
from psyneulink import _unit_registry
from psyneulink.core.components.functions.stateful.integratorfunctions import DriftDiffusionIntegrator, SimpleIntegrator
from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear
from psyneulink.core.components.mechanisms.processing.integratormechanism import IntegratorMechanism
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.core.compositions.composition import Composition, EdgeType
from psyneulink.core.globals.keywords import VALUE
from psyneulink.core.scheduling.condition import AfterNCalls, AfterNPasses, AfterNEnvironmentStateUpdates, AfterPass, All, AllHaveRun, Always, Any, AtNCalls, AtPass, BeforeNCalls, BeforePass, EveryNCalls, EveryNPasses, JustRan, TimeInterval, WhenFinished
from psyneulink.core.scheduling.time import TimeScale
from psyneulink.library.components.mechanisms.processing.integrator.ddm import DDM
logger = logging.getLogger(__name__)
class TestScheduler:
stroop_paths = [
['Color_Input', 'Color_Hidden', 'Output', 'Decision'],
['Word_Input', 'Word_Hidden', 'Output', 'Decision'],
['Reward']
]
stroop_consideration_queue = [
{'Color_Input', 'Word_Input', 'Reward'},
{'Color_Hidden', 'Word_Hidden'},
{'Output'},
{'Decision'}
]
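    # stroop_consideration_queue is the expected scheduler consideration queue:
    # the topological layering of the graph into consideration sets, where nodes
    # in the same set do not depend on one another and are considered for
    # execution in the same step.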
@pytest.mark.parametrize(
'graph, expected_consideration_queue',
[
(
pytest.helpers.create_graph_from_pathways(*stroop_paths),
stroop_consideration_queue
),
(
nx.DiGraph(pytest.helpers.create_graph_from_pathways(*stroop_paths)),
stroop_consideration_queue
)
]
)
def test_construction(self, graph, expected_consideration_queue):
sched = Scheduler(graph)
assert sched.consideration_queue == expected_consideration_queue
def test_copy(self):
pass
def test_deepcopy(self):
pass
def test_create_multiple_contexts(self):
graph = {'A': set()}
scheduler = Scheduler(graph)
scheduler.get_clock(scheduler.default_execution_id)._increment_time(TimeScale.ENVIRONMENT_STATE_UPDATE)
eid = 'eid'
eid1 = 'eid1'
scheduler._init_counts(execution_id=eid)
assert scheduler.clocks[eid].time.environment_state_update == 0
scheduler.get_clock(scheduler.default_execution_id)._increment_time(TimeScale.ENVIRONMENT_STATE_UPDATE)
assert scheduler.clocks[eid].time.environment_state_update == 0
scheduler._init_counts(execution_id=eid1, base_execution_id=scheduler.default_execution_id)
assert scheduler.clocks[eid1].time.environment_state_update == 2
@pytest.mark.psyneulink
def test_two_compositions_one_scheduler(self):
comp1 = Composition()
comp2 = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
comp1.add_node(A)
comp2.add_node(A)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp1))
sched.add_condition(A, BeforeNCalls(A, 5, time_scale=TimeScale.LIFE))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(6)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNPasses(1)
comp1.run(
inputs={A: [[0], [1], [2], [3], [4], [5]]},
scheduler=sched,
termination_processing=termination_conds
)
output = sched.execution_list[comp1.default_execution_id]
expected_output = [
A, A, A, A, A, set()
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
comp2.run(
inputs={A: [[0], [1], [2], [3], [4], [5]]},
scheduler=sched,
termination_processing=termination_conds
)
output = sched.execution_list[comp2.default_execution_id]
expected_output = [
A, A, A, A, A, set()
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
@pytest.mark.psyneulink
def test_one_composition_two_contexts(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
comp.add_node(A)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, BeforeNCalls(A, 5, time_scale=TimeScale.LIFE))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(6)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNPasses(1)
eid = 'eid'
comp.run(
inputs={A: [[0], [1], [2], [3], [4], [5]]},
scheduler=sched,
termination_processing=termination_conds,
context=eid,
)
output = sched.execution_list[eid]
expected_output = [
A, A, A, A, A, set()
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
comp.run(
inputs={A: [[0], [1], [2], [3], [4], [5]]},
scheduler=sched,
termination_processing=termination_conds,
context=eid,
)
output = sched.execution_list[eid]
expected_output = [
A, A, A, A, A, set(), set(), set(), set(), set(), set(), set()
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
eid = 'eid1'
comp.run(
inputs={A: [[0], [1], [2], [3], [4], [5]]},
scheduler=sched,
termination_processing=termination_conds,
context=eid,
)
output = sched.execution_list[eid]
expected_output = [
A, A, A, A, A, set()
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
@pytest.mark.psyneulink
def test_change_termination_condition(self):
D = DDM(function=DriftDiffusionIntegrator(threshold=10))
C = Composition(pathways=[D])
D.set_log_conditions(VALUE)
def change_termination_processing():
if C.termination_processing is None:
C.scheduler.termination_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: WhenFinished(D)}
C.termination_processing = {TimeScale.ENVIRONMENT_STATE_UPDATE: WhenFinished(D)}
elif isinstance(C.termination_processing[TimeScale.ENVIRONMENT_STATE_UPDATE], AllHaveRun):
C.scheduler.termination_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: WhenFinished(D)}
C.termination_processing = {TimeScale.ENVIRONMENT_STATE_UPDATE: WhenFinished(D)}
else:
C.scheduler.termination_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AllHaveRun()}
C.termination_processing = {TimeScale.ENVIRONMENT_STATE_UPDATE: AllHaveRun()}
change_termination_processing()
C.run(inputs={D: [[1.0], [2.0]]},
# termination_processing={TimeScale.ENVIRONMENT_STATE_UPDATE: WhenFinished(D)},
call_after_trial=change_termination_processing,
reset_stateful_functions_when=pnl.AtConsiderationSetExecution(0),
num_trials=4)
# EnvironmentStateUpdate 0:
# input = 1.0, termination condition = WhenFinished
# 10 passes (value = 1.0, 2.0 ... 9.0, 10.0)
# EnvironmentStateUpdate 1:
# input = 2.0, termination condition = AllHaveRun
# 1 pass (value = 2.0)
expected_results = [[np.array([10.]), np.array([10.])],
[np.array([2.]), np.array([1.])],
[np.array([10.]), np.array([10.])],
[np.array([2.]), np.array([1.])]]
assert np.allclose(expected_results, np.asfarray(C.results))
@pytest.mark.psyneulink
def test_default_condition_1(self):
A = pnl.TransferMechanism(name='A')
B = pnl.TransferMechanism(name='B')
C = pnl.TransferMechanism(name='C')
comp = pnl.Composition(pathways=[[A, C], [A, B, C]])
comp.scheduler.add_condition(A, AtPass(1))
comp.scheduler.add_condition(B, Always())
output = list(comp.scheduler.run())
expected_output = [B, A, B, C]
assert output == pytest.helpers.setify_expected_output(expected_output)
@pytest.mark.psyneulink
def test_default_condition_2(self):
A = pnl.TransferMechanism(name='A')
B = pnl.TransferMechanism(name='B')
C = pnl.TransferMechanism(name='C')
comp = pnl.Composition(pathways=[[A, B], [C]])
comp.scheduler.add_condition(C, AtPass(1))
output = list(comp.scheduler.run())
expected_output = [A, B, {C, A}]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_exact_time_mode(self):
sched = Scheduler(
{'A': set(), 'B': {'A'}},
mode=graph_scheduler.SchedulingMode.EXACT_TIME
)
# these cannot run at same execution set unless in EXACT_TIME
sched.add_condition('A', TimeInterval(start=1))
sched.add_condition('B', TimeInterval(start=1))
list(sched.run())
assert sched.mode == graph_scheduler.SchedulingMode.EXACT_TIME
assert sched.execution_list[sched.default_execution_id] == [{'A', 'B'}]
assert sched.execution_timestamps[sched.default_execution_id][0].absolute == 1 * graph_scheduler._unit_registry.ms
def test_run_with_new_execution_id(self):
sched = Scheduler({'A': set()})
sched.add_condition('A', graph_scheduler.AtPass(1))
output = list(sched.run(execution_id='eid'))
assert output == [set(), {'A'}]
assert 'eid' in sched.execution_list
assert sched.execution_list['eid'] == output
assert sched.get_clock('eid') == sched.get_clock(types.SimpleNamespace(default_execution_id='eid'))
def test_delete_counts(self):
sched = Scheduler(
{
'A': set(),
'B': {'A'},
'C': {'A'},
'D': {'C', 'B'}
}
)
sched.add_condition_set(
{
'A': graph_scheduler.EveryNPasses(2),
'B': graph_scheduler.EveryNCalls('A', 2),
'C': graph_scheduler.EveryNCalls('A', 3),
'D': graph_scheduler.AfterNCallsCombined('B', 'C', n=6)
}
)
eid_delete = 'eid'
eid_repeat = 'eid2'
del_run_1 = list(sched.run(execution_id=eid_delete))
repeat_run_1 = list(sched.run(execution_id=eid_repeat))
sched._delete_counts(eid_delete)
del_run_2 = list(sched.run(execution_id=eid_delete))
repeat_run_2 = list(sched.run(execution_id=eid_repeat))
assert del_run_1 == del_run_2
assert repeat_run_1 == repeat_run_2
assert sched.execution_list[eid_delete] == del_run_1
assert sched.execution_list[eid_repeat] == repeat_run_2 + repeat_run_2
@pytest.mark.psyneulink
class TestLinear:
@classmethod
def setup_class(self):
self.orig_is_finished_flag = TransferMechanism.is_finished_flag
self.orig_is_finished = TransferMechanism.is_finished
TransferMechanism.is_finished_flag = True
TransferMechanism.is_finished = lambda self, context: self.is_finished_flag
@classmethod
def teardown_class(self):
del TransferMechanism.is_finished_flag
del TransferMechanism.is_finished
TransferMechanism.is_finished_flag = self.orig_is_finished_flag
TransferMechanism.is_finished = self.orig_is_finished
def test_no_termination_conds(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(B, 3))
output = list(sched.run())
expected_output = [
A, A, B, A, A, B, A, A, B, C,
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
# tests below are copied from old scheduler, need renaming
def test_1(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(B, 3))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 4, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, A, B, A, A, B, A, A, B, C,
A, A, B, A, A, B, A, A, B, C,
A, A, B, A, A, B, A, A, B, C,
A, A, B, A, A, B, A, A, B, C,
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_1b(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, Any(EveryNCalls(A, 2), AfterPass(1)))
sched.add_condition(C, EveryNCalls(B, 3))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 4, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, A, B, A, B, A, B, C,
A, B, A, B, A, B, C,
A, B, A, B, A, B, C,
A, B, A, B, A, B, C,
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_2(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(B, 2))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 1, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, B, A, A, B, C]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_3(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, All(AfterNCalls(B, 2), EveryNCalls(B, 1)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 4, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, A, B, A, A, B, C, A, A, B, C, A, A, B, C, A, A, B, C
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_6(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, BeforePass(5))
sched.add_condition(B, AfterNCalls(A, 5))
sched.add_condition(C, AfterNCalls(B, 1))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 3)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, A, A, A, A, B, C, B, C, B, C
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_6_two_environment_state_updates(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, BeforePass(5))
sched.add_condition(B, AfterNCalls(A, 5))
sched.add_condition(C, AfterNCalls(B, 1))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(2)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 3)
comp.run(
inputs={A: [[0], [1], [2], [3], [4], [5]]},
scheduler=sched,
termination_processing=termination_conds
)
output = sched.execution_list[comp.default_execution_id]
expected_output = [
A, A, A, A, A, B, C, B, C, B, C,
A, A, A, A, A, B, C, B, C, B, C
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_7(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = Any(AfterNCalls(A, 1), AfterNCalls(B, 1))
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_8(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = All(AfterNCalls(A, 1), AfterNCalls(B, 1))
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, B]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_9(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, WhenFinished(A))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(B, 2)
output = []
i = 0
A.is_finished_flag = False
for step in sched.run(termination_conds=termination_conds):
if i == 3:
A.is_finished_flag = True
output.append(step)
i += 1
expected_output = [A, A, A, A, B, A, B]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_9b(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
A.is_finished_flag = False
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, WhenFinished(A))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, A, A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_10(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
A.is_finished_flag = True
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, Any(WhenFinished(A), AfterNCalls(A, 3)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(B, 5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, B, A, B, A, B, A, B, A, B]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_10b(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
A.is_finished_flag = False
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, Any(WhenFinished(A), AfterNCalls(A, 3)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(B, 4)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, A, B, A, B, A, B, A, B]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_10c(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
A.is_finished_flag = True
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, All(WhenFinished(A), AfterNCalls(A, 3)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(B, 4)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, A, B, A, B, A, B, A, B]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_10d(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
A.is_finished_flag = False
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, All(WhenFinished(A), AfterNCalls(A, 3)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(10)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, A, A, A, A, A, A, A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
########################################
# tests with linear compositions
########################################
def test_linear_AAB(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNCalls(B, 2, time_scale=TimeScale.ENVIRONMENT_SEQUENCE)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(B, 2, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, B, A, A, B]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_linear_ABB(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, Any(AtPass(0), EveryNCalls(B, 2)))
sched.add_condition(B, Any(EveryNCalls(A, 1), EveryNCalls(B, 1)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(B, 8, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, B, B, A, B, B, A, B, B, A, B, B]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_linear_ABBCC(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, Any(AtPass(0), EveryNCalls(C, 2)))
sched.add_condition(B, Any(JustRan(A), JustRan(B)))
sched.add_condition(C, Any(EveryNCalls(B, 2), JustRan(C)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 4, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, B, B, C, C, A, B, B, C, C]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_linear_ABCBC(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, Any(AtPass(0), EveryNCalls(C, 2)))
sched.add_condition(B, Any(EveryNCalls(A, 1), EveryNCalls(C, 1)))
sched.add_condition(C, EveryNCalls(B, 1))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 4, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, B, C, B, C, A, B, C, B, C]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_one_run_twice(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5,
)
)
c = Composition(pathways=[A])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(A, 2)}
stim_list = {A: [[1]]}
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mech = A
expected_output = [
np.array([1.]),
]
for i in range(len(expected_output)):
np.testing.assert_allclose(expected_output[i], terminal_mech.get_output_values(c)[i])
def test_two_AAB(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
c = Composition(pathways=[A, B])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(B, 1)}
stim_list = {A: [[1]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(B, EveryNCalls(A, 2))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mech = B
expected_output = [
np.array([2.]),
]
for i in range(len(expected_output)):
np.testing.assert_allclose(expected_output[i], terminal_mech.get_output_values(c)[i])
def test_two_ABB(self):
A = TransferMechanism(
name='A',
default_variable=[0],
function=Linear(slope=2.0),
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
c = Composition(pathways=[A, B])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(B, 2)}
stim_list = {A: [[1]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(A, Any(AtPass(0), AfterNCalls(B, 2)))
sched.add_condition(B, Any(JustRan(A), JustRan(B)))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mech = B
expected_output = [
np.array([2.]),
]
for i in range(len(expected_output)):
np.testing.assert_allclose(expected_output[i], terminal_mech.get_output_values(c)[i])
########################################
# tests with small branching compositions
########################################
@pytest.mark.psyneulink
class TestBranching:
@classmethod
def setup_class(self):
self.orig_is_finished_flag = TransferMechanism.is_finished_flag
self.orig_is_finished = TransferMechanism.is_finished
TransferMechanism.is_finished_flag = True
TransferMechanism.is_finished = lambda self, context: self.is_finished_flag
@classmethod
def teardown_class(self):
del TransferMechanism.is_finished_flag
del TransferMechanism.is_finished
TransferMechanism.is_finished_flag = self.orig_is_finished_flag
TransferMechanism.is_finished = self.orig_is_finished
# triangle: A
# / \
# B C
def test_triangle_1(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), A, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 1))
sched.add_condition(C, EveryNCalls(A, 1))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 3, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, set([B, C]),
A, set([B, C]),
A, set([B, C]),
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_triangle_2(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), A, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 1))
sched.add_condition(C, EveryNCalls(A, 2))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 3, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, B,
A, set([B, C]),
A, B,
A, set([B, C]),
A, B,
A, set([B, C]),
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_triangle_3(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), A, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(A, 3))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 2, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, A, B, A, C, A, B, A, A, set([B, C])
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
# this is test 11 of original constraint_scheduler.py
def test_triangle_4(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), A, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, All(WhenFinished(A), AfterNCalls(B, 3)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 1)
output = []
i = 0
A.is_finished_flag = False
for step in sched.run(termination_conds=termination_conds):
if i == 3:
A.is_finished_flag = True
output.append(step)
i += 1
expected_output = [A, A, B, A, A, B, A, A, set([B, C])]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_triangle_4b(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), A, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, All(WhenFinished(A), AfterNCalls(B, 3)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 1)
output = []
i = 0
A.is_finished_flag = False
for step in sched.run(termination_conds=termination_conds):
if i == 10:
A.is_finished_flag = True
output.append(step)
i += 1
expected_output = [A, A, B, A, A, B, A, A, B, A, A, set([B, C])]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
# inverted triangle: A B
# \ /
# C
# this is test 4 of original constraint_scheduler.py
# this test has an implicit priority set of A<B !
def test_invtriangle_1(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, C)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, Any(AfterNCalls(A, 3), AfterNCalls(B, 3)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 4, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, set([A, B]), A, C, set([A, B]), C, A, C, set([A, B]), C
]
# pprint.pprint(output)
assert output == pytest.helpers.setify_expected_output(expected_output)
# this is test 5 of original constraint_scheduler.py
# this test has an implicit priority set of A<B !
def test_invtriangle_2(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, C)
comp.add_projection(MappingProjection(), B, C)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, All(AfterNCalls(A, 3), AfterNCalls(B, 3)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 2, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, set([A, B]), A, set([A, B]), A, set([A, B]), C, A, C
]
assert output == pytest.helpers.setify_expected_output(expected_output)
# checkmark: A
# \
# B C
# \ /
# D
# testing toposort
def test_checkmark_1(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
D = TransferMechanism(function=Linear(intercept=.5), name='scheduler-pytests-D')
for m in [A, B, C, D]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, D)
comp.add_projection(MappingProjection(), C, D)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, Always())
sched.add_condition(B, Always())
sched.add_condition(C, Always())
sched.add_condition(D, Always())
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(D, 1, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
set([A, C]), B, D
]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_checkmark_2(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
D = TransferMechanism(function=Linear(intercept=.5), name='scheduler-pytests-D')
for m in [A, B, C, D]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, D)
comp.add_projection(MappingProjection(), C, D)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(A, 2))
sched.add_condition(D, All(EveryNCalls(B, 2), EveryNCalls(C, 2)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(D, 1, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, set([A, C]), B, A, set([A, C]), B, D
]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_checkmark2_1(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='scheduler-pytests-A')
B = TransferMechanism(function=Linear(intercept=4.0), name='scheduler-pytests-B')
C = TransferMechanism(function=Linear(intercept=1.5), name='scheduler-pytests-C')
D = TransferMechanism(function=Linear(intercept=.5), name='scheduler-pytests-D')
for m in [A, B, C, D]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), A, D)
comp.add_projection(MappingProjection(), B, D)
comp.add_projection(MappingProjection(), C, D)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(A, 2))
sched.add_condition(D, All(EveryNCalls(B, 2), EveryNCalls(C, 2)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(D, 1, time_scale=TimeScale.ENVIRONMENT_STATE_UPDATE)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
A, set([A, C]), B, A, set([A, C]), B, D
]
assert output == pytest.helpers.setify_expected_output(expected_output)
# multi source: A1 A2
# / \ / \
# B1 B2 B3
# \ / \ /
# C1 C2
def test_multisource_1(self):
comp = Composition()
A1 = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A1')
A2 = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A2')
B1 = TransferMechanism(function=Linear(intercept=4.0), name='B1')
B2 = TransferMechanism(function=Linear(intercept=4.0), name='B2')
B3 = TransferMechanism(function=Linear(intercept=4.0), name='B3')
C1 = TransferMechanism(function=Linear(intercept=1.5), name='C1')
C2 = TransferMechanism(function=Linear(intercept=.5), name='C2')
for m in [A1, A2, B1, B2, B3, C1, C2]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A1, B1)
comp.add_projection(MappingProjection(), A1, B2)
comp.add_projection(MappingProjection(), A2, B1)
comp.add_projection(MappingProjection(), A2, B2)
comp.add_projection(MappingProjection(), A2, B3)
comp.add_projection(MappingProjection(), B1, C1)
comp.add_projection(MappingProjection(), B2, C1)
comp.add_projection(MappingProjection(), B1, C2)
comp.add_projection(MappingProjection(), B3, C2)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
for m in comp.nodes:
sched.add_condition(m, Always())
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = All(AfterNCalls(C1, 1), AfterNCalls(C2, 1))
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
set([A1, A2]), set([B1, B2, B3]), set([C1, C2])
]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_multisource_2(self):
comp = Composition()
A1 = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A1')
A2 = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A2')
B1 = TransferMechanism(function=Linear(intercept=4.0), name='B1')
B2 = TransferMechanism(function=Linear(intercept=4.0), name='B2')
B3 = TransferMechanism(function=Linear(intercept=4.0), name='B3')
C1 = TransferMechanism(function=Linear(intercept=1.5), name='C1')
C2 = TransferMechanism(function=Linear(intercept=.5), name='C2')
for m in [A1, A2, B1, B2, B3, C1, C2]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A1, B1)
comp.add_projection(MappingProjection(), A1, B2)
comp.add_projection(MappingProjection(), A2, B1)
comp.add_projection(MappingProjection(), A2, B2)
comp.add_projection(MappingProjection(), A2, B3)
comp.add_projection(MappingProjection(), B1, C1)
comp.add_projection(MappingProjection(), B2, C1)
comp.add_projection(MappingProjection(), B1, C2)
comp.add_projection(MappingProjection(), B3, C2)
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition_set({
A1: Always(),
A2: Always(),
B1: EveryNCalls(A1, 2),
B3: EveryNCalls(A2, 2),
B2: All(EveryNCalls(A1, 4), EveryNCalls(A2, 4)),
C1: Any(AfterNCalls(B1, 2), AfterNCalls(B2, 2)),
C2: Any(AfterNCalls(B2, 2), AfterNCalls(B3, 2)),
})
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = All(AfterNCalls(C1, 1), AfterNCalls(C2, 1))
output = list(sched.run(termination_conds=termination_conds))
expected_output = [
set([A1, A2]), set([A1, A2]), set([B1, B3]), set([A1, A2]), set([A1, A2]), set([B1, B2, B3]), set([C1, C2])
]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_three_ABAC(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
c = Composition(pathways=[[A,B],[A,C]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(C, 1)}
stim_list = {A: [[1]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(B, Any(AtNCalls(A, 1), EveryNCalls(A, 2)))
sched.add_condition(C, EveryNCalls(A, 2))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [B, C]
expected_output = [
[
np.array([1.]),
],
[
np.array([2.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
np.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(c)[i])
def test_three_ABAC_convenience(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
c = Composition(pathways=[[A,B],[A,C]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(C, 1)}
stim_list = {A: [[1]]}
c.scheduler.add_condition(B, Any(AtNCalls(A, 1), EveryNCalls(A, 2)))
c.scheduler.add_condition(C, EveryNCalls(A, 2))
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [B, C]
expected_output = [
[
np.array([1.]),
],
[
np.array([2.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
np.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(c)[i])
def test_three_ABACx2(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
c = Composition(pathways=[[A,B],[A,C]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(C, 2)}
stim_list = {A: [[1]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(B, Any(AtNCalls(A, 1), EveryNCalls(A, 2)))
sched.add_condition(C, EveryNCalls(A, 2))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [B, C]
expected_output = [
[
np.array([3.]),
],
[
np.array([4.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
np.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(c)[i])
def test_three_2_ABC(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
c = Composition(pathways=[[A,C],[B,C]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(C, 1)}
stim_list = {A: [[1]], B: [[2]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(C, All(EveryNCalls(A, 1), EveryNCalls(B, 1)))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [C]
expected_output = [
[
np.array([5.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
np.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(c)[i])
def test_three_2_ABCx2(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
c = Composition(pathways=[[A,C],[B,C]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(C, 2)}
stim_list = {A: [[1]], B: [[2]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(C, All(EveryNCalls(A, 1), EveryNCalls(B, 1)))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [C]
expected_output = [
[
np.array([10.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
np.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(c)[i])
def test_three_integrators(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
c = Composition(pathways=[[A,C],[B,C]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(C, 2)}
stim_list = {A: [[1]], B: [[1]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, Any(EveryNCalls(A, 1), EveryNCalls(B, 1)))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
mechs = [A, B, C]
expected_output = [
[
np.array([2.]),
],
[
np.array([1.]),
],
[
np.array([4.]),
],
]
for m in range(len(mechs)):
for i in range(len(expected_output[m])):
np.testing.assert_allclose(expected_output[m][i], mechs[m].get_output_values(c)[i])
def test_four_ABBCD(self):
A = TransferMechanism(
name='A',
default_variable=[0],
function=Linear(slope=2.0),
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
D = TransferMechanism(
name='D',
default_variable=[0],
function=Linear(slope=1.0),
)
c = Composition(pathways=[[A,B,D],[A,C,D]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(D, 1)}
stim_list = {A: [[1]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(B, EveryNCalls(A, 1))
sched.add_condition(C, EveryNCalls(A, 2))
sched.add_condition(D, Any(EveryNCalls(B, 3), EveryNCalls(C, 3)))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [D]
expected_output = [
[
np.array([4.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
np.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(c)[i])
def test_four_integrators_mixed(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
D = IntegratorMechanism(
name='D',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
c = Composition(pathways=[[A,C],[A,D],[B,C],[B,D]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: All(AfterNCalls(C, 1), AfterNCalls(D, 1))}
stim_list = {A: [[1]], B: [[1]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(A, 1))
sched.add_condition(D, EveryNCalls(B, 1))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
mechs = [A, B, C, D]
expected_output = [
[
np.array([2.]),
],
[
np.array([1.]),
],
[
np.array([4.]),
],
[
np.array([3.]),
],
]
for m in range(len(mechs)):
for i in range(len(expected_output[m])):
np.testing.assert_allclose(expected_output[m][i], mechs[m].get_output_values(c)[i])
def test_five_ABABCDE(self):
A = TransferMechanism(
name='A',
default_variable=[0],
function=Linear(slope=2.0),
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
D = TransferMechanism(
name='D',
default_variable=[0],
function=Linear(slope=1.0),
)
E = TransferMechanism(
name='E',
default_variable=[0],
function=Linear(slope=2.0),
)
c = Composition(pathways=[[A,C,D],[B,C,E]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: AfterNCalls(E, 1)}
stim_list = {A: [[1]], B: [[2]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(C, Any(EveryNCalls(A, 1), EveryNCalls(B, 1)))
sched.add_condition(D, EveryNCalls(C, 1))
sched.add_condition(E, EveryNCalls(C, 1))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [D, E]
expected_output = [
[
np.array([3.]),
],
[
np.array([6.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
np.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(c)[i])
#
# A B
# |\/|
# C D
# |\/|
# E F
#
def test_six_integrators_threelayer_mixed(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
D = IntegratorMechanism(
name='D',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
E = IntegratorMechanism(
name='E',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
F = IntegratorMechanism(
name='F',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
c = Composition(pathways=[[A,C,E],[A,C,F],[A,D,E],[A,D,F],[B,C,E],[B,C,F],[B,D,E],[B,D,F]])
term_conds = {TimeScale.ENVIRONMENT_STATE_UPDATE: All(AfterNCalls(E, 1), AfterNCalls(F, 1))}
stim_list = {A: [[1]], B: [[1]]}
sched = pnl.Scheduler(**pytest.helpers.composition_to_scheduler_args(c))
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(A, 1))
sched.add_condition(D, EveryNCalls(B, 1))
sched.add_condition(E, EveryNCalls(C, 1))
sched.add_condition(F, EveryNCalls(D, 2))
c.scheduler = sched
c.run(
inputs=stim_list,
termination_processing=term_conds
)
# Intermediate consideration set executions
#
# 0 1 2 3
#
# A 1 2 3 4
# B 1 2
# C 1 4 8 14
# D 3 9
# E 1 8 19 42
# F 23
#
expected_output = {
A: [
|
np.array([4.])
|
numpy.array
|
import numpy as np
from .unet import UNet
from pathlib import Path
from random import sample
import pdb
import math
from math import ceil
import pickle
import cv2
from ..tools.pytorch_batchsize import *
from ..tools.heatmap_to_points import *
from ..tools.helper import *
from ..tools.image_tools import *
from .basic import *
from .unet_revised import SE_Res_UNet
from PIL import Image
import glob
import sys
from fastprogress.fastprogress import master_bar, progress_bar
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
#from .basic import *
from torch import nn
import random
import platform
import matplotlib.pyplot as plt
import os
import warnings
from .hourglass import hg
__all__ = ["DataAugmentation", "HeatmapLearner", "HeatLoss_OldGen_0", "HeatLoss_OldGen_1", "HeatLoss_OldGen_2", "HeatLoss_OldGen_3", "HeatLoss_OldGen_4", "HeatLoss_NextGen_0", "HeatLoss_NextGen_1",
"HeatLoss_NextGen_2", "HeatLoss_NextGen_3", "Loss_weighted"]
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor
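# Hedged usage sketch (mean/std values assumed): UnNormalize inverts transforms.Normalize
# for visualisation. Note that __call__ modifies the tensor in place, so pass a clone:
#   unnorm = UnNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#   img_for_plot = unnorm(normalized_img.clone())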
class CustomHeatmapDataset(Dataset):
"CustomImageDataset with `image_files`,`y_func`, `convert_mode` and `size(height, width)`"
def __init__(self, data, hull_path, size=(512,512), grayscale=False, normalize_mean=None, normalize_std=None, data_aug=None,
is_valid=False, do_normalize=True, clahe=True):
self.data = data
self.size = size
if normalize_mean is None:
if grayscale:
normalize_mean = [0.131]
else:
normalize_mean = [0.485, 0.456, 0.406]
if normalize_std is None:
if grayscale:
normalize_std = [0.308]
else:
normalize_std = [0.229, 0.224, 0.225]
self.normalize = transforms.Normalize(normalize_mean,normalize_std)
self.un_normalize = UnNormalize(mean=normalize_mean, std=normalize_std)
self.hull_path = hull_path
self.convert_mode = "L" if grayscale else "RGB"
self.data_aug = data_aug
self.to_t = transforms.ToTensor()
self.is_valid = is_valid
self.do_normalize = do_normalize
self.clahe = cv2.createCLAHE(clipLimit=20.0,tileGridSize=(30,30)) if clahe else None
def __len__(self):
return len(self.data)
def load_labels(self,idx):
heatmaps = []
for fname in self.data[idx][1]:
mask_file = fname.parent/(fname.stem + "_mask"+ fname.suffix)
heatmaps.append([load_heatmap(fname, size=self.size),load_heatmap(mask_file, size=self.size)])
return heatmaps
def load_hull(self, idx):
return load_heatmap(self.hull_path/self.data[idx][0].name, size=self.size)
def apply_clahe(self,img):
if self.clahe is None:
return img
img = np.asarray(img)
img = self.clahe.apply(img)
return Image.fromarray(img)
def __getitem__(self, idx):
image = load_image(self.data[idx][0], size=self.size, convert_mode=self.convert_mode, to_numpy=False)
labels = self.load_labels(idx)
hull = self.load_hull(idx)
image = self.apply_clahe(image)
if (not self.is_valid) and (self.data_aug is not None):
image, labels, hull = self.data_aug.transform(image, labels, hull)
hull = torch.squeeze(self.to_t(hull),dim=0).type(torch.bool)
labels_extracted = [label[0] for label in labels]
masks_extracted = [label[1] for label in labels]
labels_extracted = self.to_t(np.stack(labels_extracted, axis=2))
masks_extracted = self.to_t(np.stack(masks_extracted, axis=2)).type(torch.bool)
image = self.to_t(image)
if self.do_normalize:
image = self.normalize(image)
return self.data[idx][0].stem, image, labels_extracted, masks_extracted, hull
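# Hedged example of consuming this dataset (paths are hypothetical); each item is a
# (name, image, heatmaps, masks, hull) tuple with a float image and boolean masks/hull:
#   ds = CustomHeatmapDataset(data=[[Path("imgs/a.png"), [Path("feature_1/a.png")]]],
#                             hull_path=Path("hulls"), size=(512, 512), grayscale=True)
#   name, image, heatmaps, masks, hull = ds[0]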
class RandomRotationImageTarget(transforms.RandomRotation):
def __call__(self, img, targets, hull):
angle = self.get_params(self.degrees)
img = transforms.functional.rotate(img, angle, self.resample, self.expand, self.center)
hull = transforms.functional.rotate(hull, angle, self.resample, self.expand, self.center)
for idx in range(len(targets)):
targets[idx][0] = transforms.functional.rotate(targets[idx][0], angle, self.resample, self.expand, self.center)
targets[idx][1] = transforms.functional.rotate(targets[idx][1], angle, self.resample, self.expand, self.center)
return img, targets, hull
class RandomHorizontalFlipImageTarget(transforms.RandomHorizontalFlip):
def __call__(self, img, targets, hull):
if random.random() < self.p:
img = transforms.functional.hflip(img)
hull = transforms.functional.hflip(hull)
for idx in range(len(targets)):
targets[idx][0] = transforms.functional.hflip(targets[idx][0])
targets[idx][1] = transforms.functional.hflip(targets[idx][1])
return img,targets,hull
class RandomVerticalFlipImageTarget(transforms.RandomVerticalFlip):
def __call__(self, img, targets, hull):
if random.random() < self.p:
img = transforms.functional.vflip(img)
hull = transforms.functional.vflip(hull)
for idx in range(len(targets)):
targets[idx][0] = transforms.functional.vflip(targets[idx][0])
targets[idx][1] = transforms.functional.vflip(targets[idx][1])
return img,targets,hull
class RandomPerspectiveImageTarget(transforms.RandomPerspective):
def __call__(self, img, targets, hull):
if not transforms.functional._is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if random.random() < self.p:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="torch.lstsq")
width, height = img.size
startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
img = transforms.functional.perspective(img, startpoints, endpoints, self.interpolation, self.fill)
hull = transforms.functional.perspective(hull, startpoints, endpoints, self.interpolation, self.fill)
for idx in range(len(targets)):
targets[idx][0] = transforms.functional.perspective(targets[idx][0], startpoints, endpoints, self.interpolation, self.fill)
targets[idx][1] = transforms.functional.perspective(targets[idx][1], startpoints, endpoints, self.interpolation, self.fill)
return img,targets,hull
class ComposeImageTarget(transforms.Compose):
def __call__(self, img, targets, hull):
for t in self.transforms:
img,targets,hull = t(img, targets, hull)
return img,targets,hull
class DataAugmentation:
"DataAugmentation class with `size(height,width)`"
def __init__(self, rotation=20,horizontal_flip_p=0.5,
vertical_flip_p=0.5,warp=0.3,warp_p=0.5, zoom=0.8,
brightness=0.6, contrast=0.6, GaussianBlur=1):
self.lightning_transforms = transforms.Compose([transforms.ColorJitter(brightness=brightness,contrast=contrast),
#transforms.GaussianBlur(kernel_size=GaussianBlur)
])
self.affine_transforms = ComposeImageTarget([
RandomRotationImageTarget(degrees=(-rotation,rotation)),
RandomHorizontalFlipImageTarget(p=horizontal_flip_p),
RandomVerticalFlipImageTarget(p=vertical_flip_p),
RandomPerspectiveImageTarget(distortion_scale=warp, p=warp_p),
#transforms.RandomResizedCrop(size=size,scale=(zoom,1.0),ratio=(1.0,1.0))
])
def transform(self,features,labels, hull):
#do lighting transforms for features
features = self.lightning_transforms(features)
#do affine transforms for features and labels
features,labels,hull = self.affine_transforms(features, labels, hull)
return features,labels,hull
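# Sketch of how DataAugmentation is applied (inputs assumed to be PIL images, as in
# CustomHeatmapDataset.__getitem__): lighting jitter touches only the image, while the
# affine transforms are applied jointly to image, label heatmaps and hull so they stay aligned.
#   aug = DataAugmentation(rotation=10, warp_p=0.3)
#   img_aug, labels_aug, hull_aug = aug.transform(img, labels, hull)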
class heatmap_metric(LearnerCallback):
def __init__(self, features, true_positive_threshold=10, metric_counter=1):
self.__counter_epoch = 0
self.__metric_counter = metric_counter
self.__custom_metrics = {"metrics":[],"types":[]}
self.__features = features
self.__true_positive_threshold = true_positive_threshold
self.numeric_metric = 1
self.accuracy_metric = 2
for item in self.__features.keys():
self.__custom_metrics["metrics"].append(item+"_pos_train")
self.__custom_metrics["types"].append(self.numeric_metric)
self.__custom_metrics["metrics"].append(item+"_pos_valid")
self.__custom_metrics["types"].append(self.numeric_metric)
self.__custom_metrics["metrics"].append(item+"_accuracy_train")
self.__custom_metrics["types"].append(self.accuracy_metric)
self.__custom_metrics["metrics"].append(item+"_accuracy_valid")
self.__custom_metrics["types"].append(self.accuracy_metric)
def get_metric_names(self):
return self.__custom_metrics["metrics"]
def __calc_metrics(self, targets, outputs, metric_values, train):
ext = "train" if train else "valid"
for target,output,feature in zip(targets, outputs, list(self.__features.keys())):
type_of = self.__features[feature]["type"]
if (type_of == "circle"):
points_target = heatmap_to_circle(target)
points_output = heatmap_to_circle(output)
if (points_target is not None):
metric_values[feature+"_accuracy_"+ext]["total_targets"] += 1
if (points_target is not None) and (points_output is not None):
mid_point_output = np.round(np.sum(points_output, axis=0)/len(points_output)).astype(int)
mid_point_target = np.round(np.sum(points_target, axis=0)/len(points_target)).astype(int)
diff_circle_midpoint = np.sqrt(np.sum((mid_point_output - mid_point_target)**2))
metric_values[feature+"_pos_"+ext].append(diff_circle_midpoint)
if diff_circle_midpoint < self.__true_positive_threshold:
metric_values[feature+"_accuracy_"+ext]["total_true_positives"] += 1
elif type_of == "single_point":
center_point_target = heatmap_to_max_confidence_point(target)
center_point_output = heatmap_to_max_confidence_point(output)
if (center_point_target is not None):
metric_values[feature+"_accuracy_"+ext]["total_targets"] += 1
if (center_point_target is not None) and (center_point_output is not None):
diff_center = np.sqrt(np.sum((center_point_output - center_point_target)**2))
metric_values[feature+"_pos_"+ext].append(diff_center)
if diff_center < self.__true_positive_threshold:
metric_values[feature+"_accuracy_"+ext]["total_true_positives"] += 1
elif type_of == "multi_point":
all_peaks_target = heatmap_to_multiple_points(target)
all_peaks_output = heatmap_to_multiple_points(output)
if (all_peaks_target is not None):
metric_values[feature+"_accuracy_"+ext]["total_targets"] += len(all_peaks_target)
if (all_peaks_target is not None) and (all_peaks_output is not None):
diffs = []
for peak_target in all_peaks_target:
if len(all_peaks_output) == 0:
break
s = np.argmin(np.sqrt(np.sum((all_peaks_output - peak_target)**2, axis=1)))
diffs.append(np.sqrt(np.sum((all_peaks_output[s] - peak_target)**2)))
if diffs[-1] < self.__true_positive_threshold:
metric_values[feature+"_accuracy_"+ext]["total_true_positives"] += 1
all_peaks_output = np.delete(all_peaks_output, s, axis=0)
diff_nut_edges = np.array(diffs).mean()
metric_values[feature+"_pos_"+ext].append(diff_nut_edges)
else:
raise("The Heatmaptype " + type_of + " is not implemented yet.")
return metric_values
def on_batch_end(self, last_output, last_target, train):
if self.__counter_epoch % self.__metric_counter == 0:
last_target = last_target.numpy()
last_output = last_output.numpy()
for target_batch,output_batch in zip(last_target, last_output):
self.metrics_values = self.__calc_metrics(target_batch,output_batch,
self.metrics_values, train)
def on_epoch_begin(self):
if self.__counter_epoch % self.__metric_counter == 0:
self.metrics_values = {}
for idx,metric in enumerate(self.__custom_metrics["metrics"]):
if self.__custom_metrics["types"][idx] == self.numeric_metric:
self.metrics_values[metric] = []
else:
self.metrics_values[metric] = {"total_targets":0,"total_true_positives":0}
def on_epoch_end(self):
metrics = list(np.zeros(len(self.__custom_metrics["metrics"]), dtype=np.float32))
if self.__counter_epoch % self.__metric_counter == 0:
for idx,metric in enumerate(self.__custom_metrics["metrics"]):
if self.__custom_metrics["types"][idx] == self.numeric_metric:
if len(self.metrics_values[metric]) == 0:
metrics[idx] = 0
else:
metrics[idx] = np.array(self.metrics_values[metric]).mean()
else:
if self.metrics_values[metric]["total_targets"] != 0:
metrics[idx] = self.metrics_values[metric]["total_true_positives"] / self.metrics_values[metric]["total_targets"]
else:
metrics[idx] = 0
self.__counter_epoch += 1
return metrics
class HeatLoss_OldGen_0(nn.Module):
def __init__(self):
super().__init__()
r"""Class for HeatLoss calculation. This variant includes no masks, simple Mean absolute error over all pixles:
"""
def forward(self, input, target, masks, hull):
return torch.mean(torch.abs(input - target))
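# Note: without masks this reduces to a plain L1 loss, e.g.
#   HeatLoss_OldGen_0()(pred, target, masks=None, hull=None)
# gives the same value as torch.nn.functional.l1_loss(pred, target).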
class HeatLoss_OldGen_1(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for HeatLoss calculation. This variant includes the masks of following objects:
- specific features
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
m1 = (target > 0.0)
ret1 = torch.abs(input[m1] - target[m1])
mean1 = torch.mean(ret1)
if self.print_out_losses:
print("specific features:",mean1.item(), end="\r")
return mean1
class HeatLoss_OldGen_2(nn.Module):
def __init__(self, print_out_losses=False):
r"""Class for HeatLoss calculation. This variant includes the masks of following objects:
- Background (no heat at all)
- specific features
"""
super().__init__()
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
m1 = (target > 0.0)
m2 = torch.logical_not(m1)
ret1 = torch.abs(input[m1] - target[m1])
ret2 = torch.abs(input[m2] - target[m2])
mean1 = torch.mean(ret1)
mean2 = torch.mean(ret2)
if self.print_out_losses:
print("specific features:",mean1.item(), "background:",mean2.item(), end="\r")
return (mean1+mean2)/2
class HeatLoss_OldGen_3(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for HeatLoss calculation. This variant includes the masks of following objects:
- specific feature
- all features in a image
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
m1 = (target > 0.0)
m2 = torch.zeros(m1.shape, dtype=torch.bool, device=input.device)
for dset in range(input.shape[0]):
logor = torch.zeros((input.shape[2], input.shape[3]), dtype=torch.bool, device=input.device)
for i in range(input.shape[1]):
logor = logor | m1[dset,i,:,:]
for i in range(input.shape[1]):
m2[dset,i,:,:] = logor
ret1 = torch.abs(input[m1] - target[m1])
ret2 = torch.abs(input[m2] - target[m2])
mean1 = torch.mean(ret1)
mean2 = torch.mean(ret2)
if self.print_out_losses:
print("specific feature:",mean1.item(), "all features:",mean2.item(), end="\r")
return (mean1+mean2)/2
class HeatLoss_OldGen_4(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for HeatLoss calculation. This variant includes the masks of following objects:
- specific feature
- all features in a image
- background
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
m1 = (target > 0.0)
m2 = torch.zeros(m1.shape, dtype=torch.bool, device=input.device)
m3 = torch.logical_not(m1)
for dset in range(input.shape[0]):
logor = torch.zeros((input.shape[2], input.shape[3]), dtype=torch.bool, device=input.device)
for i in range(input.shape[1]):
logor = logor | m1[dset,i,:,:]
for i in range(input.shape[1]):
m2[dset,i,:,:] = logor
ret1 = torch.abs(input[m1] - target[m1])
ret2 = torch.abs(input[m2] - target[m2])
ret3 = torch.abs(input[m3] - target[m3])
mean1 = torch.mean(ret1)
mean2 = torch.mean(ret2)
mean3 = torch.mean(ret3)
if self.print_out_losses:
print("specific feature:",mean1.item(), "all features:",mean2.item(), "background:",mean3.item(), end="\r")
return (mean1+mean2+mean3)/3
class HeatLoss_NextGen_0(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for Next Generation HeatLoss calculation. This variant includes offline generated masks of following objects:
- specific feature with mask dilation (single loss calculation for every feature)
- convex hull of all featureswith maks dilation
- background
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
hull_not = torch.logical_not(hull)
feature_count = target.shape[1]
loss_items = torch.zeros(feature_count, dtype=torch.float32, device=input.device)
for idx in range(feature_count):
diff = torch.abs(input[:,idx,:,:][masks[:,idx,:,:]] - target[:,idx,:,:][masks[:,idx,:,:]])
if len(diff) > 0:
loss_items[idx] = torch.mean(diff)
loss_hull = torch.mean(torch.abs(input[hull] - target[hull]))
loss_backgrond = torch.mean(torch.abs(input[hull_not] - target[hull_not]))
if self.print_out_losses:
# print loss begin
out_str = ""
print_items_loss = []
sum_items_loss = torch.zeros(1, dtype=torch.float32, device=input.device)
for idx in range(len(loss_items)):
out_str = out_str + "loss_item_"+str(idx) + " {:.4f} "
print_items_loss.append(round(loss_items[idx].item(),4))
sum_items_loss += loss_items[idx]
print_items_loss.append(round(loss_hull.item(),4))
print_items_loss.append(round(loss_backgrond.item(),4))
print((out_str+" loss_hull {:.4f} loss_backgrond {:.4f}").format(*print_items_loss), end="\r")
# print loss end
return (torch.sum(loss_items)+loss_hull+loss_backgrond)/(feature_count+2)
class HeatLoss_NextGen_1(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for Next Generation HeatLoss calculation. This variant includes offline generated masks of the following objects:
- specific feature with mask dilation (calculation of feature loss all the same)
- convex hull of all features with mask dilation
- background
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
hull_not = torch.logical_not(hull)
loss_features = torch.mean(torch.abs(input[masks] - target[masks]))
loss_hull = torch.mean(torch.abs(input[hull] - target[hull]))
loss_backgrond = torch.mean(torch.abs(input[hull_not] - target[hull_not]))
if self.print_out_losses:
print(("loss_features {:.4f} loss_hull {:.4f} loss_backgrond {:.4f}").format(loss_features,loss_hull,loss_backgrond), end="\r")
return (loss_features+loss_hull+loss_backgrond)/3
class HeatLoss_NextGen_2(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for Next Generation HeatLoss calculation. This variant includes offline generated masks of following objects:
- specific feature with mask dilation (calculation of feature loss all the same)
- all features in a image (calculation of feature loss all the same)
- background (calculation of feature loss all the same)
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
all_mask = torch.any(masks,dim=1)[:,None].repeat(1,target.shape[1],1,1)
mask_not = torch.logical_not(masks)
loss_features = torch.mean(torch.abs(input[masks] - target[masks]))
loss_all_features = torch.mean(torch.abs(input[all_mask] - target[all_mask]))
loss_backgrond = torch.mean(torch.abs(input[mask_not] - target[mask_not]))
if self.print_out_losses:
print(("loss_features {:.4f} loss_all_features {:.4f} loss_backgrond {:.4f}").format(loss_features.item(),loss_all_features.item(),loss_backgrond.item()), end="\r")
return (loss_features+loss_all_features+loss_backgrond)/3
class HeatLoss_NextGen_3(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for Next Generation HeatLoss calculation. This variant includes offline generated masks of following objects:
- specific feature with mask dilation (single loss calculation for every feature)
- all features in a image (single loss calculation for every feature)
- background (single loss calculation for every feature)
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
feature_count = target.shape[1]
mask_not = torch.logical_not(masks)
all_mask = torch.any(masks,dim=1)[:,None].repeat(1,target.shape[1],1,1)
loss_features = torch.zeros(feature_count, dtype=torch.float32, device=input.device)
loss_backgrond = torch.zeros(feature_count, dtype=torch.float32, device=input.device)
loss_all_features = torch.zeros(feature_count, dtype=torch.float32, device=input.device)
for idx in range(feature_count):
diff = torch.abs(input[:,idx,:,:][masks[:,idx,:,:]] - target[:,idx,:,:][masks[:,idx,:,:]])
diff_not = torch.abs(input[:,idx,:,:][mask_not[:,idx,:,:]] - target[:,idx,:,:][mask_not[:,idx,:,:]])
diff_all = torch.abs(input[:,idx,:,:][all_mask[:,idx,:,:]] - target[:,idx,:,:][all_mask[:,idx,:,:]])
if len(diff) > 0:
loss_features[idx] = torch.mean(diff)
if len(diff_not) > 0:
loss_backgrond[idx] = torch.mean(diff_not)
if len(diff_all) > 0:
loss_all_features[idx] = torch.mean(diff_all)
loss_features = torch.mean(loss_features)
loss_backgrond = torch.mean(loss_backgrond)
loss_all_features = torch.mean(loss_all_features)
if self.print_out_losses:
print(("loss_features {:.4f} loss_all_features {:.4f} loss_backgrond {:.4f}").format(loss_features.item(),loss_all_features.item(),loss_backgrond.item()), end="\r")
return (loss_features+loss_all_features+loss_backgrond)/3
class AWing(nn.Module):
def __init__(self, alpha=2.1, omega=14, epsilon=1, theta=0.5):
super().__init__()
self.alpha = float(alpha)
self.omega = float(omega)
self.epsilon = float(epsilon)
self.theta = float(theta)
def forward(self, y_pred , y):
lossMat = torch.zeros_like(y_pred)
A = self.omega * (1/(1+(self.theta/self.epsilon)**(self.alpha-y)))*(self.alpha-y)*((self.theta/self.epsilon)**(self.alpha-y-1))/self.epsilon
C = self.theta*A - self.omega*torch.log(1+(self.theta/self.epsilon)**(self.alpha-y))
case1_ind = torch.abs(y-y_pred) < self.theta
case2_ind = torch.abs(y-y_pred) >= self.theta
lossMat[case1_ind] = self.omega*torch.log(1+torch.abs((y[case1_ind]-y_pred[case1_ind])/self.epsilon)**(self.alpha-y[case1_ind]))
lossMat[case2_ind] = A[case2_ind]*torch.abs(y[case2_ind]-y_pred[case2_ind]) - C[case2_ind]
return lossMat
class Loss_weighted(nn.Module):
def __init__(self, W=10, alpha=2.1, omega=14, epsilon=1, theta=0.5):
super().__init__()
self.W = float(W)
self.Awing = AWing(alpha, omega, epsilon, theta)
def forward(self, y_pred , y, M, hull):
#pdb.set_trace()
M = M.float()
Loss = self.Awing(y_pred,y)
weighted = Loss * (self.W * M + 1.)
return weighted.mean()
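# Minimal sketch with dummy tensors (shapes N x C x H x W assumed; hull is accepted but unused):
#   crit = Loss_weighted(W=10)
#   y_pred, y = torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64)
#   M = y > 0.5                     # boolean mask that gets up-weighted by W
#   loss = crit(y_pred, y, M, hull=None)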
class HeatmapLearner:
def __init__(self, features, root_path, images_path, hull_path, size=(512,512), bs=-1, items_count=-1, gpu_id=0,
norm_stats=None, data_aug=None, preload=False, sample_results_path="sample_results",
unet_init_features=16, valid_images_store="valid_images.npy", image_convert_mode="L", metric_counter=1,
sample_img=None, true_positive_threshold=0.02, ntype="unet", lr=1e-03, file_filters_include=None, file_filters_exclude=None, clahe=False,
disable_metrics=False, file_prefix="", loss_func=None, weight_decay=0, num_load_workers=None, dropout=True, dropout_rate=0.15, save_counter=10):
r"""Class for train an Unet-style Neural Network, for heatmap based image recognition
Args:
features : Heatmap features for the neural net. This must be a dict. The Keys must be the folder names for the heatmap features.
Every entry is a dict with the feature types: single_point, multi_point, circle
Example:
{"feature_1":{"type":"single_point"},
"feature_2":{"type":"multi_point"},
"feature_3":{"type":"circle"}}
root_path : The root path, where image files and label files are located
images_path : The path, where the images are located, in relation to the root_path
size : Size of images to pass through the neural network. The sizes must be a power of two with the dimensions of (Heigth, Width).
bs : The Batch Size
norm_stats : Normalize values for images in the form (mean,std).
file_filters_incluce : incluce file filter in images_path, must be a list with include search strings
file_filters_exclude : exclude file filter in images_path, must be a list with exclude search strings
Example:
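A hedged construction sketch; the paths and feature names below are hypothetical:
learner = HeatmapLearner(features={"feature_1": {"type": "single_point"}},
root_path="data", images_path="images", hull_path="hulls",
size=(512, 512), image_convert_mode="L")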
"""
# check assertions
assert power_of_2(size), "size must be a power of 2, to work with this class"
#static variables (they are fix)
heatmap_paths = list(features.keys())
for idx in range(len(heatmap_paths)):
heatmap_paths[idx] = Path(heatmap_paths[idx])
self.features = features
self.__size = size
self.save_counter = save_counter
self.__num_load_workers = num_load_workers
self.__root_path = Path(root_path)
self.__images_path = Path(images_path)
self.__hull_path = self.__root_path/Path(hull_path)
self.__sample_results_path = Path(sample_results_path)
(self.__root_path/self.__sample_results_path).mkdir(parents=True, exist_ok=True)
self.stacked_net = True if ntype=="stacked_hourglass" else False
self.__gpu_id = gpu_id
self.__file_prefix = file_prefix
data_aug = DataAugmentation() if data_aug is None else data_aug
if norm_stats is None:
norm_stats = ([0.131],[0.308]) if image_convert_mode == "L" else ([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
if sample_img is None:
t_img_files = glob.glob(str(self.__root_path/self.__images_path/"*"))
idx = random.randint(0, len(t_img_files)-1)
self.__sample_img = self.__root_path/self.__images_path/Path(t_img_files[idx]).name
else:
self.__sample_img = self.__root_path/self.__images_path/sample_img
true_positive_threshold = round(true_positive_threshold*self.__size[0])
# dynamic variables (they can change during learning)
self.__epochs = 0
self.__train_losses = None
self.__valid_losses = None
self.__metrics = None
file_filters_include = np.array(file_filters_include) if file_filters_include is not None else None
file_filters_exclude = np.array(file_filters_exclude) if file_filters_exclude is not None else None
self.__create_learner(file_filters_include=file_filters_include, file_filters_exclude=file_filters_exclude, valid_images_store=valid_images_store, items_count=items_count,
features=features, bs=bs, data_aug=data_aug,image_convert_mode=image_convert_mode,
heatmap_paths=heatmap_paths, true_positive_threshold=true_positive_threshold, metric_counter=metric_counter,
lr=lr, clahe=clahe, norm_stats=norm_stats, unet_init_features=unet_init_features, ntype=ntype, disable_metrics=disable_metrics, loss_func=loss_func,
weight_decay = weight_decay, dropout=dropout, dropout_rate=dropout_rate)
def __create_learner(self, file_filters_include, file_filters_exclude, valid_images_store, items_count, features, bs, data_aug,
image_convert_mode, heatmap_paths, true_positive_threshold, metric_counter, lr, clahe, norm_stats,
unet_init_features, ntype, disable_metrics, loss_func, weight_decay, dropout, dropout_rate):
training_data, valid_data = self.__load_data(features=features,file_filters_include=file_filters_include,
file_filters_exclude = file_filters_exclude, valid_images_store=valid_images_store,
items_count=items_count)
self.__unet_in_channels = 1 if image_convert_mode == "L" else 3
self.__unet_out_channls = len(heatmap_paths)
heatmap_files_sample = []
for feat in features.keys():
heatmap_files_sample.append(self.__root_path/feat/self.__sample_img.name)
self.sample_dataset = CustomHeatmapDataset(data=[[self.__sample_img,heatmap_files_sample]], hull_path=self.__hull_path, grayscale=image_convert_mode == "L",
normalize_mean=norm_stats[0],normalize_std=norm_stats[1], is_valid=True,
clahe=clahe, size=self.__size)
self.train_dataset = CustomHeatmapDataset(data=training_data, hull_path=self.__hull_path, grayscale=image_convert_mode == "L",
normalize_mean=norm_stats[0],normalize_std=norm_stats[1], data_aug=data_aug, clahe=clahe,
size=self.__size)
self.valid_dataset = CustomHeatmapDataset(data=valid_data, hull_path=self.__hull_path, grayscale=image_convert_mode == "L",
normalize_mean=norm_stats[0],normalize_std=norm_stats[1], is_valid=True, clahe=clahe,
size=self.__size)
sample_img = None
if self.__sample_img is not None:
to_t = transforms.ToTensor()
img = to_t(load_image(self.__root_path/self.__images_path/self.__sample_img,
convert_mode=image_convert_mode, size=self.__size, to_numpy=False))
masks = []
for idx in range(len(heatmap_paths)):
heat = to_t(load_heatmap(self.__root_path/heatmap_paths[idx]/self.__sample_img))
masks.append(heat)
sample_img = (img,masks)
metric = None if disable_metrics else heatmap_metric(features = features, true_positive_threshold = true_positive_threshold, metric_counter = metric_counter)
net = self.__get_net(ntype, unet_init_features, dropout, dropout_rate).to(torch.device("cuda:"+str(self.__gpu_id)))
opt = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
loss_func = HeatLoss_NextGen_1() if loss_func is None else loss_func
loss_func = loss_func.to(torch.device("cuda:"+str(self.__gpu_id)))
if bs == -1:
batch_estimator = Batch_Size_Estimator(net=net, opt=opt,
loss_func=loss_func,
gpu_id=self.__gpu_id, dataset = self.train_dataset)
bs = batch_estimator.find_max_bs()
train_dl = DataLoader(self.train_dataset, batch_size=bs, shuffle=True, num_workers=num_workers() if self.__num_load_workers is None else self.__num_load_workers, pin_memory=False)
valid_dl = DataLoader(self.valid_dataset, batch_size=bs, shuffle=True, num_workers=num_workers() if self.__num_load_workers is None else self.__num_load_workers, pin_memory=False)
self.learner = Learner(model=net,loss_func=loss_func, train_dl=train_dl, valid_dl=valid_dl,
optimizer=opt, learner_callback= metric,gpu_id= self.__gpu_id,
predict_smaple_func=self.predict_sample, save_func=self.save_func,
stacked_net= self.stacked_net)
def __get_net(self, ntype, unet_init_features, dropout, dropout_rate):
if ntype == "res_unet++":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=True, squeeze_excite=True,
aspp=True, attention=True, bn_relu_at_first=True, bn_relu_at_end=False)
elif ntype == "res_unet_bn_relu_end":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=True, squeeze_excite=False,
aspp=False, attention=False, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "attention_unet":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=False, squeeze_excite=False,
aspp=False, attention=True, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "aspp_unet":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=False, squeeze_excite=False,
aspp=True, attention=False, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "squeeze_excite_unet":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=False, squeeze_excite=True,
aspp=False, attention=False, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "res_unet_bn_relu_first":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=True, squeeze_excite=False,
aspp=False, attention=False, bn_relu_at_first=True, bn_relu_at_end=False)
elif ntype == "unet":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=False, squeeze_excite=False,
aspp=False, attention=False, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "res34":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=True, squeeze_excite=False,
aspp=False, attention=False, bn_relu_at_first=True, bn_relu_at_end=False,
block_sizes_down = res34_downsample, downsample_method=downsample_stride,
blocksize_bottleneck = 2, block_sizes_up=[2,2,2,2])
elif ntype == "sp_unet":
net = SE_Res_UNet(n_channels=self.__unet_in_channels, n_classes= self.__unet_out_channls,
init_features=unet_init_features, dropout=dropout, rate=dropout_rate)
elif ntype == "stacked_hourglass":
net = hg(num_stacks=4,num_blocks=2,num_classes=self.__unet_out_channls, input_features=self.__unet_in_channels)
else:
raise("Net type ´"+ ntype+"´ is not implemented!")
return net
def __load_data(self, features, file_filters_include, file_filters_exclude, valid_images_store, items_count):
def find_valid_imgs_func(all_image_files):
if not (self.__root_path/valid_images_store).is_file():
valid_images = [img for img in sample(list(all_image_files), int(len(all_image_files)*0.2))]
np.save(self.__root_path/valid_images_store, valid_images, allow_pickle=True)
return list(np.load(self.__root_path/valid_images_store, allow_pickle=True))
def filter_files(all_image_files):
if file_filters_include is not None:
new_filenames = []
for filename in all_image_files:
for ffilter in file_filters_include:
if filename.find((ffilter)) != -1:
new_filenames.append(filename)
break
all_image_files =
|
np.array(new_filenames)
|
numpy.array
|
import numpy as np
from .weightedLSByState import _weighted_LS_by_state
from .collectWLSInfo import _collect_WLS_info
from .waveletTransform import _w_corr
from .fastASD import _fast_ASD_weighted_group
def _fit_analog_filters(stim, analog_symb, gamma, xi, analog_emit_w, options, train_data):
new_stim = []
analog_emit_std = np.array([])
num_analog_params = analog_symb[0].shape[0]
ar_corr1 = np.zeros((options['num_states'], num_analog_params))
ar_corr2 = np.zeros(num_analog_params)
if options['evaluate'] == True:
for analog_num in range(0, num_analog_params):
for trial in range(0, len(train_data)):
new_stim.append({'gamma' : gamma[train_data[trial]], 'xi' : xi[train_data[trial]], 'num_states' : options['num_emissions']})
new_stim[trial]['symb'] = analog_symb[train_data[trial]][analog_num, :]
new_stim[trial]['good_emit'] = ~np.isnan(analog_symb[train_data[trial]][analog_num, :])
[these_stim, these_symb, these_gamma] = _collect_WLS_info(new_stim)
for states in range(0, options['num_states']):
ar_corr1[states, analog_num] = _w_corr(these_stim * analog_emit_w[states, analog_num, :], these_symb, these_gamma[states, :].T)
ar_corr2[analog_num] = np.sum(np.mean(these_gamma, axis = 1) * ar_corr1[:, analog_num], axis = 0)
else:
for analog_num in range(0, num_analog_params):
for trial in range(0, len(train_data)):
new_stim.append({'num_states' : options['num_emissions']})
new_stim[trial]['symb'] = analog_symb[train_data[trial]][analog_num, :]
new_stim[trial]['good_emit'] = ~np.isnan(analog_symb[train_data[trial]][analog_num, :])
[these_stim, these_symb, these_gamma] = _collect_WLS_info(new_stim)
# If there are more good points than this maximum, loop over overlapping subsets until all of them have been used (e.g. a few sub-samples, depending on how far the count exceeds 15k).
max_good_pts = 15000
num_analog_iter = int(np.ceil(these_stim.shape[0] / max_good_pts))
if num_analog_iter > 1:
analog_offset = (these_stim.shape[0] - max_good_pts) / (num_analog_iter - 1)
iter_stim = np.zeros((num_analog_iter, 2))
for nai in range(0, num_analog_iter):
iter_stim[nai, :] = np.floor(analog_offset * nai) + [1, max_good_pts]
else:
iter_stim = [1, these_stim.shape[0]]
randomized_stim = np.random.permutation(these_stim.shape[0])
ae_w = np.zeros((num_analog_iter, options['num_states'], analog_emit_w.shape[2]))
ae_std =
|
np.zeros((num_analog_iter, options['num_states'], analog_emit_w.shape[2]))
|
numpy.zeros
|
"""Tests for models."""
import time
import numpy as np
from predicators.src.torch_models import (NeuralGaussianRegressor,
MLPClassifier, MLPRegressor)
from predicators.src import utils
def test_basic_mlp_regressor():
"""Tests for MLPRegressor."""
utils.reset_config({
"mlp_regressor_max_itr": 100,
"mlp_regressor_clip_gradients": True
})
input_size = 3
output_size = 2
num_samples = 5
model = MLPRegressor()
X = np.ones((num_samples, input_size))
Y = np.zeros((num_samples, output_size))
model.fit(X, Y)
x = np.ones(input_size)
predicted_y = model.predict(x)
expected_y = np.zeros(output_size)
assert predicted_y.shape == expected_y.shape
assert np.allclose(predicted_y, expected_y, atol=1e-2)
# Test with nonzero outputs.
Y = 75 * np.ones((num_samples, output_size))
model.fit(X, Y)
x = np.ones(input_size)
predicted_y = model.predict(x)
expected_y = 75 * np.ones(output_size)
assert predicted_y.shape == expected_y.shape
assert np.allclose(predicted_y, expected_y, atol=1e-2)
def test_neural_gaussian_regressor():
"""Tests for NeuralGaussianRegressor."""
utils.reset_config({"neural_gaus_regressor_max_itr": 100})
input_size = 3
output_size = 2
num_samples = 5
model = NeuralGaussianRegressor()
X = np.ones((num_samples, input_size))
Y = np.zeros((num_samples, output_size))
model.fit(X, Y)
x = np.ones(input_size)
mean = model.predict_mean(x)
expected_y = np.zeros(output_size)
assert mean.shape == expected_y.shape
assert np.allclose(mean, expected_y, atol=1e-2)
rng = np.random.default_rng(123)
sample = model.predict_sample(x, rng)
assert sample.shape == expected_y.shape
def test_mlp_classifier():
"""Tests for MLPClassifier."""
utils.reset_config()
input_size = 3
num_class_samples = 5
X = np.concatenate([
np.zeros((num_class_samples, input_size)),
np.ones((num_class_samples, input_size))
])
y = np.concatenate(
[np.zeros((num_class_samples)),
np.ones((num_class_samples))])
model = MLPClassifier(input_size, 100)
model.fit(X, y)
prediction = model.classify(np.zeros(input_size))
assert not prediction
prediction = model.classify(np.ones(input_size))
assert prediction
# Test for early stopping
start_time = time.time()
utils.reset_config({
"mlp_classifier_n_iter_no_change": 1,
"learning_rate": 1e-2
})
model = MLPClassifier(input_size, 10000)
model.fit(X, y)
assert time.time() - start_time < 3, "Didn't early stop"
# Test with no positive examples.
num_class_samples = 1000
X = np.concatenate([
np.zeros((num_class_samples, input_size)),
np.ones((num_class_samples, input_size))
])
y = np.zeros(len(X))
model = MLPClassifier(input_size, 10000)
start_time = time.time()
model.fit(X, y)
assert time.time() - start_time < 1, "Fitting was not instantaneous"
prediction = model.classify(np.zeros(input_size))
assert not prediction
prediction = model.classify(np.ones(input_size))
assert not prediction
# Test with no negative examples.
y = np.ones(len(X))
model = MLPClassifier(input_size, 10000)
start_time = time.time()
model.fit(X, y)
assert time.time() - start_time < 1, "Fitting was not instantaneous"
prediction = model.classify(np.zeros(input_size))
assert prediction
prediction = model.classify(
|
np.ones(input_size)
|
numpy.ones
|
# -*- coding: utf-8 -*-
"""
Tools for diffraction and FEMTO analysis
Created on Tue Apr 12 14:28:22 2016
@author: esposito_v
"""
import numpy as np
import diffractionAngles_modes as diff_mode
def hklFromAngles(E, delta, gamma, omega, alpha, U, B):
"""
Calculate the hkl vector for a given set of angles. For horizontal geometry
"""
wavelength = 12.3984/E;
K = 2*np.pi/wavelength;
delta = np.deg2rad(delta)
gamma = -np.deg2rad(gamma) #sign convention
omega = np.deg2rad(omega)
alpha = np.deg2rad(alpha)
"""rotation matrices"""
Delta = np.array([[1, 0, 0],
[0, np.cos(delta), -np.sin(delta)],
[0, np.sin(delta), np.cos(delta)]])
Gamma = np.array([[np.cos(gamma), -np.sin(gamma), 0],
[np.sin(gamma), np.cos(gamma), 0],
[0, 0, 1]])
Omega = np.array([[np.cos(omega), -np.sin(omega), 0],
[np.sin(omega), np.cos(omega), 0],
[0, 0, 1]])
Alpha = np.array([[1, 0, 0],
[0, np.cos(alpha), -np.sin(alpha)],
[0, np.sin(alpha), np.cos(alpha)]])
""" calculate H """
UBH =
|
np.dot(Gamma, Delta)
|
numpy.dot
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors
from matplotlib.ticker import AutoLocator
from matplotlib.transforms import Bbox
import py21cmfast as p21c
from py21cmfast import plotting
from tf_keras_vis.saliency import Saliency
import os
# Change the sigmoid activation of the last layer to a linear one. This is required for creating saliency maps
def model_modifier(m):
m.layers[-1].activation = tf.keras.activations.linear
return m
# Read light-cones from tfrecords files
def parse_function(files):
keys_to_features = {"label":tf.io.FixedLenFeature((6),tf.float32),
"image":tf.io.FixedLenFeature((),tf.string),
"tau":tf.io.FixedLenFeature((),tf.float32),
"gxH":tf.io.FixedLenFeature((92),tf.float32),
"z":tf.io.FixedLenFeature((92),tf.float32),}
parsed_features = tf.io.parse_example(files, keys_to_features)
image = tf.io.decode_raw(parsed_features["image"],tf.float16)
image = tf.reshape(image,(140,140,2350))
return image, parsed_features["label"] # Image, m_WDM,Omega_m,L_X,E_0,T_vir,zeta
# Plot the saliency maps over the light-cones. Axis scales depend on the value of Omega_m. This function expects all light-cones to have the same Omega_m
def plot(filename,sim_lightcone,mock_lightcone,parameters,saliency_maps=False):
# Define parameter names, ranges and latex code
parameter_list=[["WDM",0.3,10,"$m_{WDM}$"],["OMm",0.2,0.4,"$\Omega_m$"],["LX",38,42,"$L_X$"],["E0",100,1500,"$E_0$"],["Tvir",4,5.3,"$T_{vir}$"],["Zeta",10,250,"$\zeta$"]]
fig, ax = plt.subplots(
2*len(parameters),
1,
sharex=False,
sharey=False,
figsize=(
2350 * (0.007) + 0.5 ,
140*0.02*len(parameters)
),
)
# Create opt mock and bare simulation saliency maps for each of the requested parameters
for x,para in enumerate(parameters+parameters):
# Plot bare simulations in the first half
if x<len(parameters):
fig, ax[x]=plotting.lightcone_sliceplot(sim_lightcone,slice_axis=0,slice_index=70,fig=fig,ax=ax[x],zticks="frequency")
ax[x].images[-1].colorbar.remove()
# Plot opt mocks in the second half
else:
fig, ax[x]=plotting.lightcone_sliceplot(mock_lightcone,slice_axis=0,slice_index=70,fig=fig,ax=ax[x])
ax[x].images[-1].colorbar.remove()
# Plot saliency maps
if saliency_maps is not False:
extent = (
0,
2350*200/140,
0,
200,
)
ax[x].imshow(saliency_maps[x],origin="lower",cmap=cm.hot,alpha=0.7,extent=extent)
# Adjust the design
if x>0 and x<2*len(parameters)-1:
ax[x].set_xticks([])
ax[x].set_xlabel("")
ax[x].tick_params(labelsize=12)
ax[x].text(10,10,"$\delta "+parameter_list[para][3][1:],color="w",fontsize=14)
ax[x].set_ylabel("")
fig.text(0.01,0.62+0.02*len(parameters),"y-axis [Mpc]",rotation="vertical",fontsize=12)
fig.text(0.01,0.23-0.005*len(parameters),"y-axis [Mpc]",rotation="vertical",fontsize=12)
ax[0].xaxis.tick_top()
ax[0].set_xlabel('Frequency [MHz]',fontsize=12)
ax[0].xaxis.set_label_position('top')
ax[x].set_xlabel("Redshift",fontsize=12)
plt.tight_layout()
for y in range(len(parameters)):
pos1=ax[x-y].get_position().get_points()+[[0.02,0.08/len(parameters)-0.02],[0.02,0.08/len(parameters)-0.02]]
pos2=ax[y].get_position().get_points()+[[0.02,0.08/len(parameters)-0.03],[0.02,0.08/len(parameters)-0.03]]
ax[x-y].set_position(Bbox(pos1-[[0,0.018],[0,0.018]]))
ax[y].set_position(Bbox(pos2+[[0,0.018],[0,0.018]]))
ax[y].text(10,150,"Sim",color="w",fontsize=14)
ax[x-y].text(10,150,"Opt Mock",color="w",fontsize=14)
# Use a colorbar with the "EoR" cmap from 21cmFAST
cbar = fig.colorbar(cm.ScalarMappable(norm=colors.Normalize(vmin=-150,vmax=30), cmap="EoR"), ax=ax,aspect=10*len(parameters))
cbar_label = r"$\delta T_B$ [mK]"
cbar.ax.set_ylabel(cbar_label,fontsize=12)
cbar.ax.tick_params(labelsize=12)
os.makedirs(os.path.dirname(filename),exist_ok=True)
plt.savefig(filename)
plt.close()
def create_saliency_maps(filename,sim_lightcones,sim_model,mock_lightcones,mock_model,parameters,OMm):
sim_saliency_maps=False
mock_saliency_maps=False
sim_saliency = Saliency(sim_model,
model_modifier=model_modifier,
clone=True)
mock_saliency = Saliency(mock_model,
model_modifier=model_modifier,
clone=True)
# Generate saliency maps for the requested parameters
for para in parameters:
def loss(output):
return output[0][para]
combined_sim_saliency=np.zeros((140,2350))
combined_mock_saliency=
|
np.zeros((140,2350))
|
numpy.zeros
|
import numpy as np
from scipy.interpolate import interp1d
from scipy import ndimage
import scipy.constants as sc
import astropy.constants as const
import astropy.units as u
import matplotlib.pyplot as plt
from matplotlib import colors
default_cmap = "inferno"
sigma_to_FWHM = 2.0 * np.sqrt(2.0 * np.log(2))
FWHM_to_sigma = 1.0 / sigma_to_FWHM
arcsec = np.pi / 648000
def bin_image(im, n, func=np.sum):
# bin an image in blocks of n x n pixels
# return a image of size im.shape/n
nx = im.shape[0]
nx_new = nx // n
x0 = (nx - nx_new * n) // 2
ny = im.shape[1]
ny_new = ny // n
y0 = (ny - ny_new * n) // 2
return np.reshape(
np.array(
[
func(im[x0 + k1 * n : (k1 + 1) * n, y0 + k2 * n : (k2 + 1) * n])
for k1 in range(nx_new)
for k2 in range(ny_new)
]
),
(nx_new, ny_new),
)
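# Example (shape assumed): binning a 100 x 100 array of ones in 4 x 4 blocks with the
# default func=np.sum gives a 25 x 25 array filled with 16:
#   binned = bin_image(np.ones((100, 100)), 4)   # binned.shape == (25, 25)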
def Wm2_to_Jy(nuFnu, nu):
'''
Convert from W.m-2 to Jy
nu [Hz]
'''
return 1e26 * nuFnu / nu
def Jy_to_Wm2(Fnu, nu):
'''
Convert from Jy to W.m-2
nu [Hz]
'''
return 1e-26 * Fnu * nu
def Jybeam_to_Tb(Fnu, nu, bmaj, bmin):
'''
Convert Flux density in Jy/beam to brightness temperature [K]
Flux [Jy]
nu [Hz]
bmaj, bmin in [arcsec]
T [K]
'''
beam_area = bmin * bmaj * arcsec ** 2 * np.pi / (4.0 * np.log(2.0))
exp_m1 = 1e26 * beam_area * 2.0 * sc.h / sc.c ** 2 * nu ** 3 / Fnu
hnu_kT = np.log1p(np.maximum(exp_m1, 1e-10))
Tb = sc.h * nu / (hnu_kT * sc.k)
return Tb
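# Usage sketch (numbers assumed): brightness temperature of a 1 Jy/beam source observed
# at 230 GHz with a circular 1" beam:
#   Tb = Jybeam_to_Tb(1.0, 230e9, bmaj=1.0, bmin=1.0)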
def Jy_to_Tb(Fnu, nu, pixelscale):
'''
Convert Flux density in Jy/pixel to brightness temperature [K]
Flux [Jy]
nu [Hz]
bmaj, bmin in [arcsec]
T [K]
'''
pixel_area = (pixelscale * arcsec) ** 2
exp_m1 = 1e16 * pixel_area * 2.0 * sc.h / sc.c ** 2 * nu ** 3 / Fnu
hnu_kT = np.log1p(exp_m1 + 1e-10)
Tb = sc.h * nu / (hnu_kT * sc.k)
return Tb
def Wm2_to_Tb(nuFnu, nu, pixelscale):
"""Convert flux converted from Wm2/pixel to K using full Planck law.
Convert Flux density in Jy/beam to brightness temperature [K]
Flux [W.m-2/pixel]
nu [Hz]
bmaj, bmin, pixelscale in [arcsec]
"""
pixel_area = (pixelscale * arcsec) ** 2
exp_m1 = pixel_area * 2.0 * sc.h * nu ** 4 / (sc.c ** 2 * nuFnu)
hnu_kT = np.log1p(exp_m1)
Tb = sc.h * nu / (sc.k * hnu_kT)
return Tb
def telescope_beam(wl,D):
""" wl and D in m, returns FWHM in arcsec"""
return 0.989 * wl/D / 4.84814e-6
def make_cut(z, x0,y0,x1,y1,num=None,plot=False):
"""
Make a cut in image 'z' along a line between (x0,y0) and (x1,y1)
x0, y0,x1,y1 are pixel coordinates
"""
if plot:
vmax = np.max(z)
vmin = vmax * 1e-6
norm = colors.LogNorm(vmin=vmin, vmax=vmax, clip=True)
plt.imshow(z, origin="lower", norm=norm)
plt.plot([x0,x1],[y0,y1])
if num is not None:
# Extract the values along the line, using cubic interpolation
x, y = np.linspace(x0, x1, num), np.linspace(y0, y1, num)
zi = ndimage.map_coordinates(z, np.vstack((y,x)))
else:
# Extract the values along the line at the pixel spacing
length = int(np.hypot(x1-x0, y1-y0))
x, y = np.linspace(x0, x1, length), np.linspace(y0, y1, length)
zi = z[y.astype(int), x.astype(int)]
return zi
class DustExtinction:
import os
__dirname__ = os.path.dirname(__file__)
wl = []
kext = []
_extinction_dir = __dirname__ + "/extinction_laws"
_filename_start = "kext_albedo_WD_MW_"
_filename_end = "_D03.all"
V = 5.47e-1 # V band wavelength in micron
def __init__(self, Rv=3.1, **kwargs):
self.filename = (
self._extinction_dir
+ "/"
+ self._filename_start
+ str(Rv)
+ self._filename_end
)
self._read(**kwargs)
def _read(self):
with open(self.filename, 'r') as file:
f = []
for line in file:
if (not line.startswith("#")) and (
len(line) > 1
): # Skipping comments and empty lines
line = line.split()
self.wl.append(float(line[0]))
kpa = float(line[4])
albedo = float(line[1])
self.kext.append(kpa / (1.0 - albedo))
# Normalize extinction in V band
kext_interp = interp1d(self.wl, self.kext)
kextV = kext_interp(self.V)
self.kext = np.array(self.kext) / kextV
def redenning(self, wl, Av):
"""
Computes extinction factor to apply for a given Av
Flux_red = Flux * redenning
wl in micron
"""
kext_interp = interp1d(self.wl, self.kext)
kext = kext_interp(wl)
tau_V = 0.4 * np.log(10.0) * Av
return np.exp(-tau_V * kext)
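# Hedged usage sketch (Rv, wavelength and Av values assumed):
#   ext = DustExtinction(Rv=3.1)
#   attenuation = ext.redenning(wl=2.2, Av=1.0)   # multiply a flux by this factor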
def Hill_radius():
pass
#d * (Mplanet/3*Mstar)**(1./3)
def splash2mcfost(anglex, angley, anglez):
#Convert the splash angles to mcfost angles
# Base unit vector
x0 = [1,0,0]
y0 = [0,1,0]
z0 = [0,0,1]
# Splash rotated vectors
x = _rotate_splash_axes(x0,-anglex,-angley,-anglez)
y = _rotate_splash_axes(y0,-anglex,-angley,-anglez)
z = _rotate_splash_axes(z0,-anglex,-angley,-anglez)
# MCFOST angles
mcfost_i = np.arccos(np.dot(z,z0)) * 180./np.pi
if abs(mcfost_i) > 1e-30:
print("test1")
# angle du vecteur z dans le plan (-y0,x0)
mcfost_az = (np.arctan2(np.dot(z,x0), -np.dot(z,y0)) ) * 180./np.pi
# angle du vecteur z0 dans le plan x_image, y_image (orientation astro + 90deg)
mcfost_PA = -( np.arctan2(np.dot(x,z0), np.dot(y,z0)) ) * 180./np.pi
else:
print("test2")
mcfost_az = 0.
# angle du vecteur y dans le plan x0, y0
mcfost_PA = (np.arctan2(np.dot(y,x0),np.dot(y,y0)) ) * 180./np.pi
print("anglex =",anglex, "angley=", angley, "anglez=", anglez,"\n")
print("Direction to oberver=",z)
print("x-image=",x)
print("y_image = ", y,"\n")
print("MCFOST parameters :")
print("inclination =", mcfost_i)
print("azimuth =", mcfost_az)
print("PA =", mcfost_PA)
return [mcfost_i, mcfost_az, mcfost_PA]
def _rotate_splash(xyz, anglex, angley, anglez):
# Defines rotations as in splash
# This function is to rotate the data
x = xyz[0]
y = xyz[1]
z = xyz[2]
# rotate about z
if np.abs(anglez) > 1e-30:
r = np.sqrt(x**2+y**2)
phi = np.arctan2(y,x)
phi -= anglez/180*np.pi
x = r*np.cos(phi)
y = r*np.sin(phi)
# rotate about y
if np.abs(angley) > 1e-30:
r = np.sqrt(z**2+x**2)
phi = np.arctan2(z,x)
phi -= angley/180*np.pi
x = r*np.cos(phi)
z = r*np.sin(phi)
# rotate about x
if np.abs(anglex) > 1e-30:
r = np.sqrt(y**2+z**2)
phi = np.arctan2(z,y)
phi -= anglex/180*np.pi
y = r*np.cos(phi)
z = r*np.sin(phi)
return np.array([x,y,z])
def _rotate_splash_axes(xyz, anglex, angley, anglez):
# Defines rotations as in splash, but in reserve order
# as we rotate the axes instead of the data
x = xyz[0]
y = xyz[1]
z = xyz[2]
# rotate about x
if np.abs(anglex) > 1e-30:
r = np.sqrt(y**2+z**2)
phi = np.arctan2(z,y)
phi -= anglex/180*np.pi
y = r*np.cos(phi)
z = r*np.sin(phi)
# rotate about y
if np.abs(angley) > 1e-30:
r = np.sqrt(z**2+x**2)
phi = np.arctan2(z,x)
phi -= angley/180*np.pi
x = r*np.cos(phi)
z = r*np.sin(phi)
# rotate about z
if np.abs(anglez) > 1e-30:
r = np.sqrt(x**2+y**2)
        phi = np.arctan2(y,x)  # target API: numpy.arctan2
        # remainder reconstructed to mirror _rotate_splash above
        phi -= anglez/180*np.pi
        x = r*np.cos(phi)
        y = r*np.sin(phi)
    return np.array([x,y,z])
import os
import pickle
import numpy as np
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import codes.my_helper as helper
import codes.my_models as my_models
def main():
# DATA
# Train images: read from images folder, resize, normalize to 0..1 range
data_path = 'datas/'
images_camvid = helper.read_images(data_path + 'images_camvid/image/')
size = (320, 256)
images_camvid = helper.resize_images(images_camvid, size)
images_camvid = np.array(images_camvid) / 255
images_roma = helper.read_images(data_path + 'images_roma/image/')
images_roma = helper.resize_images(images_roma, size)
    images_roma = np.array(images_roma) / 255  # target API: numpy.array (reconstructed to mirror the images_camvid line above)
    # (snippet truncated here in the source)
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 06 16:20:36 2017
@author: marcus
"""
from psychopy import visual
from collections import deque
import numpy as np
from psychopy.tools.monitorunittools import cm2deg, deg2pix
import copy
from matplotlib.patches import Ellipse
import sys
import warnings
if sys.version_info[0] == 2: # if Python 2:
range = xrange
pass
#%%
def tobii2norm(pos):
    ''' Converts from Tobii's coordinate system [0, 1] to PsychoPy's 'norm' (-1, 1).
    Note that the Tobii coordinate system starts in the upper left corner
    and the PsychoPy coordinate system in the center.
    y = -1 in PsychoPy coordinates means bottom of screen.
    Args: pos: N x 2 array with positions
    '''
pos_temp = copy.deepcopy(pos) # To avoid that the called parameter is changed
# Convert between coordinate system
pos_temp[:, 0] = 2.0 * (pos_temp[:, 0] - 0.5)
pos_temp[:, 1] = 2.0 * (pos_temp[:, 1] - 0.5) * -1
return pos_temp
def norm2tobii(pos):
    ''' Converts from PsychoPy's 'norm' (-1, 1) to Tobii's coordinate system [0, 1].
    Note that the Tobii coordinate system starts in the upper left corner
    and the PsychoPy coordinate system in the center.
    y = -1 in PsychoPy coordinates means bottom of screen.
    Args: pos: N x 2 array with positions
    '''
pos_temp = copy.deepcopy(pos) # To avoid that the called parameter is changed
# Convert between coordinate system
pos_temp[:, 0] = pos_temp[:, 0] / 2.0 + 0.5
pos_temp[:, 1] = pos_temp[:, 1]/ -2.0 + 0.5
return pos_temp
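# Coordinate convention, illustrated (values follow from the formulas above):
#   tobii2norm(np.array([[0.0, 0.0]]))  -> [[-1.,  1.]]   (Tobii upper-left corner)
#   tobii2norm(np.array([[0.5, 0.5]]))  -> [[ 0.,  0.]]   (screen centre)
#   norm2tobii(tobii2norm(pos)) recovers the original positions (round trip).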
def tobii2deg(pos, mon):
    ''' Converts Tobii's coordinate system [0, 1] to degrees.
    Note that the Tobii coordinate system starts in the upper left corner
    and the PsychoPy coordinate system in the center.
    Assumes pixels are square.
    Args: pos: N x 2 array with calibration positions in [0, 1]
        mon: PsychoPy monitor object
    '''
pos_temp = copy.deepcopy(pos) # To avoid that the called parameter is changed
# Center
pos_temp[:, 0] = pos_temp[:, 0] - 0.5
pos_temp[:, 1] = (pos_temp[:, 1] - 0.5) * -1
    # Convert to psychopy coordinates (center)
pos_temp[:, 0] = pos_temp[:, 0] * mon.getWidth()
pos_temp[:, 1] = pos_temp[:, 1] * mon.getWidth() * (float(mon.getSizePix()[1]) / \
float(mon.getSizePix()[0]))
# Convert to deg.
pos_deg = cm2deg(pos_temp, mon, correctFlat=False)
return pos_deg
def deg2tobii(pos, mon):
    ''' Converts from degrees to Tobii's coordinate system [0, 1].
    Note that the Tobii coordinate system starts in the upper left corner
    and the PsychoPy coordinate system in the center.
    '''
# First convert from deg to pixels
pos[:, 0] = deg2pix(pos[:, 0], mon, correctFlat=False)
pos[:, 1] = deg2pix(pos[:, 1], mon, correctFlat=False)
# Then normalize data -1,1
pos[:, 0] = pos[:, 0] / float(mon.getSizePix()[0])/2
pos[:, 1] = pos[:, 1] / float(mon.getSizePix()[1])/2
#.. finally shift to tobii coordinate system
return norm2tobii(pos)
def tobii2pix(pos, mon):
    ''' Converts from Tobii's coordinate system [0, 1] to pixels.
    Note that both the Tobii coordinate system and the screen (pixel)
    coordinate system start in the upper left corner.
    Args: pos: N x 2 array with calibration positions in [0, 1]
        mon: PsychoPy monitor object
    '''
# Center
pos[:, 0] = pos[:, 0] * mon.getSizePix()[0]
pos[:, 1] = pos[:, 1] * mon.getSizePix()[1]
    # Return pixel coordinates
return pos
def pix2tobii(pos, mon):
    ''' Converts from PsychoPy pixels to Tobii's coordinate system [0, 1].
    Note that the Tobii coordinate system starts in the upper left corner
    and the PsychoPy coordinate system in the center.
    '''
# Normalize data -1,1
pos[:, 0] = pos[:, 0] / (float(mon.getSizePix()[0]) / 2.0)
pos[:, 1] = pos[:, 1] / (float(mon.getSizePix()[1]) / 2.0)
#.. finally shift to tobii coordinate system
return norm2tobii(pos)
#%%
class MyDot2:
'''
Generates the best fixation target according to Thaler et al. (2013)
'''
def __init__(self, win, outer_diameter=0.5, inner_diameter=0.1,
outer_color = 'black', inner_color = 'white',units = 'deg'):
        '''
        Generates a stimulus (fixation) dot; sizes are interpreted
        in `units` (default 'deg')
        '''
# Set propertis of dot
outer_dot = visual.Circle(win,fillColor = outer_color, radius = outer_diameter/2,
units = units)
inner_dot = visual.Circle(win,fillColor = outer_color, radius = inner_diameter/2,
units = units)
line_vertical = visual.Rect(win, width=inner_diameter, height=outer_diameter,
fillColor=inner_color, units = units)
line_horizontal = visual.Rect(win, width=outer_diameter, height=inner_diameter,
fillColor=inner_color, units = units)
self.outer_dot = outer_dot
self.inner_dot = inner_dot
self.line_vertical = line_vertical
self.line_horizontal = line_horizontal
def set_size(self, size):
''' Sets the size of the stimulus as scaled by 'size'
That is, if size == 1, the size is not altered.
'''
self.outer_dot.radius = size / 2
self.line_vertical.height = size
self.line_horizontal.width = size
def set_pos(self, pos):
'''
sets position of dot
pos = [x,y]
'''
self.outer_dot.pos = pos
self.inner_dot.pos = pos
self.line_vertical.pos = pos
self.line_horizontal.pos = pos
def get_pos(self):
'''
get position of dot
'''
pos = self.outer_dot.pos
return pos
def get_size(self):
'''
get size of dot
'''
return self.outer_dot.size
def draw(self):
'''
draws the dot
'''
self.outer_dot.draw()
self.line_vertical.draw()
self.line_horizontal.draw()
self.inner_dot.draw()
def invert_color(self):
'''
inverts the colors of the dot
'''
temp = self.outer_dot.fillColor
self.outer_dot.fillColor = self.inner_dot.fillColor
self.inner_dot.fillColor = temp
def set_color(self, color):
self.outer_dot.lineColor = 'blue'
self.outer_dot.fillColor = 'blue'
self.inner_dot.fillColor = 'blue'
self.inner_dot.lineColor = 'blue'
self.line_vertical.lineColor = 'red'
self.line_horizontal.fillColor = 'red'
self.line_vertical.fillColor = 'red'
self.line_horizontal.lineColor = 'red'
#%%
class RingBuffer(object):
""" A simple ring buffer based on the deque class"""
def __init__(self, maxlen=200):
# Create que with maxlen
self.maxlen = maxlen
self._b = deque(maxlen=maxlen)
def clear(self):
""" Clears buffer """
return(self._b.clear())
def get_all(self):
""" Returns all samples from buffer and empties the buffer"""
lenb = len(self._b)
return([self._b.popleft() for i in range(lenb)])
def peek(self):
""" Returns all samples from buffer without emptying the buffer
First remove an element, then add it again
"""
b_temp = copy.copy(self._b)
c = []
if len(b_temp) > 0:
for i in range(len(b_temp)):
c.append(b_temp.pop())
return(c)
    def append(self, L):
        """Append the most recent sample (list L) to the buffer"""
        self._b.append(L)
#%%
def ellipse(xy = (0, 0), width=1, height=1, angle=0, n_points=50):
''' Generates edge points for an ellipse
Args:
xy - center of ellipse
width - width of ellipse
height - height of ellipse
angle - angular rotation of ellipse (in radians)
n_points - number of points to generate
Return:
points - n x 2 array with ellipse points
'''
xpos,ypos=xy[0], xy[1]
radm,radn=width,height
an=angle
co,si=np.cos(an),np.sin(an)
the=np.linspace(0,2*np.pi,n_points)
X=radm*np.cos(the)*co-si*radn*np.sin(the)+xpos
Y=radm*np.cos(the)*si+co*radn*np.sin(the)+ypos
points = np.vstack((X, Y)).T
return points
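# Illustrative example: 100 points on an ellipse twice as wide as tall,
# rotated by 45 degrees (EThead below uses this to build its head outline):
#   pts = ellipse(xy=(0, 0), width=0.4, height=0.2, angle=np.pi / 4, n_points=100)
#   pts.shape  -> (100, 2)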
#%%
class EThead(object):
""" A class to handle head animation in Titta
    The animated head should reflect a mirror image of the participant's
    head.
"""
def __init__(self, win):
'''
Args:
win - psychopy window handle
'''
self.win = win
# Define colors
blue = tuple(np.array([37, 97, 163]) / 255.0 * 2 - 1)
green = tuple(np.array([0, 120, 0]) / 255.0 * 2 - 1)
red = tuple(np.array([150, 0, 0]) / 255.0 * 2 - 1)
yellow = tuple(np.array([255, 255, 0]) / 255.0 * 2 - 1)
yellow_linecolor = tuple(np.array([255, 255, 0]) / 255.0 * 2 - 1)
# Head parameters
HEAD_POS_CIRCLE_FIXED_COLOR = blue
HEAD_POS_CIRCLE_FIXED_RADIUS = 0.20
self.HEAD_POS_ELLIPSE_MOVING_HEIGHT = 0.20
self.HEAD_POS_ELLIPSE_MOVING_MIN_HEIGHT = 0.05
# Eye parameters
self.EYE_SIZE = 0.03
# Setup control circles for head position
self.static_circ = visual.Circle(win, radius = HEAD_POS_CIRCLE_FIXED_RADIUS,
lineColor = HEAD_POS_CIRCLE_FIXED_COLOR,
lineWidth=4, units='height')
self.moving_ellipse = visual.ShapeStim(win, lineColor = yellow_linecolor,
lineWidth=4, units='height',
fillColor=yellow, opacity=0.1)
# Ellipses for eyes
self.eye_l = visual.ShapeStim(win, lineColor = 'white', fillColor='white',
lineWidth=2, units='height')
self.eye_r = visual.ShapeStim(win, lineColor = 'white', fillColor='white',
lineWidth=2, units='height')
# Ellipses for pupils
self.pupil_l = visual.ShapeStim(win, fillColor = 'black',
lineColor = 'black',
units='height')
self.pupil_r = visual.ShapeStim(win, fillColor = 'black',
lineColor = 'black',
units='height')
self.eye_l_closed = visual.Rect(win, fillColor=(1,1,1),
lineColor=(1,1,1), units='height')
self.eye_r_closed = visual.Rect(win, fillColor=(1,1,1),
lineColor=(1,1,1), units='height')
self.head_width = 0.25
self.head_height = 0.25
def update(self, sample, sample_user_pos, latest_valid_binocular_avg,
previous_binocular_sample_valid,
latest_valid_roll,
latest_valid_yaw,
offset, eye='both'):
'''
Args:
sample - a dict containing information about the sample
relevant info in sample is
'left_gaze_origin_in_user_coordinate_system'
'right_gaze_origin_in_user_coordinate_system'
'left_gaze_origin_in_trackbox_coordinate_system'
'right_gaze_origin_in_trackbox_coordinate_system'
'left_pupil_diameter'
'right_pupil_diameter'
sample_user_pos - a dict containing information about the user
positioning.
relevant info in sample is
'left_user_position'
'left_user_position_validity'
'right_user_position'
'right_user_position_validity'
            eye - which eye(s) to track: 'both', 'left', or 'right';
                the non-tracked eye will be indicated by a cross
            latest_valid_binocular_avg - latest valid binocular average position
'''
self.eye = eye # Which eye(s) should be tracked
self.latest_valid_roll = latest_valid_roll * 180 / np.pi * -1
# Indicate that eye is not used by red color
if 'right' in self.eye:
self.eye_l_closed.fillColor = (1, -1, -1) # Red
if 'left' in self.eye:
self.eye_r_closed.fillColor = (1, -1, -1)
#%% 1. Compute the average position of the head ellipse
xyz_pos_eye_l = sample_user_pos['left_user_position']
xyz_pos_eye_r = sample_user_pos['right_user_position']
# Valid data from the eyes?
self.right_eye_valid = np.sum(np.isnan(xyz_pos_eye_r)) == 0 # boolean
self.left_eye_valid = np.sum(np.isnan(xyz_pos_eye_l)) == 0
# Compute the average position of the eyes
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
avg_pos = np.nanmean([xyz_pos_eye_l, xyz_pos_eye_r], axis=0)
# print('avg pos {:f} {:f} {:f}'.format(avg_pos[0], avg_pos[1], avg_pos[2]))
# If one eye is closed, the center of the circle is moved,
# Try to prevent this by compensating by an offset
if eye == 'both':
if self.left_eye_valid and self.right_eye_valid: # if both eyes are open
latest_valid_binocular_avg = avg_pos[:]
previous_binocular_sample_valid = True
offset = np.array([0, 0, 0])
elif self.left_eye_valid or self.right_eye_valid:
if previous_binocular_sample_valid:
offset = latest_valid_binocular_avg - avg_pos
previous_binocular_sample_valid = False
#(0.5, 0.5, 0.5) means the eye is in the center of the box
self.moving_ellipse.pos = ((avg_pos[0] - 0.5) * -1 - offset[0] ,
(avg_pos[1] - 0.5) * -1 - offset[1])
self.moving_ellipse.height = (avg_pos[2] - 0.5)*-1 * 0.5 + self.HEAD_POS_ELLIPSE_MOVING_HEIGHT
# Control min size of head ellipse
if self.moving_ellipse.height < self.HEAD_POS_ELLIPSE_MOVING_MIN_HEIGHT:
self.moving_ellipse.height = self.HEAD_POS_ELLIPSE_MOVING_MIN_HEIGHT
# Compute roll and yaw data from 3D information about the eyes
# in the headbox (if both eyes are valid)
if self.left_eye_valid and self.right_eye_valid:
            roll = np.arctan((xyz_pos_eye_l[1] - xyz_pos_eye_r[1]) / \
                             (xyz_pos_eye_l[0] - xyz_pos_eye_r[0]))  # arctan recovers the angle from the slope
            yaw = np.arctan((xyz_pos_eye_l[2] - xyz_pos_eye_r[2]) / \
                            (xyz_pos_eye_l[0] - xyz_pos_eye_r[0])) * -1
latest_valid_roll = roll
latest_valid_yaw = yaw
else: # Otherwise use latest valid measurement
roll = latest_valid_roll
            yaw = latest_valid_yaw
# print('test', latest_valid_binocular_avg, roll, yaw)
# Compute the ellipse height and width
# The width should be zero if yaw = pi/2 rad (90 deg)
# The width should be equal to the height if yaw = 0
self.head_width = self.moving_ellipse.height - \
np.abs(yaw) / np.pi * (self.moving_ellipse.height)
# print(self.moving_ellipse.pos, self.moving_ellipse.height,
# self.head_width, roll, yaw)
# Get head ellipse points to draw
ellipse_points_head = ellipse(xy = (0, 0),
width= self.head_width,
height=self.moving_ellipse.height,
angle=roll)
# update position and shape of head ellipse
self.moving_ellipse.vertices = ellipse_points_head
#%% Compute the position and size of the eyes (roll)
eye_head_distance = self.head_width / 2
        # (the rest of this expression is truncated in the source; target API: numpy.cos)
        self.eye_l.pos = (self.moving_ellipse.pos[0] -
                          np.cos(roll)
"""
main training script
"""
import os
import math
from decimal import Decimal
import utility
import numpy as np
from math import floor
import torch
import torch.nn.utils as utils
from tqdm import tqdm
import torch.nn.functional as F
class Trainer():
def __init__(self, args, loader, my_model, my_loss, ckp):
self.args = args
self.scale = args.scale
self.ckp = ckp
self.loader_train = loader.loader_train
self.loader_test = loader.loader_test
self.model = my_model
self.loss = my_loss
self.losstype = args.loss
self.task = args.task
self.noise_eval = args.noise_eval
self.optimizer = utility.make_optimizer(args, self.model)
if self.args.load != '':
self.optimizer.load(ckp.dir, epoch=len(ckp.log))
        self.noiseL_B = [0, 55]  # ignored when opt.mode=='S'
self.error_last = 1e8
self.ckp.write_log("-------options----------")
self.ckp.write_log(args)
def train(self):
self.loss.step()
epoch = self.optimizer.get_last_epoch() + 1
lr = self.optimizer.get_lr()
self.ckp.write_log(
'[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
)
self.loss.start_log()
self.model.train()
timer_data, timer_model, timer_backward = utility.timer(), utility.timer(), utility.timer()
# TEMP
self.loader_train.dataset.set_scale(0)
for batch, (lr, hr, mask, _,) in enumerate(self.loader_train):
if self.args.debug:
print("into train loop")
print(lr.shape,hr.shape,mask.shape)
if self.task =='denoise' and (not self.args.real_isp):
#if self.args.compute_grads or self.args.predict_groups:
noise = torch.randn(lr.size())*(self.noise_eval)
lr = torch.clamp((lr + noise),0,255)
lr, hr, mask = self.prepare(lr, hr, mask)
# lr = lr.to(self.model.device)
# hr = hr.to(self.model.device)
# mask = mask.to(self.model.device)
mask.requires_grad_(False)
if self.args.debug:
print('lr shape:', lr.shape)
print("hr shape: ", hr.shape)
print("mask shape", mask.shape)
timer_data.hold()
timer_model.tic()
self.optimizer.zero_grad()
sr = self.model(lr, mask, 0)
if self.args.debug:
print("training forwarded: ")
print(lr.shape,hr.shape,mask.shape)
print(mask.dtype)
print(mask[:,10:12,10:12])
if self.args.compute_grads:
loss = self.loss(sr, mask)
elif self.args.predict_groups:
mask = mask.long()
loss = self.loss_expand(sr[0],mask[:,0,:,:])+self.loss_expand(sr[1],mask[:,1,:,:])+self.loss_expand(sr[2],mask[:,2,:,:])
else:
if 'WeightedL1' in self.losstype:
loss = self.loss(sr, hr, mask)
else:
loss = self.loss(sr, hr)
timer_backward.tic()
loss.backward()
timer_backward.hold()
if self.args.debug:
print("loss backwarded: ")
if self.args.gclip > 0:
utils.clip_grad_value_(
self.model.parameters(),
self.args.gclip
)
self.optimizer.step()
timer_model.hold()
if (batch + 1) % self.args.print_every == 0:
if self.args.timer:
self.ckp.write_log('[{}/{}]\t{}\t{:.3f}+{:.3f}+{:.3f}s\t{:.3f}+{:.3f}+{:.3f}s'.format(
(batch + 1) * self.args.batch_size,
len(self.loader_train.dataset),
self.loss.display_loss(batch),
timer_model.release(),
timer_data.release(),
timer_backward.release(),
self.args.timer_total_forward.release(),
self.args.timer_embedding_forward.release(),
self.args.timer_kconv_forward.release(),
))
else:
self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}+{:.1f}s'.format(
(batch + 1) * self.args.batch_size,
len(self.loader_train.dataset),
self.loss.display_loss(batch),
timer_model.release(),
timer_data.release(),
timer_backward.release()))
timer_data.tic()
self.loss.end_log(len(self.loader_train))
self.error_last = self.loss.log[-1, -1]
self.optimizer.schedule()
def loss_expand(self,pred,ground):
if self.args.debug:
print("loss expand function:")
print(pred.shape, ground.shape)
pred = pred.permute(0, 2, 3, 1)
ncs = pred.shape[3]
pred = pred.view(-1, ncs)
mask_flat = ground.view(-1)
lossp = self.loss(pred, mask_flat)
return lossp
"""
def merge_grads(self,grads_tensor):
stre = np.linspace(0, 0.2, self.args.Qstr + 1)[1:-1]
cohe = np.linspace(0, 1, self.args.Qcohe + 1)[1:-1]
grads = grads_tensor.cpu().numpy()
H, W, C = grads.shape
if self.args.debug:
print('merging grads:')
print(stre, cohe)
print(grads.shape, grads.dtype)
grad_map = np.zeros((H, W), dtype=np.int32)
for i in range(H):
for j in range(W):
if self.args.debug:
print('grads:')
print(grads[i,j,0],grads[i,j,1],grads[i,j,2])
tempu = floor(grads[i, j, 0] * self.args.Qangle)
if tempu < 0:
tempu = 0
if tempu > self.args.Qangle - 1:
tempu = self.args.Qangle - 1
if self.args.debug:
print('tempu:')
print(tempu)
lamda = np.searchsorted(stre, grads[i, j, 1])
mu = np.searchsorted(cohe, grads[i, j, 2])
if self.args.debug:
print('lamda&mu:')
print(lamda, mu)
grad_map[i,j] = (tempu * self.args.Qstr * self.args.Qcohe) + lamda * self.args.Qcohe + mu
if self.args.debug:
print('grad_map:')
print(grad_map[10:12,10:12])
return grad_map
"""
def merge_grads(self,grads_tensor):
"""
:param grads_tensor: of shape (H,W,3)
:return:
"""
if self.args.Qstr ==2:
stre = np.array([0.05],dtype=np.float32)
else:
stre = np.linspace(0, 0.2, self.args.Qstr + 1)[1:-1]
if self.args.Qcohe ==2:
cohe = np.array([0.3], dtype=np.float32)
else:
cohe = np.linspace(0, 1, self.args.Qcohe + 1)[1:-1]
grads = grads_tensor.cpu().numpy()
H, W, C = grads.shape
if self.args.debug:
print('merging grads:')
print(stre, cohe)
print(grads.shape, grads.dtype)
tempus = np.clip(np.floor(grads[:,:,0]*self.args.Qangle),0,self.args.Qangle-1)
lamdas = np.clip(np.searchsorted(stre,grads[:,:,1]),0,self.args.Qstr)
mus = np.clip(np.searchsorted(cohe,grads[:,:,2]),0,self.args.Qcohe)
grad_map = (tempus * self.args.Qstr * self.args.Qcohe) + lamdas * self.args.Qcohe + mus
if self.args.debug:
print('grad_map:')
print(grad_map.shape)
print(grad_map[10:12,10:12])
return grad_map
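    # The returned grad_map encodes, per pixel, a single bucket index:
    #   index = angle_bin * Qstr * Qcohe + strength_bin * Qcohe + coherence_bin
    # e.g. with Qangle=8, Qstr=3, Qcohe=3 (illustrative values) the indices run from 0 to 71.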
def stride_cut(self,lr,hr,mask=None, split_size=400, overlap_size=100):
if self.args.debug:
print('stride cutting the following tensors: ')
print('lr: ',lr.shape)
print('hr: ',hr.shape)
if mask is not None:
print('mask: ',mask.shape)
print('split_size: ',split_size,' overlap_size: ',overlap_size)
stride = split_size - overlap_size
        ## build the index grid for the image patches
orig_shape = (lr.shape[2], lr.shape[3])
imhigh = lr.shape[2]
imwidth = lr.shape[3]
range_y = np.arange(0, imhigh - split_size, stride)
range_x = np.arange(0, imwidth - split_size, stride)
if self.args.debug:
print(range_x)
print(range_y)
if range_y[-1] != imhigh - split_size:
range_y = np.append(range_y, imhigh - split_size)
if range_x[-1] != imwidth - split_size:
range_x = np.append(range_x, imwidth - split_size)
        sz = len(range_y) * len(range_x)  ## number of image patches
if self.args.debug:
print('sz: ',sz)
res_lr= torch.zeros((sz,lr.shape[1], split_size,split_size),dtype=lr.dtype)
res_hr = torch.zeros((sz, hr.shape[1], split_size, split_size), dtype = hr.dtype)
res_mask = torch.zeros((sz, split_size, split_size), dtype = mask.dtype)
if self.args.debug:
print(range_x)
print(range_y)
print('sz: ',sz, res_lr.shape,res_lr.dtype,res_hr.shape,res_mask.shape,res_mask.dtype)
index = 0
for y in range_y:
for x in range_x:
res_lr[index,:,:,:] = lr[0,:,y:y + split_size, x:x + split_size]
res_hr[index, :, :, :] = hr[0, :, y:y + split_size, x:x + split_size]
res_mask[index, :, :] = mask[0, y:y + split_size, x:x + split_size]
index = index + 1
if self.args.debug:
print('finished cutting: ', res_lr.shape,res_hr.shape,res_mask.shape)
return res_lr,res_hr,res_mask
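    # stride_cut and recon_from_cols below are intended as a pair: patches of
    # split_size x split_size are cut every (split_size - overlap_size) pixels,
    # run through the model, and recon_from_cols presumably stitches them back
    # (its body is truncated below), weighting overlaps via the tensor w.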
def recon_from_cols(self,sr_cols,imsize, stride=300,split_size=400):
sr_recon = torch.zeros((1,sr_cols.shape[1],imsize[0],imsize[1]),dtype = sr_cols.dtype)
w = torch.zeros((1,sr_cols.shape[1],imsize[0],imsize[1]),dtype = sr_cols.dtype)
if self.args.debug:
print('reconstructing patches: ', sr_recon.shape, w.shape)
range_y = np.arange(0, imsize[0] - split_size, stride)
range_x = np.arange(0, imsize[1] - split_size, stride)
if range_y[-1] != imsize[0] - split_size:
            range_y = np.append(range_y, imsize[0] - split_size)  # target API: numpy.append
        # (the rest of recon_from_cols is truncated in the source)
import random
import numpy as np
def approx_entropy(x, m=2, r=20.0, use_std_r=False):
N = x.shape[0]
def _d(x_i, x_j):
return np.max(np.abs(x_i - x_j), 1)
def _phi(N, m, r, x):
s = np.empty((N - m + 1, m))
for i in range(N - m + 1):
s[i,:] = x[i:i+m]
C = np.zeros((N - m + 1,))
for i in range(N - m +1):
C += np.less_equal(_d(s, np.roll(s, i, axis=0)), r)
C /= (N - m + 1.0)
return (N - m + 1.0)**(-1) * np.sum(np.log(C))
if use_std_r:
r = r * np.std(x)
return abs(_phi(N, m + 1, r, x) - _phi(N, m, r, x))
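# Illustrative sanity check (assumed values, not from the original code): a
# regular signal should score lower approximate entropy than a noisy one.
#   t = np.linspace(0, 10 * np.pi, 500)
#   approx_entropy(np.sin(t), m=2, r=0.2, use_std_r=True)             # small
#   approx_entropy(np.random.randn(500), m=2, r=0.2, use_std_r=True)  # larger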
def id_scale(y):
return y
def get_approx_entropy(m, r, scale=id_scale):
def custom_approx_entropy(x):
x = scale(x)
N = x.shape[0]
def _d(x_i, x_j):
return np.max(np.abs(x_i - x_j), 1)
def _phi(N, m, r, x):
s = np.empty((N - m + 1, m))
for i in range(N - m + 1):
# try:
s[i,:] = x[i:i+m]
# except Exception as e:
# print(s[i, :].shape, x[i:i+m].shape, i, m)
# raise e
C = np.zeros((N - m + 1,))
for i in range(N - m +1):
C += np.less_equal(_d(s, np.roll(s, i, axis=0)), r)
C /= (N - m + 1.0)
return (N - m + 1.0)**(-1) * np.sum(np.log(C))
return abs(_phi(N, m + 1, r, x) - _phi(N, m, r, x))
return custom_approx_entropy
def sample_entropy(x, m=2, r=0.15, use_std_r=True):
N = x.shape[0]
def _d(x_i, x_j):
return np.max(np.abs(x_i - x_j), 1)
def _C(N, m, r, x):
s = np.empty((N - m + 1, m))
for i in range(N - m + 1):
s[i,:] = x[i:i+m]
C = np.zeros((N - m + 1,))
for i in range(1, N - m +1):
C += np.less_equal(_d(s, np.roll(s, i, axis=0)), r)
C /= (N - m - 1.0)
return np.sum(C) / (N - m)
if use_std_r:
r = r * np.std(x)
A = _C(N, m+1, r, x)
B = _C(N, m, r, x)
return -np.log(A / B)
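# Unlike approx_entropy above, sample_entropy excludes self-matches (the inner
# loop starts at i=1) and returns -log(A/B), which makes it less biased on
# short records; here r defaults to 0.15 * std(x) because use_std_r=True.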
def triangle_noise(data, pts=1, dx=3):
max_noise = np.std(data)
noised = np.copy(data)
inds = np.random.choice(range(0, noised.size - 1), pts)
for ind in inds:
noise = (random.random() - 0.5) * 2.0 * max_noise
for i in range(max(ind-dx, 0), min(ind+dx+1, noised.size)):
noised[i] += noise * (1.0 - (abs((ind - i))/(dx+1.0)))
return noised
def get_scale(h, w):
def curry_scale(y):
try:
y2 = np.interp(np.linspace(0, y.size-1, w), np.arange(y.size), y)
except Exception as e:
print('error on y: '+str(y))
raise e
y3 = y2 - np.min(y2)
y4 = y3 * (h/np.max(y3))
return y4
return curry_scale
def scale(y, h, w):
try:
y2 = np.interp(np.linspace(0, y.size-1, w), np.arange(y.size), y)
except Exception as e:
print('error on y: '+str(y))
raise e
y3 = y2 - np.min(y2)
y4 = y3 * (h/np.max(y3))
return y4
def step_noise_cb(i):
step_size = 1
dx = 3
if i > 0:
x5 = i // 5
x10 = i // 10
x20 = i // 20
step_size += x5 + x10 + x20
if i > 50:
dx = 2
if i > 100:
dx = 1
if i > 200:
dx = 0
return (step_size, dx)
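# step_noise_cb is meant to be passed as step_cb to noise_to_entropy(); the
# (step_size, dx) tuple it returns is unpacked into triangle_noise(data, pts, dx),
# so later iterations add more, narrower noise bumps.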
def add_entropy(data, entropy_to_add,
entropy_fcn=sample_entropy,
noise_fcn=triangle_noise,
scale=id_scale,
step_cb=None,
step_size=1,
max_steps=300,
verbose=False):
meas_entropy = approx_entropy(scale(data))
target_entropy = meas_entropy + entropy_to_add
    # noise_to_entropy() takes no scale/step_size arguments, so fold the
    # scaling into the entropy function and pass the rest by keyword.
    return noise_to_entropy(data, target_entropy,
                            meas_entropy=meas_entropy,
                            entropy_fcn=lambda x: entropy_fcn(scale(x)),
                            noise_fcn=noise_fcn,
                            step_cb=step_cb,
                            max_steps=max_steps,
                            verbose=verbose)
def noise_to_entropy(data, target_entropy,
meas_entropy=None,
entropy_fcn=approx_entropy,
noise_fcn=triangle_noise,
step_cb=None,
max_steps=300,
verbose=False):
if meas_entropy is None:
meas_entropy = entropy_fcn(data)
noised = np.copy(data)
i = 0
noise_args = ()
while (meas_entropy < target_entropy):
if (i >= max_steps):
if verbose:
print('exceeded max iterations!')
break
if step_cb is not None:
noise_args = step_cb(i)
new_noised = noise_fcn(noised, *noise_args)
new_meas_entropy = entropy_fcn(new_noised)
if new_meas_entropy > meas_entropy:
meas_entropy = new_meas_entropy
noised = new_noised
i += 1
if verbose:
print('noise iters: {}'.format(i))
return noised
# def smooth(data, w):
def smooth(data, window_width, smooth_factor):
# w = np.array([w_edge, 1.0, w_edge])
w = np.hamming(window_width) + smooth_factor
w = w/w.sum()
tiles = len(w) // 2
# print(tiles)
startpad = np.tile(data[0], tiles)
endpad = np.tile(data[-1], tiles)
# print(startpad.shape, data.shape, endpad.shape)
    # (the call below is truncated in the source; target API: numpy.concatenate)
    smoothed = np.convolve(
        np.concatenate([startpad, data, endpad])
from __future__ import print_function
from numpy.random import seed
seed(1)
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
from PIL import Image
import io
from sklearn.model_selection import StratifiedShuffleSplit
from vgg16module import VGG16
from keras.models import Model, model_from_json, model_from_yaml, Sequential
from keras.layers import Input, Convolution2D, MaxPooling2D, LSTM, Reshape, Merge, TimeDistributed, Flatten, Activation, Dense, Dropout, merge, AveragePooling2D, ZeroPadding2D, Lambda
from keras.optimizers import Adam, SGD
from keras.layers.normalization import BatchNormalization
from keras import backend as K
K.set_image_dim_ordering('th')
from keras.utils import np_utils
from sklearn.metrics import confusion_matrix, accuracy_score
from skimage.io import imsave
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler
from keras.utils.np_utils import to_categorical
import json
from scipy.ndimage import minimum, maximum, imread
import math
import numpy.ma as ma
import matplotlib.cm as cm
import h5py
import random
from collections import OrderedDict
import scipy.io as sio
import cv2
import glob
import gc
from scipy.stats import mode
from collections import Counter
from sklearn import svm
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold
from keras.layers.advanced_activations import ELU
def plot_training_info(case, metrics, save, history):
# summarize history for accuracy
plt.ioff()
if 'accuracy' in metrics:
fig = plt.figure()
plt.plot(history['acc'])
plt.plot(history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
if save == True:
plt.savefig(case + 'accuracy.png')
plt.gcf().clear()
else:
plt.show()
plt.close(fig)
# summarize history for loss
if 'loss' in metrics:
fig = plt.figure()
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
#plt.ylim(1e-3, 1e-2)
plt.yscale("log")
plt.legend(['train', 'val'], loc='upper left')
if save == True:
plt.savefig(case + 'loss.png')
plt.gcf().clear()
else:
plt.show()
plt.close(fig)
def step_decay(epoch):
initial_lrate = 0.005
drop = 0.5
epochs_drop = 10.0
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
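# With these constants the learning rate halves every 10 epochs:
# epochs 0-8 -> 0.005, epochs 9-18 -> 0.0025, epochs 19-28 -> 0.00125, ...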
def generator(folder1,folder2):
for x,y in zip(folder1,folder2):
yield x,y
def saveFeatures(param, max_label, batch_size, phase, save_features, feature_extractor, classifier, features_file, labels_file, train_split, test_split):
#data_folder = '/ssd_drive/data/'
data_folder = '/ssd_drive/MultiCam_OF2/'
mean_file = '/ssd_drive/flow_mean.mat'
L = 10
class0 = 'Falls'
class1 = 'NotFalls'
    # TRANSFORMATIONS
flip = False
crops = False
rotate = False
translate = False
#i, j = 0, 0
# substract mean
d = sio.loadmat(mean_file)
flow_mean = d['image_mean']
num_features = 4096
# ===============================================
print('Starting the loading')
mult = 1
if flip:
mult = 2
if flip and crops:
mult = 10
if flip and crops and rotate and translate:
mult = 39
#size = 0
#folders, classes = [], []
#fall_videos = [f for f in os.listdir(data_folder + class0) if os.path.isdir(os.path.join(data_folder + class0, f))]
#fall_videos.sort()
#for fall_video in fall_videos:
# x_images = glob.glob(data_folder + class0 + '/' + fall_video + '/flow_x*.jpg')
# if int(len(x_images)) >= 10:
# folders.append(data_folder + class0 + '/' + fall_video)
# classes.append(0)
#not_fall_videos = [f for f in os.listdir(data_folder + class1) if os.path.isdir(os.path.join(data_folder + class1, f))]
#not_fall_videos.sort()
#for not_fall_video in not_fall_videos:
# if int(len(x_images)) >= 10:
# x_images = glob.glob(data_folder + class1 + '/' + not_fall_video + '/flow_x*.jpg')
# folders.append(data_folder + class1 + '/' + not_fall_video)
# classes.append(1)
h5features = h5py.File(features_file,'w')
h5labels = h5py.File(labels_file,'w')
# Get all folders and classes
i = 0
idx = 0
#nb_total_stacks = 0
#for folder in folders:
# x_images = glob.glob(folder + '/flow_x*.jpg')
# nb_total_stacks += int(len(x_images))-L+1
#size_test *= mult
#dataset_features_train = h5features.create_dataset('train', shape=(size_train, num_features), dtype='float64')
#dataset_features_test = h5features.create_dataset('test', shape=(size_test, num_features), dtype='float64')
#dataset_labels_train = h5labels.create_dataset('train', shape=(size_train, 1), dtype='float64')
#dataset_labels_test = h5labels.create_dataset('test', shape=(size_test, 1), dtype='float64')
#print(size_train, size_test)
#a = np.zeros((20,10))
#b = range(20)
#nb_stacks=20-10+1
#for i in range(len(b)):
# for s in list(reversed(range(min(10,i+1)))):
# print(i,s)
# a[i-s,s] = b[i]
#ind_fold = 0
fall_videos = np.zeros((24,2), dtype=np.int)
i = 0
while i < 3:
fall_videos[i,:] = [i*7, i*7+7]
i += 1
fall_videos[i,:] = [i*7, i*7+14]
i += 1
while i < 23:
fall_videos[i,:] = [i*7, i*7+7]
i += 1
fall_videos[i,:] = [i*7, i*7]
not_fall_videos = np.zeros((24,2), dtype=np.int)
i = 0
while i < 23:
not_fall_videos[i,:] = [i*7, i*7+14]
i += 1
not_fall_videos[i,:] = [i*7, i*7+7]
stages = []
for i in [24] + range(1,24):
stages.append('chute{:02}'.format(i))
black = np.zeros((224,224))
idx_falls, idx_nofalls = 0, 0
for stage, nb_stage in zip(stages, range(len(stages))):
print(nb_stage, stage)
h5features.create_group(stage)
h5labels.create_group(stage)
#h5features[stage].create_group('augmented')
h5features[stage].create_group('not_augmented')
#h5labels[stage].create_group('augmented')
h5labels[stage].create_group('not_augmented')
cameras = glob.glob(data_folder + stage + '/cam*')
cameras.sort()
for camera, nb_camera in zip(cameras, range(1, len(cameras)+1)):
print('Cam {}'.format(nb_camera))
#h5features[stage]['augmented'].create_group('cam{}'.format(nb_camera))
h5features[stage]['not_augmented'].create_group('cam{}'.format(nb_camera))
#h5labels[stage]['augmented'].create_group('cam{}'.format(nb_camera))
h5labels[stage]['not_augmented'].create_group('cam{}'.format(nb_camera))
#not_falls = [f for f in os.listdir(data_folder + stage + '/cam{}/NotFalls/'.format(nb_camera)) if os.path.isdir(os.path.join(data_folder + stage + '/cam{}/NotFalls/'.format(nb_camera), f))]
not_falls = glob.glob(camera + '/NotFalls/notfall*'.format(nb_camera))
not_falls.sort()
h5features.close()
h5labels.close()
h5features = h5py.File(features_file,'a')
h5labels = h5py.File(labels_file,'a')
for not_fall in not_falls:
print(not_fall)
label = 1
x_images = glob.glob(not_fall + '/flow_x*.jpg')
x_images.sort()
                y_images = glob.glob(not_fall + '/flow_y*.jpg')
y_images.sort()
nb_stacks = int(len(x_images))-L+1
#features_aug_notfall = h5features[stage]['augmented']['cam{}'.format(nb_camera)].create_dataset('notfall{:04}'.format(idx_nofalls), shape=(nb_stacks*2, num_features), dtype='float64')
features_notfall = h5features[stage]['not_augmented']['cam{}'.format(nb_camera)].create_dataset('notfall{:04}'.format(idx_nofalls), shape=(nb_stacks, num_features), dtype='float64')
#labels_aug_notfall = h5labels[stage]['augmented']['cam{}'.format(nb_camera)].create_dataset('notfall{:04}'.format(idx_nofalls), shape=(nb_stacks*2, 1), dtype='float64')
labels_notfall = h5labels[stage]['not_augmented']['cam{}'.format(nb_camera)].create_dataset('notfall{:04}'.format(idx_nofalls), shape=(nb_stacks, 1), dtype='float64')
idx_nofalls += 1
if stage == 'chute24' or stage == 'chute23':
flow = np.zeros(shape=(224,224,2*L,nb_stacks), dtype=np.float64)
gen = generator(x_images,y_images)
for i in range(len(x_images)):
flow_x_file, flow_y_file = gen.next()
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
for s in list(reversed(range(min(10,i+1)))):
if i-s < nb_stacks:
flow[:,:,2*s, i-s] = img_x
flow[:,:,2*s+1,i-s] = img_y
del img_x,img_y
gc.collect()
flow = flow - np.tile(flow_mean[...,np.newaxis], (1, 1, 1, flow.shape[3]))
flow = np.transpose(flow, (3, 2, 0, 1))
predictions = np.zeros((flow.shape[0], num_features), dtype=np.float64)
truth = np.zeros((flow.shape[0], 1), dtype=np.float64)
for i in range(flow.shape[0]):
prediction = feature_extractor.predict(np.expand_dims(flow[i, ...],0))
predictions[i, ...] = prediction
truth[i] = label
features_notfall[:,:] = predictions
labels_notfall[:,:] = truth
del predictions, truth, flow, features_notfall, labels_notfall
gc.collect()
#flow_aug = np.zeros(shape=(224,224,2*L,nb_stacks*2), dtype=np.float64)
#gen = generator(x_images,y_images)
#for i in range(len(x_images)):
# flow_x_file, flow_y_file = gen.next()
# img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
# img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
# flip_x = 255 - img_x[:, ::-1]
# flip_y = img_y[:, ::-1]
# for s in list(reversed(range(min(10,i+1)))):
# if i-s < nb_stacks:
# flow_aug[:,:,2*s, i-s] = img_x
# flow_aug[:,:,2*s+1,i-s] = img_y
# flow_aug[:,:,2*s, i-s+nb_stacks] = flip_x
# flow_aug[:,:,2*s+1,i-s+nb_stacks] = flip_y
# del img_x,img_y,flip_x,flip_y
# gc.collect()
#flow_aug = flow_aug - np.tile(flow_mean[...,np.newaxis], (1, 1, 1, flow_aug.shape[3]))
#flow_aug = np.transpose(flow_aug, (3, 2, 0, 1))
#predictions = np.zeros((flow_aug.shape[0], num_features), dtype=np.float64)
#truth = np.zeros((flow_aug.shape[0], 1), dtype=np.float64)
#for i in range(flow_aug.shape[0]):
# prediction = feature_extractor.predict(np.expand_dims(flow_aug[i, ...],0))
# predictions[i, ...] = prediction
# truth[i] = label
#features_aug_notfall[:,:] = predictions
#labels_aug_notfall[:,:] = truth
#del predictions, truth, flow_aug, features_aug_notfall, labels_aug_notfall,
#gc.collect()
# NOT CHUTE24 ==================
else:
flow = np.zeros(shape=(224,224,2*L,nb_stacks), dtype=np.float64)
#flow_aug = np.zeros(shape=(224,224,2*L,nb_stacks*2), dtype=np.float64)
gen = generator(x_images,y_images)
for i in range(len(x_images)):
flow_x_file, flow_y_file = gen.next()
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
flip_x = 255 - img_x[:, ::-1]
flip_y = img_y[:, ::-1]
for s in list(reversed(range(min(10,i+1)))):
if i-s < nb_stacks:
flow[:,:,2*s, i-s] = img_x
flow[:,:,2*s+1,i-s] = img_y
#flow_aug[:,:,2*s, i-s] = img_x
#flow_aug[:,:,2*s+1,i-s] = img_y
#flow_aug[:,:,2*s, i-s+nb_stacks] = flip_x
#flow_aug[:,:,2*s+1,i-s+nb_stacks] = flip_y
del img_x,img_y,flip_x,flip_y
gc.collect()
flow = flow - np.tile(flow_mean[...,np.newaxis], (1, 1, 1, flow.shape[3]))
flow = np.transpose(flow, (3, 2, 0, 1))
predictions = np.zeros((flow.shape[0], num_features), dtype=np.float64)
truth = np.zeros((flow.shape[0], 1), dtype=np.float64)
for i in range(flow.shape[0]):
prediction = feature_extractor.predict(np.expand_dims(flow[i, ...],0))
predictions[i, ...] = prediction
truth[i] = label
features_notfall[:,:] = predictions
labels_notfall[:,:] = truth
del predictions, truth, flow, features_notfall, labels_notfall
gc.collect()
#flow_aug = flow_aug - np.tile(flow_mean[...,np.newaxis], (1, 1, 1, flow_aug.shape[3]))
#flow_aug = np.transpose(flow_aug, (3, 2, 0, 1))
#predictions = np.zeros((flow_aug.shape[0], num_features), dtype=np.float64)
#truth = np.zeros((flow_aug.shape[0], 1), dtype=np.float64)
#for i in range(flow_aug.shape[0]):
# prediction = feature_extractor.predict(np.expand_dims(flow_aug[i, ...],0))
# predictions[i, ...] = prediction
# truth[i] = label
#features_aug_notfall[:,:] = predictions
#labels_aug_notfall[:,:] = truth
#del predictions, truth, flow_aug, features_aug_notfall, labels_aug_notfall,
#gc.collect()
del x_images, y_images, nb_stacks
gc.collect()
if stage == 'chute24':
idx += 2
continue
falls = glob.glob(camera + '/Falls/fall*'.format(nb_camera))
falls.sort()
h5features.close()
h5labels.close()
h5features = h5py.File(features_file,'a')
h5labels = h5py.File(labels_file,'a')
for fall in falls:
print(fall)
label = 0
x_images = glob.glob(fall + '/flow_x*.jpg')
x_images.sort()
y_images = glob.glob(fall + '/flow_y*.jpg')
y_images.sort()
nb_stacks = int(len(x_images))-L+1
#features_aug_fall = h5features[stage]['augmented']['cam{}'.format(nb_camera)].create_dataset('fall{:04}'.format(idx_falls), shape=(nb_stacks*2, num_features), dtype='float64')
features_fall = h5features[stage]['not_augmented']['cam{}'.format(nb_camera)].create_dataset('fall{:04}'.format(idx_falls), shape=(nb_stacks, num_features), dtype='float64')
#labels_aug_fall = h5labels[stage]['augmented']['cam{}'.format(nb_camera)].create_dataset('fall{:04}'.format(idx_falls), shape=(nb_stacks*2, 1), dtype='float64')
labels_fall = h5labels[stage]['not_augmented']['cam{}'.format(nb_camera)].create_dataset('fall{:04}'.format(idx_falls), shape=(nb_stacks, 1), dtype='float64')
idx_falls += 1
flow = np.zeros(shape=(224,224,2*L,nb_stacks), dtype=np.float64)
#flow_aug = np.zeros(shape=(224,224,2*L,nb_stacks*2), dtype=np.float64)
gen = generator(x_images,y_images)
for i in range(len(x_images)):
flow_x_file, flow_y_file = gen.next()
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
flip_x = 255 - img_x[:, ::-1]
flip_y = img_y[:, ::-1]
for s in list(reversed(range(min(10,i+1)))):
if i-s < nb_stacks:
flow[:,:,2*s, i-s] = img_x
flow[:,:,2*s+1,i-s] = img_y
#flow_aug[:,:,2*s, i-s] = img_x
#flow_aug[:,:,2*s+1,i-s] = img_y
#flow_aug[:,:,2*s, i-s+nb_stacks] = flip_x
#flow_aug[:,:,2*s+1,i-s+nb_stacks] = flip_y
del img_x,img_y,flip_x,flip_y
gc.collect()
flow = flow - np.tile(flow_mean[...,np.newaxis], (1, 1, 1, flow.shape[3]))
flow = np.transpose(flow, (3, 2, 0, 1))
predictions = np.zeros((flow.shape[0], num_features), dtype=np.float64)
truth = np.zeros((flow.shape[0], 1), dtype=np.float64)
for i in range(flow.shape[0]):
prediction = feature_extractor.predict(np.expand_dims(flow[i, ...],0))
predictions[i, ...] = prediction
truth[i] = label
features_fall[:,:] = predictions
labels_fall[:,:] = truth
del predictions, truth, flow, features_fall, labels_fall
gc.collect()
#flow_aug = flow_aug - np.tile(flow_mean[...,np.newaxis], (1, 1, 1, flow_aug.shape[3]))
#flow_aug = np.transpose(flow_aug, (3, 2, 0, 1))
#predictions = np.zeros((flow_aug.shape[0], num_features), dtype=np.float64)
#truth = np.zeros((flow_aug.shape[0], 1), dtype=np.float64)
#for i in range(flow_aug.shape[0]):
# prediction = feature_extractor.predict(np.expand_dims(flow_aug[i, ...],0))
# predictions[i, ...] = prediction
# truth[i] = label
#features_aug_fall[:,:] = predictions
#labels_aug_fall[:,:] = truth
#del predictions, truth, features_fall, labels_aug_fall, labels_fall, flow_aug, features_aug_fall,
#gc.collect()
h5features.close()
h5labels.close()
sys.exit()
for folder, label in zip(folders, classes):
print(folder)
h5features.create_group(folder)
h5labels.create_group(folder)
#os.makedirs('/home/anunez/imagenes/' + folder)
x_images = glob.glob(folder + '/flow_x*.jpg')
x_images.sort()
y_images = glob.glob(folder + '/flow_y*.jpg')
y_images.sort()
nb_stacks = int(len(x_images))-(2*L)+1
flow = np.zeros(shape=(224,224,2*L,nb_stacks), dtype=np.float64)
flow_aug = np.zeros(shape=(224,224,2*L,nb_stacks*mult), dtype=np.float64)
gen = generator(x_images,y_images)
for i in range(len(x_images)):
flow_x_file, flow_y_file = gen.next()
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
flip_x = 255 - img_x[:, ::-1]
flip_y = img_y[:, ::-1]
for s in list(reversed(range(min(10,i+1)))):
flow[:,:,2*s, i-s] = img_x
flow[:,:,2*s+1,i-s] = img_y
flow_aug[:,:,2*s, i-s] = img_x
flow_aug[:,:,2*s+1,i-s] = img_y
flow_aug[:,:,2*s, i-s+nb_stacks] = flip_x
flow_aug[:,:,2*s+1,i-s+nb_stacks] = flip_y
del img_x,img_y,flip_x,flip_y
gc.collect()
flow = flow - np.tile(flow_mean[...,np.newaxis], (1, 1, 1, flow.shape[3]))
flow = np.transpose(flow, (3, 2, 0, 1))
predictions = np.zeros((flow.shape[0], num_features), dtype=np.float64)
truth = np.zeros((flow.shape[0], 1), dtype=np.float64)
#print('flow shape {}'.format(flow.shape[0]))
#print(flow.shape)
for i in range(flow.shape[0]):
prediction = feature_extractor.predict(np.expand_dims(flow[i, ...],0))
predictions[i, ...] = prediction
truth[i] = label
dataset_features_train[cont_train:cont_train+flow.shape[0], :] = predictions
dataset_labels_train[cont_train:cont_train+flow.shape[0]] = truth
cont_train += flow.shape[0]
flow_aug = flow_aug - np.tile(flow_mean[...,np.newaxis], (1, 1, 1, flow_aug.shape[3]))
flow_aug = np.transpose(flow_aug, (3, 2, 0, 1))
predictions = np.zeros((flow.shape[0], num_features), dtype=np.float64)
truth = np.zeros((flow_aug.shape[0], 1), dtype=np.float64)
for i in range(flow_aug.shape[0]):
prediction = feature_extractor.predict(np.expand_dims(flow_aug[i, ...],0))
predictions[i, ...] = prediction
truth[i] = label
dataset_features_train[cont_train:cont_train+flow.shape[0], :] = predictions
dataset_labels_train[cont_train:cont_train+flow.shape[0]] = truth
cont_train += flow.shape[0]
#print(cont, flow.shape[0])
#features[cont:cont+flow.shape[0], :] = predictions
#all_labels[cont:cont+flow.shape[0], :] = truth
#cont += flow.shape[0]
#dataset_features2[...] = predictions2
#dataset_labels2[...] = truth
del flow, predictions, truth
gc.collect()
#is_testing = False
#if p == 0:
# np.save(features_file + '2_train.npy', features)
# np.save(labels_file + '2_train.npy', all_labels)
#else:
# np.save(features_file + '2_test.npy', features)
# np.save(labels_file + '2_test.npy', all_labels)
h5features.close()
h5labels.close()
def main(learning_rate, batch_size, dropout, batch_norm, weight_0, weight_1, nb_neurons, exp, model_file, weights_file):
best_model = 'best_weights/best_weights_{}.hdf5'.format(exp)
print(exp)
num_features = 4096
with open(parameter_file) as data_file:
param = json.load(data_file)
# VGG16 =====================================================
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(param['input_channels'], param['input_width'], param['input_height'])))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(4096, name='fc6', init='glorot_uniform'))
extracted_features = Input(shape=(4096,), dtype='float32', name='input')
#x = Dense(4096, activation='relu', name='fc1')(extracted_features)
#if batch_norm:
# x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(extracted_features)
# x = ELU(alpha=1.0)(x)
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(extracted_features)
x = Activation('relu')(x)
x = Dropout(0.9)(x)
x = Dense(nb_neurons, name='fc2', init='glorot_uniform')(x)
#if batch_norm:
# x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
x = Activation('relu')(x)
x = Dropout(0.8)(x)
x = Dense(1, name='predictions', init='glorot_uniform')(x)
x = Activation('sigmoid')(x)
classifier = Model(input=extracted_features, output=x, name='classifier')
layerskeras = ['block1_conv1', 'block1_conv2', 'block2_conv1', 'block2_conv2', 'block3_conv1', 'block3_conv2', 'block3_conv3', 'block4_conv1', 'block4_conv2', 'block4_conv3', 'block5_conv1', 'block5_conv2', 'block5_conv3', 'fc1', 'fc2', 'predictions']
layerscaffe = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3', 'fc6', 'fc7', 'fc8']
i = 0
h5 = h5py.File('/home/anunez/project/caffedata.h5')
layer_dict = dict([(layer.name, layer) for layer in model.layers])
for layer in layerscaffe[:-3]:
w2, b2 = h5['data'][layer]['0'], h5['data'][layer]['1']
w2 = np.transpose(np.asarray(w2), (0,1,2,3))
w2 = w2[:, :, ::-1, ::-1]
b2 = np.asarray(b2)
#model.get_layer(layerskeras[i]).W.set_value(w2)
#model.get_layer(layerskeras[i]).b.set_value(b2)
layer_dict[layer].W.set_value(w2)
layer_dict[layer].b.set_value(b2)
i += 1
layer = layerscaffe[-3]
w2, b2 = h5['data'][layer]['0'], h5['data'][layer]['1']
w2 = np.transpose(np.asarray(w2), (1,0))
b2 = np.asarray(b2)
#model.get_layer(layerskeras[i]).W.set_value(w2)
#model.get_layer(layerskeras[i]).b.set_value(b2)
layer_dict[layer].W.set_value(w2)
layer_dict[layer].b.set_value(b2)
i += 1
copy_dense_weights = False
if copy_dense_weights:
        print('Copying dense-layer weights')
#for layer in layerscaffe[-2:]:
layer = layerscaffe[-2]
w2, b2 = h5['data'][layer]['0'], h5['data'][layer]['1']
w2 = np.transpose(w2,(1,0))
b2 = np.asarray(b2)
print(layerskeras[i])
classifier.get_layer('fc2').W.set_value(w2)
classifier.get_layer('fc2').b.set_value(b2)
i += 1
for layer in classifier.layers:
layer.trainable = True
w,b = classifier.get_layer('fc2').get_weights()
#print(np.allclose(w, w))
#classifier.load_weights(best_model)
#plot_model(model, to_file='model.png', show_shapes=False, show_layer_names=True)
#plot_model(classifier, to_file='classifier.png', show_shapes=False, show_layer_names=True)
adam = Adam(lr=learning_rate, beta_1=param['beta_1'], beta_2=param['beta_2'], epsilon=param['adam_eps'], decay=param['decay'])
#sgd = SGD(lr=learning_rate, momentum=0.0, decay=param['decay'], nesterov=False)
#if True == 'adam':
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=param['metrics'])
#elif optimizer == 'sgd':
# model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=param['metrics'])
c = ModelCheckpoint(filepath=best_model, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
#e = EarlyStopping(monitor='loss', min_delta=0, patience=0, verbose=0, mode='auto')
#r = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
#l = LearningRateScheduler(step_decay)
#pb = printbatch()
#callbacks = [c]
amount_of_data, parts = [],[]
#validationGenerator = getDataChinese(param, param['classes'], param['batch_size'], training_parts, amount_of_data, training_parts, validation_parts, test_parts)
if not os.path.isdir('train_history_results'):
os.mkdir('train_history_results')
#model.load_weights(best_model)
# =============================================================================================================
# FEATURE EXTRACTION
# =============================================================================================================
#features_file = '/home/anunez/project/features/features_multicam_final.h5'
#labels_file = '/home/anunez/project/labels/labels_multicam_final.h5'
features_file = '/home/anunez/project/features/features_multicam_final2.h5'
labels_file = '/home/anunez/project/labels/labels_multicam_final2.h5'
features_file2 = '/home/anunez/project/features/features_urfall_final3.h5'
labels_file2 = '/home/anunez/project/labels/labels_urfall_final3.h5'
features_file3 = '/home/anunez/project/features/features_fdd_final3.h5'
labels_file3 = '/home/anunez/project/labels/labels_fdd_final3.h5'
#features_file = '/home/anunez/project/features/features_multicam.h5'
#labels_file = '/home/anunez/project/labels/labels_multicam.h5'
save_features = False
if save_features:
print('Saving features')
#saveFeatures(param, param['classes'], param['batch_size'], 0, amount_of_data, parts, save_features, model, model2, classifier, features_file + '1.h5', labels_file + '1.h5', train_splits[0], test_splits[0])
saveFeatures(param, param['classes'], param['batch_size'], 0, amount_of_data, parts, save_features, model, classifier, features_file, labels_file, [], [])
#saveFeatures(param, param['classes'], param['batch_size'], 0, amount_of_data, parts, save_features, model, model2, classifier, features_file + '3.h5', labels_file + '3.h5', train_splits[2], test_splits[2])
#saveFeatures(param, param['classes'], param['batch_size'], 1, amount_of_data, parts, save_features, model, model2, classifier, features_file + 'validation.h5', labels_file + 'validation.h5', features_file2 + 'validation.h5', labels_file2 + 'validation.h5')
#saveFeatures(param, param['classes'], param['batch_size'], 2, amount_of_data, parts, save_features, model, model2, classifier, features_file + 'testing.h5', labels_file + 'testing.h5', features_file2 + 'testing.h5', labels_file2 + 'testing.h5')
print('Feature extraction finished')
#sys.exit()
# =============================================================================================================
# TRAINING
# =============================================================================================================
#if optimizer == 'adam':
adam = Adam(lr=learning_rate, beta_1=param['beta_1'], beta_2=param['beta_2'], epsilon=param['adam_eps'], decay=param['decay'])
#elif True or optimizer == 'sgd':
# classifier.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=param['metrics'])
do_training = True
do_training_with_features = True
do_training_normal = not do_training_with_features
compute_metrics = False
compute_roc_curve = False
threshold = 0.5
e = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=0, mode='auto')
if do_training:
if do_training_with_features:
h5features = h5py.File(features_file, 'r')
h5labels = h5py.File(labels_file, 'r')
h5features2 = h5py.File(features_file2, 'r')
h5labels2 = h5py.File(labels_file2, 'r')
h5features3 = h5py.File(features_file3, 'r')
h5labels3 = h5py.File(labels_file3, 'r')
stages = []
for i in range(1,25):
stages.append('chute{:02}'.format(i))
use_aug = False
aug = 'not_augmented'
if use_aug:
aug = 'augmented'
cams_x = []
cams_y = []
for stage, nb_stage in zip(stages, range(len(stages))):
for cam, nb_cam in zip(h5features[stage][aug].keys(), range(8)):
temp_x = []
temp_y = []
for key in h5features[stage][aug][cam].keys():
#print(h5features[stage][aug][cam][key].shape)
temp_x.append(np.asarray(h5features[stage][aug][cam][key]))
temp_y.append(np.asarray(h5labels[stage][aug][cam][key]))
#temp_x = np.asarray(temp_x)
#temp_y = np.asarray(temp_y)
temp_x = np.concatenate(temp_x,axis=0)
temp_y = np.concatenate(temp_y,axis=0)
if nb_stage == 0:
cams_x.append(temp_x)
cams_y.append(temp_y)
else:
cams_x[nb_cam] = np.concatenate([cams_x[nb_cam], temp_x], axis=0)
cams_y[nb_cam] = np.concatenate([cams_y[nb_cam], temp_y], axis=0)
sensitivities_general = []
specificities_general = []
sensitivities_urfall = []
specificities_urfall = []
sensitivities_multicam = []
specificities_multicam = []
sensitivities_fdd = []
specificities_fdd = []
X = np.asarray(np.concatenate(cams_x,axis=0))
_y = np.asarray(np.concatenate(cams_y,axis=0))
            X2 = np.asarray(h5features2['features']) #urfall
_y2 = np.asarray(h5labels2['labels'])
X3 = np.asarray(h5features3['train']) #fdd
_y3 = np.asarray(h5labels3['train'])
size_0 = np.asarray(np.where(_y2==0)[0]).shape[0]
size_1 = np.asarray(np.where(_y2==1)[0]).shape[0]
all0_1 = np.asarray(np.where(_y==0)[0])
all1_1 = np.asarray(np.where(_y==1)[0])
all0_2 = np.asarray(np.where(_y2==0)[0])
all1_2 = np.asarray(np.where(_y2==1)[0])
all0_3 = np.asarray(np.where(_y3==0)[0])
all1_3 = np.asarray(np.where(_y3==1)[0])
print(all0_1.shape[0], all1_1.shape[0])
print(all0_2.shape[0], all1_2.shape[0])
print(all0_3.shape[0], all1_3.shape[0])
all0_1 = np.random.choice(all0_1, size_0, replace=False)
all1_1 = np.random.choice(all1_1, size_0, replace=False)
all0_2 = np.random.choice(all0_2, size_0, replace=False)
all1_2 = np.random.choice(all1_2, size_0, replace=False)
all0_3 = np.random.choice(all0_3, size_0, replace=False)
all1_3 = np.random.choice(all1_3, size_0, replace=False)
slice_size = size_0/5
# LEAVE-ONE-OUT
for fold in range(5):
#print('='*30)
#print('LEAVE-ONE-OUT STEP {}/8'.format(cam))
#print('='*30)
#test_x = cams_x[cam]
#test_y = cams_y[cam]
#train_x = cams_x[0:cam] + cams_x[cam+1:]
#train_y = cams_y[0:cam] + cams_y[cam+1:]
#sss = StratifiedShuffleSplit(n_splits=2, test_size=0.2, random_state=777)
#sss.get_n_splits(train_x, train_y)
#train_index, test_index = [], []
#for a, b in sss.split(train_x, train_y):
# train_index = a
# test_index = b
#train_index = np.asarray(train_index)
#test_index = np.asarray(test_index)
#train_x, test_x = train_x[train_index], train_x[test_index]
#train_y, test_y = train_y[train_index], train_y[test_index]
#
print(all0_1.shape[0], fold*slice_size, (fold+1)*slice_size)
print(all0_1[0:fold*slice_size].shape, all0_1[(fold+1)*slice_size:].shape)
temp = np.concatenate((
np.hstack((
all0_1[0:fold*slice_size],all0_1[(fold+1)*slice_size:])),
np.hstack((
all1_1[0:fold*slice_size],all1_1[(fold+1)*slice_size:]))))
X1_train = X[temp]
_y1_train = _y[temp]
temp = np.concatenate((
np.hstack((
all0_2[0:fold*slice_size],all0_2[(fold+1)*slice_size:])),
np.hstack((
all1_2[0:fold*slice_size],all1_2[(fold+1)*slice_size:]))))
X2_train = X2[temp]
_y2_train = _y2[temp]
temp = np.concatenate((
np.hstack((
all0_3[0:fold*slice_size],all0_3[(fold+1)*slice_size:])),
np.hstack((
all1_3[0:fold*slice_size],all1_3[(fold+1)*slice_size:]))))
X3_train = X3[temp]
_y3_train = _y3[temp]
# TEST
temp = np.concatenate((
np.hstack((
all0_1[fold*slice_size:(fold+1)*slice_size])),
np.hstack((
all1_1[fold*slice_size:(fold+1)*slice_size]))))
X1_test = X[temp]
_y1_test = _y[temp]
temp = np.concatenate((
np.hstack((
all0_2[fold*slice_size:(fold+1)*slice_size])),
np.hstack((
all1_2[fold*slice_size:(fold+1)*slice_size]))))
X2_test = X2[temp]
_y2_test = _y2[temp]
temp = np.concatenate((
np.hstack((
all0_3[fold*slice_size:(fold+1)*slice_size])),
np.hstack((
all1_3[fold*slice_size:(fold+1)*slice_size]))))
X3_test = X3[temp]
_y3_test = _y3[temp]
#print(_y.shape, _y2.shape, _y3.shape)
recortar = False
if recortar:
all1_1 = np.random.choice(all1_1, size_0, replace=False)
allin = np.concatenate((all0_1.flatten(),all1_1.flatten()))
allin.sort()
X = X[allin,...]
_y = _y[allin]
all1_2 = np.random.choice(all1_2, size_0, replace=False)
allin = np.concatenate((all0_2.flatten(),all1_2.flatten()))
allin.sort()
X2 = X2[allin,...]
_y2 = _y2[allin]
all1_3 = np.random.choice(all1_3, size_0, replace=False)
allin = np.concatenate((all0_3.flatten(),all1_3.flatten()))
allin.sort()
X3 = X3[allin,...]
_y3 = _y3[allin]
#sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=777)
#sss.get_n_splits(X, _y)
#train_index, test_index = [], []
#for a, b in sss.split(X, _y):
# train_index = a
# test_index = b
#train_index = np.asarray(train_index)
#test_index = np.asarray(test_index)
#X1_train, X1_test = X[train_index], X[test_index]
#_y1_train, _y1_test = _y[train_index], _y[test_index]
#
#sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=777)
#sss.get_n_splits(X2, _y2)
#train_index, test_index = [], []
#for a, b in sss.split(X2, _y2):
# train_index = a
# test_index = b
#train_index = np.asarray(train_index)
#test_index = np.asarray(test_index)
#X2_train, X2_test = X2[train_index], X2[test_index]
#_y2_train, _y2_test = _y2[train_index], _y2[test_index]
#
#sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=777)
#sss.get_n_splits(X3, _y3)
#train_index, test_index = [], []
#for a, b in sss.split(X3, _y3):
# train_index = a
# test_index = b
#train_index = np.asarray(train_index)
#test_index = np.asarray(test_index)
#X3_train, X3_test = X3[train_index], X3[test_index]
#_y3_train, _y3_test = _y3[train_index], _y3[test_index]
#
X_train = np.concatenate((X1_train, X2_train, X3_train), axis=0)
_y_train = np.concatenate((_y1_train, _y2_train, _y3_train), axis=0)
X_test = np.concatenate((X1_test, X2_test, X3_test), axis=0)
_y_test = np.concatenate((_y1_test, _y2_test, _y3_test), axis=0)
all0 = np.asarray(np.where(_y_train==0)[0])
all1 = np.asarray(np.where(_y_train==1)[0])
print('Train Falls/NoFalls in dataset: {}/{}, total data: {}'.format(len(all0), len(all1), X_train.shape[0]))
all0 = np.asarray(np.where(_y_test==0)[0])
all1 = np.asarray(np.where(_y_test==1)[0])
print('Test Falls/NoFalls in dataset: {}/{}, total data: {}'.format(len(all0), len(all1), X_test.shape[0]))
extracted_features = Input(shape=(4096,), dtype='float32', name='input')
#x = ELU(alpha=1.0)(extracted_features)
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(extracted_features)
x = Activation('relu')(x)
x = Dropout(0.9)(x)
x = Dense(nb_neurons, name='fc2', init='glorot_uniform')(x)
#x = ELU(alpha=1.0)(x)
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
x = Activation('relu')(x)
x = Dropout(0.8)(x)
x = Dense(1, name='predictions', init='glorot_uniform')(x)
x = Activation('sigmoid')(x)
classifier = Model(input=extracted_features, output=x, name='classifier')
classifier.compile(optimizer=adam, loss='binary_crossentropy', metrics=param['metrics'])
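# The classifier is a small fully-connected head over the pre-extracted 4096-d
# features: BatchNorm -> ReLU -> Dropout(0.9) -> Dense(nb_neurons) -> BatchNorm
# -> ReLU -> Dropout(0.8) -> Dense(1) -> sigmoid, trained with binary
# cross-entropy and per-class weights to counter the fall/no-fall imbalance.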
class_weight = {0:weight_0, 1:weight_1}
#print('Data after stratify: {} + {} = {}'.format(a.shape[0], c.shape[0], a.shape[0]+c.shape[0]))
#print(class_weight)
history = classifier.fit(X_train, _y_train, validation_data=(X_test, _y_test), batch_size=1024, nb_epoch=3000, shuffle=True, class_weight=class_weight, callbacks=[e])
#predicted = classifier.predict(X2)
#for i in range(len(predicted)):
# if predicted[i] < threshold:
# predicted[i] = 0
# else:
# predicted[i] = 1
#predicted = np.asarray(predicted).astype(int)
#cm = confusion_matrix(_y2, predicted,labels=[0,1])
#tp = cm[0][0]
#fn = cm[0][1]
#fp = cm[1][0]
#tn = cm[1][1]
#tpr = tp/float(tp+fn)
##fpr = fp/float(fp+tn)
#fnr = fn/float(fn+tp)
#tnr = tn/float(tn+fp)
#print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
#print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(tpr,tnr,fpr,fnr))
#recall = tp/float(tp+fn)
#specificity = tn/float(tn+fp)
#print('Sensitivity/Recall: {}'.format(recall))
#print('Specificity: {}'.format(specificity))
#sensitivities.append(recall)
#specificities.append(specificity)
#precision = tp/float(tp+fp)
#print('Precision: {}'.format(precision))
#print('F1-measure: {}'.format(2*float(precision*recall)/float(precision+recall)))
#fpr, tpr, _ = roc_curve(_y2, predicted)
#roc_auc = auc(fpr, tpr)
#aucs.append(roc_auc)
#print('AUC: {}'.format(roc_auc))
#print(classifier.predict(_X[0:1]))
plot_training_info('prueba', param['metrics'] + ['loss'], param['save_plots'], history.history)
#print(classifier.evaluate(_X2,_y2, batch_size=batch_size))
print('======= SCIKIT METRICS: GENERAL')
# Confusion matrix
predicted = classifier.predict(X_test)
for i in range(len(predicted)):
if predicted[i] < threshold:
predicted[i] = 0
else:
predicted[i] = 1
predicted = np.asarray(predicted).astype(int)
cm = confusion_matrix(_y_test, predicted,labels=[0,1])
tp = cm[0][0]
fn = cm[0][1]
fp = cm[1][0]
tn = cm[1][1]
tpr = tp/float(tp+fn)
fpr = fp/float(fp+tn)
fnr = fn/float(fn+tp)
tnr = tn/float(tn+fp)
print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(tpr,tnr,fpr,fnr))
print('Sensitivity/Recall: {}'.format(tp/float(tp+fn)))
print('Specificity: {}'.format(tn/float(tn+fp)))
print('Accuracy: {}'.format(accuracy_score(_y_test, predicted)))
sensitivities_general.append(tp/float(tp+fn))
specificities_general.append(tn/float(tn+fp))
print('======= SCIKIT METRICS: URFALL')
predicted = classifier.predict(X2_test)
for i in range(len(predicted)):
if predicted[i] < threshold:
predicted[i] = 0
else:
predicted[i] = 1
predicted = np.asarray(predicted).astype(int)
cm = confusion_matrix(_y2_test, predicted,labels=[0,1])
tp = cm[0][0]
fn = cm[0][1]
fp = cm[1][0]
tn = cm[1][1]
tpr = tp/float(tp+fn)
fpr = fp/float(fp+tn)
fnr = fn/float(fn+tp)
tnr = tn/float(tn+fp)
print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(tpr,tnr,fpr,fnr))
print('Sensitivity/Recall: {}'.format(tp/float(tp+fn)))
print('Specificity: {}'.format(tn/float(tn+fp)))
print('Accuracy: {}'.format(accuracy_score(_y2_test, predicted)))
sensitivities_urfall.append(tp/float(tp+fn))
specificities_urfall.append(tn/float(tn+fp))
print('======= SCIKIT METRICS: MULTICAM')
predicted = classifier.predict(X1_test)
for i in range(len(predicted)):
if predicted[i] < threshold:
predicted[i] = 0
else:
predicted[i] = 1
predicted = np.asarray(predicted).astype(int)
cm = confusion_matrix(_y1_test, predicted,labels=[0,1])
tp = cm[0][0]
fn = cm[0][1]
fp = cm[1][0]
tn = cm[1][1]
tpr = tp/float(tp+fn)
fpr = fp/float(fp+tn)
fnr = fn/float(fn+tp)
tnr = tn/float(tn+fp)
print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(tpr,tnr,fpr,fnr))
print('Sensitivity/Recall: {}'.format(tp/float(tp+fn)))
print('Specificity: {}'.format(tn/float(tn+fp)))
print('Accuracy: {}'.format(accuracy_score(_y1_test, predicted)))
sensitivities_multicam.append(tp/float(tp+fn))
specificities_multicam.append(tn/float(tn+fp))
print('======= SCIKIT METRICS: FDD')
predicted = classifier.predict(X3_test)
for i in range(len(predicted)):
if predicted[i] < threshold:
predicted[i] = 0
else:
predicted[i] = 1
predicted = np.asarray(predicted).astype(int)
cm = confusion_matrix(_y3_test, predicted,labels=[0,1])
tp = cm[0][0]
fn = cm[0][1]
fp = cm[1][0]
tn = cm[1][1]
tpr = tp/float(tp+fn)
fpr = fp/float(fp+tn)
fnr = fn/float(fn+tp)
tnr = tn/float(tn+fp)
print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(tpr,tnr,fpr,fnr))
print('Sensitivity/Recall: {}'.format(tp/float(tp+fn)))
print('Specificity: {}'.format(tn/float(tn+fp)))
print('Accuracy: {}'.format(accuracy_score(_y3_test, predicted)))
sensitivities_fdd.append(tp/float(tp+fn))
specificities_fdd.append(tn/float(tn+fp))
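# The four evaluation blocks above repeat the same computation; a possible helper
# (a sketch, not part of the original code) could look like this:
#
#   def evaluate_split(model, X_eval, y_eval, threshold):
#       """Return (sensitivity, specificity) treating label 0 (fall) as positive."""
#       pred = (model.predict(X_eval).ravel() >= threshold).astype(int)
#       cm = confusion_matrix(y_eval, pred, labels=[0, 1])
#       tp, fn, fp, tn = cm[0][0], cm[0][1], cm[1][0], cm[1][1]
#       return tp / float(tp + fn), tn / float(tn + fp)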
# END OF THE LOOP
print('LEAVE-ONE-OUT RESULTS ===================')
print("Sensitivity General: %.2f%% (+/- %.2f%%)" % (np.mean(sensitivities_general), np.std(sensitivities_general)))
print("Specificity General: %.2f%% (+/- %.2f%%)\n" % (np.mean(specificities_general), np.std(specificities_general)))
print("Sensitivity UR Fall: %.2f%% (+/- %.2f%%)" % (np.mean(sensitivities_urfall), np.std(sensitivities_urfall)))
print("Specificity UR Fall: %.2f%% (+/- %.2f%%)\n" % (
|
np.mean(specificities_urfall)
|
numpy.mean
|
from copy import deepcopy
import graphviz
import numpy as np
def get_prev_nodes(matrix, node):
"""
Returns the list of the nodes arriving at the given node
:param matrix: adjacency matrix of the graph
:param node: given node index
:return:
"""
n_nodes = matrix.shape[0]
assert 0 <= node < n_nodes
return np.where(matrix[:node, node])[0]
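# Minimal usage sketch (hypothetical 3-node DAG, adjacency stored upper-triangular):
#
#   A = np.array([[0, 1, 1],
#                 [0, 0, 1],
#                 [0, 0, 0]])
#   get_prev_nodes(A, 2)  # -> array([0, 1]); nodes 0 and 1 both point to node 2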
def get_next_nodes(matrix, node):
"""
Returns the list of the nodes leaving the given node
:param matrix: adjacency matrix of the graph
:param node: given node index
:return:
"""
n_nodes = matrix.shape[0]
assert 0 <= node < n_nodes
return
|
np.where(matrix[node, node:])
|
numpy.where
|
import numpy as np
from sklearn.utils import resample
from sklearn.preprocessing import StandardScaler
from sklearn.utils.validation import check_memory
from .stat_tools import pval_from_cb
from .desparsified_lasso import desparsified_lasso, desparsified_group_lasso
def _subsampling(n_samples, train_size, groups=None, seed=0):
"""Random subsampling: computes a list of indices"""
if groups is None:
n_subsamples = int(n_samples * train_size)
train_index = resample(np.arange(n_samples), n_samples=n_subsamples,
replace=False, random_state=seed)
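# Without groups: draw a plain random subset of int(n_samples * train_size) row
# indices, without replacement, using `seed` for reproducibility.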
else:
unique_groups =
|
np.unique(groups)
|
numpy.unique
|
from parafermions.MPO import MPO
import time
import numpy as np
import scipy as sp
class CommutatorOp(MPO):
"""
Class for MPO representations of commutators.
"""
def __init__(self, N, t, D, mu, U, Nw, Pw, dtype=np.dtype('complex128')):
"""
Constructor of MPO commutator class.
Parameters
------------
N: int
The length of the system.
t: float array
Hopping parameters same length as system.
D: float array
Pairing term (delta) same length as system.
mu: float array
Chemical potential same length as system.
U: float array
Interaction term same length as system.
Nw: float array
Number weighting same length as system.
Pw: float
Parity weighting.
dtype: numpy dtype, optional
Data type of the MPO tensors (default: complex128).
"""
self.L = 2*N
d=2; self.N = d   # local (physical) dimension
M=13; self.chi = M  # MPO bond dimension
self.dtype = dtype
self.shape = (self.N**self.L, self.N**self.L) # for mat vec routine
self.dim = self.N**self.L
X = np.asarray([[0, 1],[1, 0]], dtype=dtype)
Y = np.asarray([[0, -1j],[1j, 0]], dtype=dtype)
Z = np.asarray([[1, 0],[0, -1]], dtype=dtype)
I = np.eye(d, dtype=dtype)
b = np.zeros((1,N), dtype=dtype); self.b = b # empty row
p=np.reshape(np.vstack([t+D, b]).T, [2*N,1])/2.0 ; self.p = p
r=np.reshape(np.vstack([mu,-(t-D)]).T,[2*N,1])/2.0; self.r = r
u=np.reshape(np.vstack([U, b]).T, [2*N,1])/2.0; self.u = u
padding = np.reshape(np.asarray([0.0,0.0]),(2,1))
p=np.vstack([padding, p]); self.p = p
# r=[r]; # looks redundant
u=np.vstack([padding, u]); self.u = u
Nw=np.reshape(np.vstack([Nw,Nw]).T,[1,2*N]); self.Nw = Nw
Ws = {} #H=cell(1,2*N);
H1 = np.zeros((1,M,d,d), dtype=dtype) #H1 = zeros(1,d,M,d);
H1[0,0,:,:] = I #H1(1,:,1,:) = I;
H1[0,1,:,:] = -Y #H1(1,:,2,:) = -Y;
H1[0,2,:,:] = X #H1(1,:,3,:) = X;
H1[0,3,:,:] = -Y*r[0] #H1(1,:,4,:) = -Y*r(1);
H1[0,4,:,:] = X*r[0] #H1(1,:,5,:) = X*r(1);
H1[0,11,:,:] = Pw*Z #H1(1,:,12,:)=Pw*Z;
H1[0,12,:,:] = -Nw[0,0]*Z/2.0 #H1(1,:,13,:)= -Nw(1)/2*(Z);
Ws[0] = H1 #H{1,1} = (H1);
for n in range(1, 2*N-1): #for n=2:2*N-1
Hn = np.zeros((M,M,d,d), dtype=dtype) #Hn = zeros(M,d,M,d);
Hn[0,0,:,:] = I #Hn(1,:,1,:) = I;
Hn[0,1,:,:] = -Y #Hn(1,:,2,:) = -Y;
Hn[0,2,:,:] = X #Hn(1,:,3,:) = X;
Hn[0,3,:,:] = -Y*r[n] #Hn(1,:,4,:) = -Y*r(n);
Hn[0,4,:,:] = X*r[n] #Hn(1,:,5,:) = X*r(n);
Hn[11,11,:,:] = Z #Hn(12,:,12,:)=Z;
Hn[0,12,:,:] = -Nw[0,n]*(Z-I)/2.0 #Hn(1,:,13,:) = -Nw(n)/2*(Z-I);
Hn[1,5,:,:] = X #Hn(2,:,6,:) = X;
Hn[1,6,:,:] = Z #Hn(2,:,7,:) = Z;
Hn[2,7,:,:] = Z #Hn(3,:,8,:) = Z;
Hn[2,8,:,:] = Y #Hn(3,:,9,:) = Y;
Hn[3,12,:,:] = X #Hn(4,:,13,:) = X;
Hn[4,12,:,:] = Y #Hn(5,:,13,:) = Y;
Hn[5,9,:,:] = Y*u[n] #Hn(6,:,10,:) = Y*u(n);%;1i*sy;
Hn[6,9,:,:] = p[n]*Z #Hn(7,:,10,:) = p(n)*Z;
Hn[7,10,:,:] = p[n]*Z #Hn(8,:,11,:) = p(n)*Z;
Hn[8,10,:,:] = X*u[n] #Hn(9,:,11,:) = X*u(n);
Hn[9,12,:,:] = X #Hn(10,:,13,:) = X;
Hn[10,12,:,:] = Y #Hn(11,:,13,:) = Y;%;1i*sy;
Hn[12,12,:,:] = I #Hn(13,:,13,:) = I;
Ws[n] = Hn
HN = np.zeros((M,1,d,d), dtype=dtype) #HN = zeros(M,d,1,d);
HN[0,0,:,:] = -Nw[0,2*N-1]/2*(Z)+Pw*I #HN(1,:,1,:) = -Nw(2*N)/2*(Z-I)+Pw*I;% Only put in -I when using zero Nw at site 1
HN[3,0,:,:] = X #HN(4,:,1,:) = X;
HN[4,0,:,:] = Y #HN(5,:,1,:) = Y;
HN[9,0,:,:] = X #HN(10,:,1,:) = X;
HN[10,0,:,:] = Y #HN(11,:,1,:) = Y;
HN[12,0,:,:] = I #HN(13,:,1,:) = I;
HN[11,0,:,:] = Z #HN(12,:,1,:)=Z;
Ws[2*N-1] = HN
self.Ws = Ws
self.Lp = np.ones(1, dtype=dtype)
self.Rp =
|
np.ones(1, dtype=dtype)
|
numpy.ones
|
from __future__ import print_function
import numpy as np
import os
import threading
from navrep.tools.rings import generate_rings
from navrep.models.rnn import reset_graph, sample_hps_params, MDNRNN, get_pi_idx, MAX_GOAL_DIST
from navrep.models.vae2d import ConvVAE
_Z = 32
_G = 2
class DreamEnv(object):
def __init__(self, temperature=0.25,
initial_z_path=os.path.expanduser(
"~/navrep/datasets/M/ian/000_mus_logvars_robotstates_actions_rewards_dones.npz"
),
rnn_model_path=os.path.expanduser("~/navrep/models/M/rnn.json"),
vae_model_path=os.path.expanduser("~/navrep/models/V/vae.json"),
):
# constants
self.TEMPERATURE = temperature
self.DT = 0.5 # should be the same as data rnn was trained with
# V + M Models
reset_graph()
self.rnn = MDNRNN(sample_hps_params, gpu_mode=False)
self.vae = ConvVAE(batch_size=1, is_training=False)
self.vae.load_json(vae_model_path)
self.rnn.load_json(rnn_model_path)
# load initial image encoding
arrays = np.load(initial_z_path)
initial_mu = arrays["mus"][0]
initial_logvar = arrays["logvars"][0]
initial_robotstate = arrays["robotstates"][0]
ini_lidar_z = initial_mu + np.exp(initial_logvar / 2.0) * np.random.randn(
*(initial_mu.shape)
)
ini_goal_z = initial_robotstate[:2] / MAX_GOAL_DIST
self.initial_z = np.concatenate([ini_lidar_z, ini_goal_z], axis=-1)
# other tools
self.rings_def = generate_rings(64, 64)
self.viewer = None
# environment state variables
self.reset()
# hot-start the rnn state
for i in range(20):
self.step(np.array([0,0,0]), override_next_z=self.initial_z)
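# The RNN state is warm-started by stepping the dream 20 times with a zero action
# while forcing the latent back to the stored initial encoding via override_next_z.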
def step(self, action, override_next_z=None):
feed = {
self.rnn.input_z: np.reshape(self.prev_z, (1, 1, _Z+_G)),
self.rnn.input_action: np.reshape(action, (1, 1, 3)),
self.rnn.input_restart: np.reshape(self.prev_restart, (1, 1)),
self.rnn.initial_state: self.rnn_state,
}
[logmix, mean, logstd, logrestart, next_state] = self.rnn.sess.run(
[
self.rnn.out_logmix,
self.rnn.out_mean,
self.rnn.out_logstd,
self.rnn.out_restart_logits,
self.rnn.final_state,
],
feed,
)
OUTWIDTH = _Z+_G
if self.TEMPERATURE == 0: # deterministically pick max of MDN distribution
mixture_idx = np.argmax(logmix, axis=-1)
chosen_mean = mean[(range(OUTWIDTH), mixture_idx)]
chosen_logstd = logstd[(range(OUTWIDTH), mixture_idx)]
next_z = chosen_mean
else: # sample from modelled MDN distribution
mixprob = np.copy(logmix) / self.TEMPERATURE # adjust temperatures
mixprob -= mixprob.max()
mixprob = np.exp(mixprob)
mixprob /= mixprob.sum(axis=1).reshape(OUTWIDTH, 1)
mixture_idx = np.zeros(OUTWIDTH)
chosen_mean = np.zeros(OUTWIDTH)
chosen_logstd = np.zeros(OUTWIDTH)
for j in range(OUTWIDTH):
idx = get_pi_idx(np.random.rand(), mixprob[j])
mixture_idx[j] = idx
chosen_mean[j] = mean[j][idx]
chosen_logstd[j] = logstd[j][idx]
rand_gaussian = np.random.randn(OUTWIDTH) * np.sqrt(self.TEMPERATURE)
next_z = chosen_mean + np.exp(chosen_logstd) * rand_gaussian
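# The branch above turns the mixture logits into temperature-scaled probabilities
# (dividing by TEMPERATURE, subtracting the max for numerical stability, then
# normalising), picks one mixture component per output dimension, and samples
# next_z from the selected Gaussian with its noise scaled by sqrt(TEMPERATURE).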
if sample_hps_params.differential_z:
next_z = self.prev_z + next_z
next_restart = 0
# if logrestart[0] > 0:
# next_restart = 1
self.prev_z = next_z
if override_next_z is not None:
self.prev_z = override_next_z
self.prev_restart = next_restart
self.rnn_state = next_state
# logging-only vars, used for rendering
self.prev_action = action
self.episode_step += 1
return next_z, None, next_restart, {}
def reset(self):
self.prev_z = self.initial_z
self.prev_restart = np.array([1])
self.rnn_state = self.rnn.sess.run(self.rnn.zero_state)
# logging vars
self.prev_action = np.array([0.0, 0.0, 0.0])
self.episode_step = 0
def render(self, mode="human", close=False):
if close:
if self.viewer is not None:
self.viewer.close()
return
# get last z decoding
rings_pred = (
self.vae.decode(self.prev_z.reshape(1, _Z+_G)[:, :_Z])
* self.rings_def["rings_to_bool"]
)
predicted_ranges = self.rings_def["rings_to_lidar"](rings_pred, 1080)
goal_pred = self.prev_z.reshape((_Z+_G,))[_Z:] * MAX_GOAL_DIST
if mode == "rgb_array":
raise NotImplementedError
elif mode == "human":
# Window and viewport size
WINDOW_W = 256
WINDOW_H = 256
M_PER_PX = 25.6 / WINDOW_H
VP_W = WINDOW_W
VP_H = WINDOW_H
from gym.envs.classic_control import rendering
import pyglet
from pyglet import gl
# Create viewer
if self.viewer is None:
self.viewer = rendering.Viewer(WINDOW_W, WINDOW_H)
self.score_label = pyglet.text.Label(
"0000",
font_size=12,
x=20,
y=WINDOW_H * 2.5 / 40.00,
anchor_x="left",
anchor_y="center",
color=(255, 255, 255, 255),
)
# self.transform = rendering.Transform()
self.currently_rendering_iteration = 0
self.image_lock = threading.Lock()
# Render in pyglet
def make_circle(c, r, res=10):
thetas = np.linspace(0, 2 * np.pi, res + 1)[:-1]
verts = np.zeros((res, 2))
verts[:, 0] = c[0] + r * np.cos(thetas)
verts[:, 1] = c[1] + r * np.sin(thetas)
return verts
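# make_circle returns an (res, 2) array of polygon vertices approximating a circle
# of radius r centred at c, e.g. make_circle((0.0, 0.0), 1.0, res=10).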
with self.image_lock:
self.currently_rendering_iteration += 1
self.viewer.draw_circle(r=10, color=(0.3, 0.3, 0.3))
win = self.viewer.window
win.switch_to()
win.dispatch_events()
win.clear()
gl.glViewport(0, 0, VP_W, VP_H)
# colors
bgcolor = np.array([0.4, 0.8, 0.4])
nosecolor =
|
np.array([0.3, 0.3, 0.3])
|
numpy.array
|