prompt (stringlengths 15 to 655k) | completion (stringlengths 3 to 32.4k) | api (stringlengths 8 to 52) |
---|---|---|
from builtins import object
import numpy as np
from PIL import Image
from struct import pack, unpack_from
V = True
class RLXMessageImage(object):
def __init__(self, image):
self.image = image
class RLXMessage(object):
# types: https://docs.python.org/3.5/library/struct.html#struct.pack_into
TYPE_NONE = 0
TYPE_NULL = 1
TYPE_INT4 = 2
TYPE_STRING_UTF8 = 3
TYPE_DOUBLE = 4
TYPE_BOOLEAN = 5
TYPE_IMAGE = 6
TYPE_NDARRAY = 7
TYPE_LIST = 8
TYPE_UINT4 = 9
TYPE_INT64 = 10
TYPE_DICT = 11
pack_info = {
type(None).__name__: {'id': TYPE_NULL, 'pack_func': lambda cls, *args: cls._pack_null(*args),
'unpack_func': lambda cls, *args: cls._unpack_null(*args)},
int.__name__: {'id': TYPE_INT4, 'pack_func': lambda cls, *args: cls._pack_int(*args),
'unpack_func': lambda cls, *args: cls._unpack_int(*args)},
'int32': {'id': TYPE_INT4, 'pack_func': lambda cls, *args: cls._pack_int(*args),
'unpack_func': lambda cls, *args: cls._unpack_int(*args)},
'int64': {'id': TYPE_INT64, 'pack_func': lambda cls, *args: cls._pack_int64(*args),
'unpack_func': lambda cls, *args: cls._unpack_int64(*args)},
str.__name__: {'id': TYPE_STRING_UTF8, 'pack_func': lambda cls, *args: cls._pack_string(*args),
'unpack_func': lambda cls, *args: cls._unpack_string(*args)},
float.__name__: {'id': TYPE_DOUBLE, 'pack_func': lambda cls, *args: cls._pack_double(*args),
'unpack_func': lambda cls, *args: cls._unpack_double(*args)},
'float64': {'id': TYPE_DOUBLE, 'pack_func': lambda cls, *args: cls._pack_double(*args),
'unpack_func': lambda cls, *args: cls._unpack_double(*args)},
bool.__name__: {'id': TYPE_BOOLEAN, 'pack_func': lambda cls, *args: cls._pack_bool(*args),
'unpack_func': lambda cls, *args: cls._unpack_bool(*args)},
RLXMessageImage.__name__: {'id': TYPE_IMAGE, 'pack_func': lambda cls, *args: cls._pack_image(*args),
'unpack_func': lambda cls, *args: cls._unpack_image(*args)},
np.ndarray.__name__: {'id': TYPE_NDARRAY, 'pack_func': lambda cls, *args: cls._pack_ndarray(*args),
'unpack_func': lambda cls, *args: cls._unpack_ndarray(*args)},
list.__name__: {'id': TYPE_LIST, 'pack_func': lambda cls, *args: cls._pack_list(*args),
'unpack_func': lambda cls, *args: cls._unpack_list(*args)},
dict.__name__: {'id': TYPE_DICT, 'pack_func': lambda cls, *args: cls._pack_dict(*args),
'unpack_func': lambda cls, *args: cls._unpack_dict(*args)}
}
@classmethod
def _pack_type(cls, type_id, buf, pack_type=True):
if pack_type:
buf += pack("B", type_id)
@classmethod
def _unpack_type(cls, buf, offset):
return unpack_from("B", buf, offset)[0], offset+1
@classmethod
def _pack_null(cls, value, buf, pack_type=True):
cls._pack_type(cls.TYPE_NULL, buf, pack_type)
@classmethod
def _unpack_null(cls, buf, offset):
return None, offset
@classmethod
def _pack_string(cls, value, buf, pack_type=True):
cls._pack_type(cls.TYPE_STRING_UTF8, buf, pack_type)
bval = bytearray(str(value).encode('UTF-8'))
buf += pack("I", len(bval))
buf += bval
@classmethod
def _unpack_string(cls, buf, offset):
reslen = unpack_from("I", buf, offset)[0]
offset += 4
res = str(buf[offset:offset+reslen].decode('UTF-8'))
offset += reslen
return res, offset
@classmethod
def _pack_int(cls, value, buf, pack_type=True):
cls._pack_type(cls.TYPE_INT4, buf, pack_type)
buf += pack("i", value)
@classmethod
def _unpack_int(cls, buf, offset):
res = unpack_from("i", buf, offset)[0]
offset += 4
return res, offset
@classmethod
def _pack_int64(cls, value, buf, pack_type=True):
cls._pack_type(cls.TYPE_INT64, buf, pack_type)
buf += pack("q", value)
@classmethod
def _unpack_int64(cls, buf, offset):
res = unpack_from("q", buf, offset)[0]
offset += 8
return res, offset
@classmethod
def _pack_double(cls, value, buf, pack_type=True):
cls._pack_type(cls.TYPE_DOUBLE, buf, pack_type)
buf += pack("d", value)
@classmethod
def _unpack_double(cls, buf, offset):
res = unpack_from("d", buf, offset)[0]
offset += 8
return res, offset
@classmethod
def _pack_bool(cls, value, buf, pack_type=True):
cls._pack_type(cls.TYPE_BOOLEAN, buf, pack_type)
buf += pack("B", 1 if value else 0)
@classmethod
def _unpack_bool(cls, buf, offset):
res = unpack_from("B", buf, offset)[0]
res = res == 1
offset += 1
return res, offset
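# Illustrative sketch (added, not part of the original source): every value is
# framed as a one-byte type tag followed by its payload, so an int round-trips
# like this on a little-endian machine:
#   buf = bytearray()
#   RLXMessage._pack_int(7, buf)                    # buf == b'\x02\x07\x00\x00\x00'
#   type_id, off = RLXMessage._unpack_type(buf, 0)  # type_id == RLXMessage.TYPE_INT4
#   value, off = RLXMessage._unpack_int(buf, off)   # value == 7, off == 5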
@classmethod
def _pack_image(cls, value, buf, pack_type=True):
cls._pack_type(cls.TYPE_IMAGE, buf, pack_type)
cls._pack_string(value.image.mode, buf, False)
buf += pack("I", value.image.size[0])
buf += pack("I", value.image.size[1])
bval = value.image.tobytes()
buf += pack("I", len(bval))
buf += bval
@classmethod
def _unpack_image(cls, buf, offset):
mode, offset = cls._unpack_string(buf, offset)
x = unpack_from("I", buf, offset)[0]
offset += 4
y = unpack_from("I", buf, offset)[0]
offset += 4
reslen = unpack_from("I", buf, offset)[0]
offset += 4
img = Image.frombytes(mode, (x, y), bytes(buf[offset:offset+reslen])) # .convert("RGB")
res = np.asarray(img)
if img.mode in ["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV"]:
res = res.astype(np.float32) * (1.0 / 255.0)
# print(res.shape)
# res = np.reshape(res, (x, y, 1))
offset += reslen
return res, offset
@classmethod
def _pack_ndarray(cls, value, buf, pack_type=True):
cls._pack_type(cls.TYPE_NDARRAY, buf, pack_type)
cls._pack_string(str(value.dtype), buf, False)
buf += pack("I", len(value.shape))
for ns in value.shape:
buf += pack("I", ns)
bval = value.tobytes()
buf += pack("I", len(bval))
buf += bval
@classmethod
def _unpack_ndarray(cls, buf, offset):
dtype, offset = cls._unpack_string(buf, offset)
shape_len = unpack_from("I", buf, offset)[0]
offset += 4
shape = []
for i in range(0, shape_len):
item = unpack_from("I", buf, offset)[0]
offset += 4
shape.append(item)
reslen = unpack_from("I", buf, offset)[0]
offset += 4
res = np.frombuffer(buf[offset:offset+reslen], dtype=
| np.dtype(dtype) | numpy.dtype |
import numpy as np
import random
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, SpatialDropout1D
from keras.layers import Activation, Flatten, Input, Masking
from keras.layers import Convolution1D, MaxPooling1D
from keras.utils import np_utils
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from sklearn.metrics import auc, roc_auc_score, roc_curve, precision_recall_curve
from keras.layers.recurrent import LSTM, GRU
from keras.layers.wrappers import Bidirectional
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras import regularizers
from keras import losses
from keras import optimizers
from keras.models import Model
from crf import CRF
from layer.PyramidPooling import PyramidPooling
from keras.utils import multi_gpu_model
from utils import generate_label, make_batch
import time
from sys import argv
script, GOterm = argv
def run_crf(score_map, co_exp_net, testing_size, theta):
positive_unary_energy = 1 - score_map
crf = CRF(testing_size, positive_unary_energy, co_exp_net, theta)
pos_prob_crf = crf.inference(10)
return pos_prob_crf
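# Hedged usage note (added, not in the original): score_map is expected to hold
# the DNN's positive-class probability for each of the testing_size isoforms,
# co_exp_net the testing_size x testing_size co-expression matrix, and theta the
# learned CRF weights; the scores are refined with 10 CRF inference iterations:
#   refined = run_crf(score_map, co_exp_net, score_map.shape[0], theta)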
#-------------------------------------------------------------------------------
#Loading sequence data
def load_sequence_data():
X_test_seq = np.load('../data/sequences/human_sequence_test_demo.npy')
X_test_dm = np.load('../data/domains/human_domain_test_demo.npy')
X_test_geneid = np.load('../data/id_lists/gene_list_test_demo.npy')
X_test_isoid = np.load('../data/id_lists/isoform_list_test_demo.npy')
return X_test_seq, X_test_dm, X_test_geneid, X_test_isoid
#-------------------------------------------------------------------------------
#Loading positive and negative set
def pos_gene_set(StudiedGOterm):
positive_set = []
fr = open('../data/annotations/human_annotations.txt', 'r')
while True:
line = fr.readline()
if not line:
break
line = line.split('\n')[0]
gene = line.split('\t')[0]
GO = line.split('\t')[1:]
if StudiedGOterm in GO:
positive_set.append(gene)
fr.close()
return positive_set
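# Added note (inferred from the parsing above, shown for illustration): each line
# of human_annotations.txt is expected to be tab-separated, with the gene symbol
# first and its GO terms after, e.g.
#   GENE_SYMBOL<TAB>GO:0005524<TAB>GO:0016301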
#-------------------------------------------------------------------------------
###### main function ######
StudiedGOterm = GOterm
print('Testing model for ' + StudiedGOterm)
positive_Gene = pos_gene_set(StudiedGOterm)
co_exp_net = np.load('../data/co-expression_net/coexp_net_unified_demo.npy')
X_test_seq, X_test_dm, X_test_geneid, X_test_isoid = load_sequence_data()
K_testing_size = X_test_seq.shape[0]
seq_dim = X_test_seq.shape[1]
dm_dim = X_test_dm.shape[1]
print('Generating initial label...')
y_test = generate_label(X_test_geneid, positive_Gene)
## Model architecture
seq_input = Input(shape=(None, ), dtype='int32', name='seq_input')
x1 = Embedding(input_dim = 8001, output_dim = 32)(seq_input)
x1 = Convolution1D(filters = 64, kernel_size = 32, strides = 1, padding = 'valid', activation = 'relu')(x1)
x1 = PyramidPooling([1, 2, 4, 8])(x1)
x1 = Dense(32, kernel_regularizer=regularizers.l2(0.15))(x1)
x1 = Activation('relu')(x1)
x1 = Dropout(0.5)(x1)
x1 = Dense(16, kernel_regularizer=regularizers.l2(0.15))(x1)
seq_output = Activation('relu')(x1)
domain_input = Input(shape=(dm_dim, ), dtype='int32', name='domain_input')
x2 = Embedding(input_dim = 16155, output_dim = 32, input_length = dm_dim, mask_zero = True)(domain_input)
domain_output = LSTM(16)(x2)
x = keras.layers.concatenate([seq_output, domain_output])
x = Dense(16, kernel_regularizer=regularizers.l2(0.15))(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, kernel_regularizer=regularizers.l2(0.15))(x)
output = Activation('sigmoid')(x)
model = Model(inputs = [seq_input, domain_input], outputs = output)
model.summary()
adam = optimizers.Adam(lr = 0.002, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08, decay=0.01)
model.compile(loss= losses.binary_crossentropy, optimizer= adam, metrics=['accuracy'])
## Load model
model.load_weights('../saved_models/'+ StudiedGOterm + '_DNN.h5')
theta = np.load('../saved_models/'+ StudiedGOterm +'_CRF_weights.npy')
## Testing
tup_idx, tup_gp = make_batch(X_test_seq)
y_pred =
| np.array([]) | numpy.array |
########################################################################
# Copyright 2021, UChicago Argonne, LLC
#
# Licensed under the BSD-3 License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a
# copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
########################################################################
"""
date: 2021-11-02
author: <NAME>
This module defines the Subchannel class to map coolant and structural
subchannels in and between hexagonal fuel assemblies.
"""
# Still need to implement SC-PIN adjacency (N_sc x 3)
########################################################################
import numpy as np
# _sqrt3over2 = 0.866025403784439
_invSqrt3over2 = 1.154700538379252
_sqrt3over3 = 0.5773502691896257
_sqrt3 = 1.7320508075688772
class Subchannel(object):
"""Map subchannels and neighbors in hexagonal fuel assemblies.
Define the location and adjacency of the subchannels in the
assembly. This is done by constructing a array mapping the
the relative position of subchannels in the assembly. Like for
the fuel pin labels, the subchannels are defined in concentric
rings around the assembly. The array informs connections between
subchannels and (1) other subchannels, and (2) adjacent fuel
pins, as well as the X-Y position of the subchannel centroids.
This object also defines the types of subchannels.
Parameters
----------
n_ring : int
Number of pin rings (incl. center pin) in the assembly
pin_pitch : float
Pin center-to-center pitch distance
pin_diameter : float
Diameter of pin outer clad
pin_map : numpy.ndarray
Mapping of pin IDs into array (from PinLattice)
pin_xy : numpy.ndarray
XY coordinates of pins in the assembly (from PinLattice)
duct_ftf : list
List of tuples containing inner and outer duct
flat-to-flat distances
test: bool (optional)
If testing, do not run all the instantiation methods; instead,
allow the object to be instantiated without calling them so
they can be called incrementally and independently
Attributes
----------
n_sc : dict
Number of subchannels of each type in different assembly regions
type : numpy.ndarray
Type of each subchannel in the assembly
sc_adj : numpy.ndarray
Subchannel neighbors for each subchannel in the assembly
pin_adj : numpy.ndarray
Subchannel neighbors for each pin in the assembly
sc_xy : numpy.ndarray
Array (N_sc x 2) containing the X-Y coordinates of each
subchannel in the assembly
"""
_edge_angle = [np.pi / 3, 0.0, 5 * np.pi / 3, 4 * np.pi / 3,
np.pi, 2 * np.pi / 3]
_corner_angle = [np.pi / 6, 11 * np.pi / 6, 3 * np.pi / 2,
7 * np.pi / 6, 5 * np.pi / 6, np.pi / 2]
def __init__(self, n_ring, pin_pitch, pin_diameter,
pin_map, pin_xy, duct_ftf, test=False):
"""Instantiate a subchannelSetup object."""
# Count the different types of subchannels
self.n_sc = {}
self.n_sc['coolant'] = {}
self.n_sc['coolant']['interior'] = 6 * (n_ring - 1)**2
self.n_sc['coolant']['edge'] = (n_ring - 1) * 6
self.n_sc['coolant']['corner'] = 6
self.n_sc['coolant']['total'] = 6 * (n_ring**2
- n_ring + 1)
# Assemblies with multiple ducts have the same number of SC
# in each duct/bypass
self.n_sc['duct'] = {}
self.n_sc['duct']['edge'] = (n_ring - 1) * 6
self.n_sc['duct']['corner'] = 6
self.n_sc['duct']['total'] = 6 * n_ring
# Bypass channels - if present, same number as duct channels
self.n_sc['bypass'] = {}
if len(duct_ftf) > 1:
self.n_sc['bypass']['edge'] = self.n_sc['duct']['edge']
self.n_sc['bypass']['corner'] = 6
self.n_sc['bypass']['total'] = self.n_sc['duct']['total']
else:
self.n_sc['bypass']['edge'] = 0
self.n_sc['bypass']['corner'] = 0
self.n_sc['bypass']['total'] = 0
# Get the total number of subchannels
self.n_sc['total'] = (self.n_sc['coolant']['total']
+ (2 * len(duct_ftf) - 1)
* self.n_sc['duct']['total'])
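# Worked example (illustrative, not from the original source): for n_ring = 3
# and a single duct (len(duct_ftf) == 1) the formulas above give
#   interior = 6*(3-1)**2 = 24, edge = 6*(3-1) = 12, corner = 6
#   coolant total = 6*(3**2 - 3 + 1) = 42, duct total = 6*3 = 18
#   grand total = 42 + (2*1 - 1)*18 = 60 subchannels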
# --------------------------------------------------------------
if test:
return
# --------------------------------------------------------------
# Subchannel types:
self.type = self.setup_sc_type(n_ring, duct_ftf)
# Subchannel map
self._int_map = self._make_interior_sc_map(n_ring)
self._ext_map = self._make_exterior_sc_map(n_ring)
self._map = np.add(self._int_map, self._ext_map)
# Subchannel-subchannel adjacency
# See method(s) below for more details and docstrings
self.sc_adj = self.find_sc_sc_neighbors(n_ring, duct_ftf)
# Subchannel-pin adjacency
# (N_pin x 6); subchannels adjacent to each pin
# See method(s) below for more details and docstrings
self.pin_adj = self.find_pin_sc_neighbors(n_ring, pin_map)
# Pin-subchannel adjacency
# (N_coolant_sc x 6); pins adjacent to each coolant subchannel
self.rev_pin_adj = self.reverse_pin_neighbors()
# Subchannel X-Y position
self.xy = self.find_sc_xy(n_ring, pin_pitch, pin_diameter,
pin_xy, duct_ftf)
# UPDATE ARRAYS TO PYTHON INDEXING (Type 1 --> Type 0)
self.type -= 1
self.sc_adj -= 1
self.pin_adj -= 1
####################################################################
# SUBCHANNEL TYPE SETUP
####################################################################
def setup_sc_type(self, n_ring, duct_ftf):
"""Set up the subchannel types.
Parameters
----------
n_ring : int
Number of pin rings (incl. center pin) in the assembly
duct_ftf : list
List of tuples containing the inner/outer flat-to-flat
distances for each duct in the assembly
Notes
-----
Coolant: (1) Interior (2) Edge (3) Corner
Duct: (4) Edge (5) Corner
Bypass: (6) Edge (7) Corner
(Inter-asm gap not attributed to any assembly)
"""
sc_type = np.ones(self.n_sc['coolant']['interior'], dtype="int")
# Append coolant SC types 2 and 3 for each side
ext = np.ones(int(n_ring - 1), dtype="int") * 2
ext = np.append(ext, np.array([3], dtype="int"))
for side in range(0, 6): # loop over hex sides
sc_type = np.append(sc_type, ext)
# Append duct wall SC types 4,5 for each side; same number of
# SC as for edge/corner coolant channels with same arrangement
duct = ext + 2
bypass = duct + 2
for i in range(0, len(duct_ftf)):
if i > 0: # bypass only occurs within outer ducts i > 0
for side in range(0, 6):
sc_type = np.append(sc_type, bypass)
for side in range(0, 6): # loop over hex sides
sc_type = np.append(sc_type, duct)
assert len(sc_type) == self.n_sc['total']
return sc_type
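# Illustrative layout (added, not from the original source): for n_ring = 3 and
# a single duct, the returned vector is 24 interior entries (type 1), then six
# sides of [2, 2, 3] for edge/corner coolant, then six sides of [4, 4, 5] for
# the duct wall: 24 + 18 + 18 = 60 entries in total.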
####################################################################
# SUBCHANNEL MAP
####################################################################
@classmethod
def _make_interior_sc_map(cls, n_ring):
"""Generate a map of the interior subchannels.
Parameters
----------
n_ring : int
Number of pin rings (incl. center pin) in the assembly
Returns
-------
numpy.ndarray
Map of the locations of the interior subchannels
Notes
-----
Walk a "ring" around an array labeling the interior-type
subchannels that occupy the row-column positions. The shape
of the rings is designed to show the subchannel-pin adjacency
Example
-------
For an assembly with 3 pin rings, there are two rings of
interior subchannels (one between the first and second rings
and another between the second and third rings). The final
result for this system is obtained by filling in entries
for each subchannel ring:
_step 1 _step 2 Final result
(around center pin) (between ring 2-3) Int. subchannel map
| 00 00 00 00 | | 00 24 07 00 | | 00 24 07 00 |
| 00 00 00 00 | | 22 23 08 09 | | 22 23 08 09 |
| 00 06 01 00 | | 21 00 00 10 | | 21 06 01 10 |
| 00 05 02 00 | + | 20 00 00 11 | = | 20 05 02 11 |
| 00 04 03 00 | | 19 00 00 12 | | 19 04 03 12 |
| 00 00 00 00 | | 18 17 14 13 | | 18 17 14 13 |
| 00 00 00 00 | | 00 16 15 00 | | 00 16 15 00 |
The map below distorts the array to illustrate the ring
arrangement of subchannels around pins from the example.
The pin locations are at the "x" marks; pick any "x" and
you can see the subchannels that surround it. The 00 entries
are shown to lie outside of the interior subchannel domain.
X
| 00 X 24 07 X 00 |
|X 22 23 X 08 09 X|
| 21 X 06 01 X 10 |
|X 20 05 X 02 11 X|
| 19 X 04 03 X 12 |
|X 18 17 X 14 13 X|
| 00 X 16 15 X 00 |
X
"""
map = np.zeros((4 * n_ring - 1, 2 * n_ring), dtype="int")
for ring in range(2, n_ring + 1):
# Starting position
loc = ((n_ring - ring) * 2 + 2, n_ring)
sc_id = np.amax(map) + 1
map[loc] = sc_id
# Begin walking through the ring
loc, map, sc_id = cls._step(loc, 'down', map, sc_id)
for i in range(0, ring - 2): # Walk down the weird side
loc, map, sc_id = cls._step(loc, 'right', map, sc_id)
loc, map, sc_id = cls._step(loc, 'down', map, sc_id)
for i in range(0, ring - 2):
loc, map, sc_id = cls._step(loc, 'down', map, sc_id)
loc, map, sc_id = cls._step(loc, 'down', map, sc_id)
for i in range(0, ring - 2):
loc, map, sc_id = cls._step(loc, 'down', map, sc_id)
loc, map, sc_id = cls._step(loc, 'left', map, sc_id)
loc, map, sc_id = cls._step(loc, 'down', map, sc_id)
loc, map, sc_id = cls._step(loc, 'left', map, sc_id)
loc, map, sc_id = cls._step(loc, 'up', map, sc_id)
for i in range(0, ring - 2): # Walk up the weird other side
loc, map, sc_id = cls._step(loc, 'left', map, sc_id)
loc, map, sc_id = cls._step(loc, 'up', map, sc_id)
for i in range(0, ring - 2):
loc, map, sc_id = cls._step(loc, 'up', map, sc_id)
loc, map, sc_id = cls._step(loc, 'up', map, sc_id)
for i in range(0, ring - 2):
loc, map, sc_id = cls._step(loc, 'up', map, sc_id)
loc, map, sc_id = cls._step(loc, 'right', map, sc_id)
# Take the last _step up
loc, map, sc_id = cls._step(loc, 'up', map, sc_id)
return map
def _make_exterior_sc_map(self, n_ring):
r"""Generate map of the exterior (edge and corner) subchannels.
Parameters
----------
n_ring : int
Number of pin rings (incl. center pin) in the assembly
Walk a "ring" around an array labeling the exterior-type
subchannels that occupy the row-column positions. The exterior
subchannels are the edge and corner subchannels. The shape
of the rings is designed to show the subchannel-pin adjacency.
Because each pin on the outer ring only touches 5 subchannels,
this method is different from the one that labels the interior
subchannels in that one of the six possible subchannel
positions is skipped at each of the six hexagon faces.
Returns
-------
numpy.ndarray
Array mapping the locations of the edge, corner subchannels
Notes
-----
If the positions around pin i are labeled clockwise:
\ 6 | 1 /
\ | /
5 > < 2
/ | \
/ 4 | 3 \
Then depending on which assembly hexagonal face pin i is
located, one of the positions is skipped:
skip pos. 6 skip pos. 1
(incl. 90 deg corner) /\ (incl. 30 deg. corner)
/ \
skip pos. 5 | | skip pos. 2
(incl. 150 deg corner) | | (incl. 330 deg. corner)
\ /
skip pos. 4 \/ skip pos. 3
(incl. 210 deg corner) (incl. 270 deg. corner)
Example
-------
For an assembly with 3 pin rings, subchannels 25-42 are
exterior subchannels. The array that holds the pin IDs
for this system is:
| 00 00 00 42 00 00 |
| 00 00 41 25 00 00 |
| 39 40 00 00 26 00 |
| 00 00 00 00 00 27 |
| 38 00 00 00 00 28 |
| 00 00 00 00 00 00 |
| 37 00 00 00 00 29 |
| 36 00 00 00 00 00 |
| 00 35 00 00 31 30 |
| 00 00 34 32 00 00 |
| 00 00 33 00 00 00 |
"""
map = np.zeros((4 * n_ring - 1, 2 * n_ring), dtype="int")
sc_id = self.n_sc['coolant']['interior']
loc = (1, n_ring - 1) # starting location
for i in range(0, n_ring): # NE side: skip "sector 1"
loc, map, sc_id = self._step(loc, 'right', map, sc_id)
loc = self._move(loc, 'down') # need an extra step down
loc = self._move(loc, 'up') # undo extra move from last iter
for i in range(0, n_ring): # E side: skip "sector 2"
loc, map, sc_id = self._step(loc, 'down', map, sc_id)
loc = self._move(loc, 'down') # need an extra step down
loc = self._move(loc, 'up') # undo extra move from last iter
for i in range(0, n_ring): # SE side: skip "sector 3"
loc, map, sc_id = self._step(loc, 'left', map, sc_id)
loc = self._move(loc, 'down') # need an extra step down
loc = self._move(loc, 'up') # undo extra move from last iter
for i in range(0, n_ring): # SW side: skip "sector 4"
loc, map, sc_id = self._step(loc, 'up', map, sc_id)
loc = self._move(loc, 'left') # need an extra step
loc = self._move(loc, 'right') # undo extra move from last iter
for i in range(0, n_ring): # W side: skip "sector 5"
loc, map, sc_id = self._step(loc, 'up', map, sc_id)
loc = self._move(loc, 'up') # need an extra step
loc = self._move(loc, 'down') # undo extra move from last iter
for i in range(0, n_ring): # NW side: skip "sector 6"
loc, map, sc_id = self._step(loc, 'right', map, sc_id)
loc = self._move(loc, 'up') # need an extra step up
return map
@classmethod
def _step(cls, loc, dir, map, sc_id):
"""Take a _step into an adjacent position and fill it
Parameters
----------
loc : tuple
The row,column entries of the current position
dir : str
The direction ("left", "right", "up", or "down") to move
map : numpy array
The array map being updated
sc_id : int
The current subchannel ID number
Returns
-------
tuple
Updated position
numpy.ndarray
Updated array of subchannel map
int
Updated subchannel ID
"""
new_loc = cls._move(loc, dir)
new_scid = sc_id + 1
map[new_loc] = new_scid
return new_loc, map, new_scid
@staticmethod
def _move(loc, dir):
"""_move to an adjacent array entry
Parameters
----------
loc : tuple
(row, col) of the current location in the array
dir : str
Direction ('left', 'right', 'up', 'down') in which to step
Returns
-------
tuple
(row, col) after taking _step
"""
if dir == 'left':
loc = (loc[0], loc[1] - 1)
elif dir == 'right':
loc = (loc[0], loc[1] + 1)
elif dir == 'up':
loc = (loc[0] - 1, loc[1])
elif dir == 'down':
loc = (loc[0] + 1, loc[1])
else:
msg = 'Direction must be one of: [left, right, up, down]'
raise ValueError(msg)
return loc
####################################################################
# SUBCHANNEL-SUBCHANNEL NEIGHBORS
# For each subchannel, determine what subchannels are next to it
####################################################################
def find_sc_sc_neighbors(self, n_ring, duct_ftf):
"""Define the connections between neighboring subchannels.
Parameters
----------
n_ring : int
Number of pin rings (incl. center pin) in the assembly
duct_ftf : list
List of tuples containing duct inner/outer flat-to-flat
distances for each duct in the assembly
Notes
-----
The columns of the neighbors array imply the type of
subchannel to which the connection is being established:
Columns 1 - 3: Connection with interior subchannels
Three columns are reserved for connections of a subchannel
and an adjacent interior subchannel - for an interior
subchannel in contact with three other interior subchannels,
all three of these columns are filled. Edge subchannels
connect with one interior subchannel. Corner subchannels do
not touch any interior subchannels.
Columns 4 - 5: Connection with edge subchannels
Edge subchannels always touch two other edge subchannels,
which may be corners; corner subchannels always touch two
edge subchannels. Interior subchannels along the assembly
periphery will be in contact with one edge subchannel.
Columns 5 - 6: Connection with duct subchannels
Columns 7 - 8: Connection with bypass subchannels (optional)
Columns 9 - 10: Connection with outer duct (optional)
...
"""
# Array has 5 columns for interior coolant subchannels
# First duct ring adds 2 cols; every subsequent duct ring
# adds 4 more (2 for the duct, 2 for the bypass flow).
ncol = 5 + 4 * len(duct_ftf) - 2 # 5 cols for coolant subchannels
sc_sc = np.zeros((self.n_sc['total'], ncol), dtype="int")
sc_sc = self._connect_int_sc(sc_sc)
# print(sc_sc)
sc_sc = self._connect_int_ext_sc(sc_sc)
# print(sc_sc)
sc_sc = self._connect_ext_sc(sc_sc)
# print(sc_sc)
sc_sc = self._connect_duct_bypass_sc(sc_sc, n_ring, duct_ftf)
return sc_sc
def _connect_int_sc(self, sc_adj):
"""Define the connections between interior subchannels
Parameters
----------
sc_adj : numpy.ndarray
Array defining subchannel neighbors
Returns
-------
numpy.ndarray
Updated subchannel neighbor definitions
"""
for col in range(1, self._int_map.shape[1] - 1):
# Vertical connections: at least one, at most two
temp = self._int_map[:, col]
temp = temp[temp != 0]
if temp.size == 0:
continue
else:
for i in range(1, len(temp) - 1):
sc_adj[temp[i] - 1, 0] = temp[i - 1]
sc_adj[temp[i] - 1, 1] = temp[i + 1]
# first and last entries from "temp" are separate
sc_adj[temp[0] - 1, 1] = temp[1]
sc_adj[temp[-1] - 1, 0] = temp[-2]
# Horizontal connections (at most 1); we are mapping the
# connections between the current column ("j") and column
# "j+1". The first row with a connection is the one for
# which both column "j" and column "j+1" are nonzero.
try:
r1 = min(np.intersect1d((self._int_map[:, col]
.nonzero()[0]),
(self._int_map[:, col + 1]
.nonzero()[0])))
except ValueError: # trying to compare with zero columns
continue
else: # make connection every other row
for row in range(r1, self._int_map.shape[0], 2):
sc_i = self._int_map[row, col]
sc_ip1 = self._int_map[row, col + 1]
if sc_i == 0 or sc_ip1 == 0:
continue
else:
sc_adj[sc_i - 1, 2] = sc_ip1
sc_adj[sc_ip1 - 1, 2] = sc_i
return sc_adj
def _connect_int_ext_sc(self, sc_adj):
"""Define connections between interior and exterior subchannels.
Connections between interior and exterior (edge, corner)
subchannels are handled individually - this method figures
out which interior subchannels are missing an interior
connection; the position of the missing connection informs
the direction in which to look in the assembly subchannel
map for the adjacent edge subchannel.
Parameters
----------
sc_adj : numpy.ndarray
Array defining subchannel neighbors
Returns
-------
numpy.ndarray
Updated subchannel neighbor definitions
"""
# Total number of missing interior-exterior subchannel
# connections; connect only via interior-edge subchannels
n_missing_total = self.n_sc['coolant']['edge']
idx_missing = 0
for i in range(0, self.n_sc['coolant']['interior']):
# Find subchannel with missing value in map
row, col = np.where(self._map == i + 1)
# Looking for missing connections in the first three cols
missing = np.where(sc_adj[i][:3] == 0)
if len(missing[0]) > 0: # some missing, count that
idx_missing += 1
for idx in range(0, len(missing[0])):
if missing[0][idx] == 0: # look upward for value
sc_adj[i, idx + 3] = self._map[row - 1, col]
sc_adj[self._map[row - 1, col] - 1, idx] = i + 1
elif missing[0][idx] == 1: # look down
sc_adj[i, idx + 3] = self._map[row + 1, col]
sc_adj[self._map[row + 1, col] - 1, idx] = i + 1
else: # look horizontally
# Because we're going clockwise, if we're on the
# first half of subchannels w/ missing connections
# we're on the RIGHT side of the asm
if idx_missing < n_missing_total / 2:
sc_adj[i, idx + 3] = self._map[row, col + 1]
sc_adj[self._map[row, col + 1] - 1, idx] = i + 1
else: # SC on the left side of asm (higher numbers)
sc_adj[i, idx + 3] = self._map[row, col - 1]
sc_adj[self._map[row, col - 1] - 1, idx] = i + 1
return sc_adj
def _connect_ext_sc(self, sc_adj):
"""Connect edge and corner subchannels to each other.
Parameters
----------
sc_adj : numpy.ndarray
Array defining subchannel neighbors
Returns
-------
numpy.ndarray
Updated subchannel neighbor definitions
Notes
-----
Because they are arranged in a ring, each exterior
subchannel touches the subchannels immediately in front and
behind it
"""
ext_sc = np.arange(self.n_sc['coolant']['interior'] + 1,
self.n_sc['coolant']['total'] + 1, 1)
for i in range(-1, len(ext_sc) - 1):
# backward connection: i+1 back to i
sc_adj[ext_sc[i + 1] - 1, 3] = ext_sc[i]
# forward connection: i forward to i+1
sc_adj[ext_sc[i] - 1, 4] = ext_sc[i + 1]
return sc_adj
def _connect_duct_bypass_sc(self, sc_adj, n_ring, duct_ftf):
"""Connect successive duct and bypass rings.
Parameters
----------
sc_adj : numpy.ndarray
Array defining subchannel neighbors
n_ring : int
Number of pin rings (incl. center pin) in the assembly
duct_ftf : list
List of tuple containing the inner/outer duct flat-to-flat
distances for each duct in the assembly
Returns
-------
numpy.ndarray
Updated subchannel neighbor definitions
Notes
-----
The duct and bypass subchannels are organized in rings; each
ring has the same number of subchannels. This method connects
each ring to the previous.
"""
sc = (np.arange(1, self.n_sc['duct']['total'] + 1, 1)
+ self.n_sc['coolant']['total'])
for r in range(1, 2 * len(duct_ftf)): # r:ring
# duct ring 1: cols 5-6; bypass ring 1: cols 7-8, ...
# relationship between ring and cols: 2r+3, 2r+4
for i in range(-1, len(sc) - 1): # i is current sc
# backward connection: i+1 back to i (ring r)
sc_adj[sc[i + 1] - 1, 2 * r + 3] = sc[i]
# forward connection: i forward to i+1 (ring r)
sc_adj[sc[i] - 1, 2 * r + 4] = sc[i + 1]
# inward connection: i+1 (ring r) to i+1 (ring r-1)
sc_adj[sc[i + 1] - 1, 2 * r + 2] = \
sc[i + 1] - 6 * n_ring
# outward connection i+1 (ring r-1) up to i+1 (ring r)
sc_adj[sc[i] - 1 - 6 * n_ring, 2 * r + 3] = sc[i]
sc = sc + 6 * n_ring
return sc_adj
####################################################################
# PIN-SUBCHANNEL ADJACENCY
# Determine the subchannels that neighbor each pin
####################################################################
def find_pin_sc_neighbors(self, n_ring, pin_map):
"""Determine the subchannels that neighbor each pin.
When this method is called, the subchannel position map has
already been created and is linked to the subchannel map
for the pins by "stamping" a 3x2 cutout of the map centered
where each pin should be.
Parameters
----------
n_ring : int
Number of pin rings (incl. center pin) in the assembly
pin_map : numpy.ndarray
Map of pin layout in the assembly
Returns
-------
numpy.ndarray
Array (n_pin x 6) indicating the subchannels that neighbor
each pin.
"""
pin_sc_adj = np.zeros((len(pin_map[pin_map != 0]), 6),
dtype="int")
for row in range(0, len(pin_map)):
loc = (row, n_ring - row) # start loc
for p in pin_map[row]:
if p != 0:
pin_sc_adj[p - 1, 0] = self._map[loc]
pin_sc_adj[p - 1, 1] = self._map[loc[0] + 1, loc[1]]
pin_sc_adj[p - 1, 2] = self._map[loc[0] + 2, loc[1]]
pin_sc_adj[p - 1, 3] = self._map[loc[0] + 2,
loc[1] - 1]
pin_sc_adj[p - 1, 4] = self._map[loc[0] + 1,
loc[1] - 1]
pin_sc_adj[p - 1, 5] = self._map[loc[0], loc[1] - 1]
loc = (loc[0] + 1, loc[1] + 1) # update location
return pin_sc_adj
def reverse_pin_neighbors(self):
"""Determine pins that are adjacent to each subchannel
Parameters
----------
None
Returns
-------
numpy.ndarray
Array (N_coolant_sc x 3) of adjacent pin indices for each
subchannel; where less than 3 pins are adjacent, the array
is filled with -1 which are later filtered
Notes
-----
This is the "inverse" of the "pin_adj" attribute and is useful
for calculating the power assigned to each subchannel
"""
sc_nghbrs = [[] for i in range(self.n_sc['coolant']['total'])]
for pin in range(len(self.pin_adj)):
for sc in self.pin_adj[pin]:
if sc > 0:
sc_nghbrs[sc - 1].append(pin)
for sc in sc_nghbrs:
while len(sc) < 3:
sc.append(-1)
return np.array(sc_nghbrs)
####################################################################
# SUBCHANNEL XY
# Determine the XY positions of the centroid of each subchannel
####################################################################
def find_sc_xy(self, n_ring, pin_pitch, pin_diameter,
pin_xy, duct_ftf):
"""Determine X-Y coordinates of the subchannels.
Parameters
----------
n_ring : int
Number of pin rings (incl. center pin) in the assembly
pin_pitch : float
Pin center-to-center pitch distance
pin_diameter : float
Diameter of pin outer clad
pin_xy : numpy.ndarray
XY coordinates of pins in the assembly (from PinLattice)
duct_ftf : list
List of tuples containing the inner and outer flat-to-
flat distances of the duct walls, ordered from inner
to outer ducts.
Returns
-------
numpy.ndarray
Array (N_sc x 2) of subchannel X-Y coordinates
Notes
-----
This method uses the subchannel-pin adjacency array and the
X-Y coordinates of the pins to define the X-Y coordinate of
each subchannel based on its location relative to each fuel
pin, with the center pin located at (0, 0).
"""
# Preallocate sc_xy array. The coolant subchannel coords are
# entered into this array; the duct and bypass subchannel
# coords are appended to it.
sc_xy =
| np.zeros((self.n_sc['coolant']['total'], 2)) | numpy.zeros |
# DISCLAIMER: Code from the ddsp_timbre_transfer colab notebook demo
# Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.filterwarnings("ignore")
import copy
import os
import time
import crepe
import ddsp
import ddsp.training
from ddsp.colab import colab_utils
from ddsp.colab.colab_utils import (
auto_tune, detect_notes, fit_quantile_transform,
get_tuning_factor, download, play, record,
specplot, upload, DEFAULT_SAMPLE_RATE)
import gin
import librosa
import matplotlib.pyplot as plt
import numpy as np
import pickle
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from scipy.io import wavfile
from pydub import AudioSegment
import tempfile
def write_to_file(audio_file, model_dir, output, sample_rate = DEFAULT_SAMPLE_RATE):
audio_float = audio_file_to_np(audio_file)
cat_audio_float = transfer(audio_float, model_dir, sample_rate=sample_rate)
if len(cat_audio_float.shape) == 2:
cat_audio_float = cat_audio_float[0]
normalizer = float(np.iinfo(np.int16).max)
cat_audio_int = np.array(
np.asarray(cat_audio_float) * normalizer, dtype=np.int16)
wavfile.write(output, sample_rate, cat_audio_int)
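# Hedged usage sketch (paths are hypothetical, not from the original source):
#   write_to_file('input_voice.wav', '/path/to/model_dir', 'output_timbre.wav')
# converts the source recording to the timbre of the model in model_dir and
# writes a 16-bit WAV at DEFAULT_SAMPLE_RATE.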
def audio_file_to_np(audio_file, sample_rate=DEFAULT_SAMPLE_RATE, normalize_db=0.1):
audio = AudioSegment.from_file(audio_file)
audio.remove_dc_offset()
if normalize_db is not None:
audio.normalize(headroom=normalize_db)
# Save to tempfile and load with librosa.
with tempfile.NamedTemporaryFile(suffix='.wav') as temp_wav_file:
fname = temp_wav_file.name
audio.export(fname, format='wav')
audio_np, unused_sr = librosa.load(fname, sr=sample_rate)
return audio_np.astype(np.float32)
def transfer(audio, model_dir, sample_rate = DEFAULT_SAMPLE_RATE):
audio = audio[np.newaxis, :]
ddsp.spectral_ops.reset_crepe()
audio_features = ddsp.training.metrics.compute_audio_features(audio)
audio_features['loudness_db'] = audio_features['loudness_db'].astype(np.float32)
audio_features_mod = None
gin_file = os.path.join(model_dir, 'operative_config-0.gin')
# Load the dataset statistics.
DATASET_STATS = None
dataset_stats_file = os.path.join(model_dir, 'dataset_statistics.pkl')
try:
if tf.io.gfile.exists(dataset_stats_file):
with tf.io.gfile.GFile(dataset_stats_file, 'rb') as f:
DATASET_STATS = pickle.load(f)
except Exception as err:
print('Loading dataset statistics from pickle failed: {}.'.format(err))
# Parse gin config.
with gin.unlock_config():
gin.parse_config_file(gin_file, skip_unknown=True)
# Assumes only one checkpoint in the folder, 'ckpt-[iter]'.
ckpt_files = [f for f in tf.io.gfile.listdir(model_dir) if 'ckpt' in f]
ckpt_name = ckpt_files[0].split('.')[0]
ckpt = os.path.join(model_dir, ckpt_name)
# Ensure dimensions and sampling rates are equal
time_steps_train = gin.query_parameter('DefaultPreprocessor.time_steps')
n_samples_train = gin.query_parameter('Additive.n_samples')
hop_size = int(n_samples_train / time_steps_train)
time_steps = int(audio.shape[1] / hop_size)
n_samples = time_steps * hop_size
gin_params = [
'Additive.n_samples = {}'.format(n_samples),
'FilteredNoise.n_samples = {}'.format(n_samples),
'DefaultPreprocessor.time_steps = {}'.format(time_steps),
'oscillator_bank.use_angular_cumsum = True', # Avoids cumsum accumulation errors.
]
with gin.unlock_config():
gin.parse_config(gin_params)
# Trim all input vectors to correct lengths
for key in ['f0_hz', 'f0_confidence', 'loudness_db']:
audio_features[key] = audio_features[key][:time_steps]
audio_features['audio'] = audio_features['audio'][:, :n_samples]
# Set up the model just to predict audio given new conditioning
model = ddsp.training.models.Autoencoder()
model.restore(ckpt)
# Build model by running a batch through it.
start_time = time.time()
_ = model(audio_features, training=False)
threshold = 1
ADJUST = True
quiet = 20
autotune = 0
pitch_shift = -1
loudness_shift = 3
audio_features_mod = {k: v.copy() for k, v in audio_features.items()}
## Helper functions.
def shift_ld(audio_features, ld_shift=0.0):
"""Shift loudness by a number of ocatves."""
audio_features['loudness_db'] += ld_shift
return audio_features
def shift_f0(audio_features, pitch_shift=0.0):
"""Shift f0 by a number of ocatves."""
audio_features['f0_hz'] *= 2.0 ** (pitch_shift)
audio_features['f0_hz'] = np.clip(audio_features['f0_hz'],
0.0,
librosa.midi_to_hz(110.0))
return audio_features
mask_on = None
if ADJUST and DATASET_STATS is not None:
# Detect sections that are "on".
mask_on, note_on_value = detect_notes(audio_features['loudness_db'],
audio_features['f0_confidence'],
threshold)
if np.any(mask_on):
# Shift the pitch register.
target_mean_pitch = DATASET_STATS['mean_pitch']
pitch = ddsp.core.hz_to_midi(audio_features['f0_hz'])
mean_pitch = np.mean(pitch[mask_on])
p_diff = target_mean_pitch - mean_pitch
p_diff_octave = p_diff / 12.0
round_fn = np.floor if p_diff_octave > 1.5 else np.ceil
p_diff_octave = round_fn(p_diff_octave)
audio_features_mod = shift_f0(audio_features_mod, p_diff_octave)
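# Worked example (illustrative, not in the original): if the dataset's
# mean_pitch is 60 (MIDI) and the input's mean pitch over the "on" frames is 52,
# then p_diff = 8 and p_diff_octave = 8/12 ~= 0.67; since 0.67 <= 1.5, np.ceil
# is used and f0 is shifted up by exactly one octave (x2 in Hz).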
# Quantile shift the note_on parts.
_, loudness_norm = colab_utils.fit_quantile_transform(
audio_features['loudness_db'],
mask_on,
inv_quantile=DATASET_STATS['quantile_transform'])
# Turn down the note_off parts.
mask_off =
| np.logical_not(mask_on) | numpy.logical_not |
from __future__ import print_function, division
# pytorch imports
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# helper imports
from model_helper import load_checkpoint
# image imports
from skimage import io, transform
from PIL import Image
# general imports
import re
import os, glob
import time
import datetime
import pdb
from shutil import copyfile
from shutil import rmtree
from pathlib import Path
# data science imports
import pandas as pd
import numpy as np
import csv
from sklearn.metrics import accuracy_score
import statistics
import cxr_dataset as CXR
import eval_model as E
use_gpu = torch.cuda.is_available()
gpu_count = torch.cuda.device_count()
print("Available GPU count:" + str(gpu_count))
def create_checkpoint(model, best_loss, epoch, PRETRAINED, FREEZE, TIME, MODEL, OPTIM, LR, STEP, TRAIN_LOSS, TRAIN_ACC, VAL_LOSS, VAL_ACC):
"""
Saves checkpoint of torchvision model during training.
Args:
model: torchvision model to be saved
best_loss: best val loss achieved so far in training
epoch: current epoch of training
PRETRAINED: whether a pretrained model is used
FREEZE: whether the base layers are frozen
TIME: time training started (UNIX)
MODEL: model name used
OPTIM: optimizer name
LR: learning rate value
STEP: number of epochs between LR drops
TRAIN_LOSS: list of train loss
TRAIN_ACC: list of train accuracy
VAL_LOSS: list of val loss
VAL_ACC: list of val accuracy
Returns:
path_name: path of the saved checkpoint file
"""
print('saving')
state = {
'model': model,
'best_loss': best_loss,
'epoch': epoch,
'rng_state': torch.get_rng_state(),
'LR': LR
}
for filename in glob.glob(f'results/{TIME}*'):
os.remove(filename)
path_name = f'results/{TIME}#{MODEL}_P-{PRETRAINED}_F-{FREEZE}_{OPTIM}_LR({LR})_every-{STEP}-step_VLOSS-{VAL_LOSS[-1]}_VACC-{VAL_ACC[-1]}'
torch.save(model.state_dict(), path_name)
return path_name
def create_csv(DEBUG, PRETRAINED, FREEZE, TIME, MODEL, OPTIM, LR, STEP, TRAIN_LOSS, TRAIN_ACC, VAL_LOSS, VAL_ACC, NOTES, CHECKPOINT):
"""
Create training results in csv
Args:
DEBUG: if True (debugging mode), do not write the csv
PRETRAINED: whether a pretrained model is used
FREEZE: whether the base layers are frozen
TIME: time training started
MODEL: model name used
OPTIM: optimizer name
LR: learning rate value
STEP: number of epochs between LR drops
TRAIN_LOSS: list of train loss
TRAIN_ACC: list of train accuracy
VAL_LOSS: list of val loss
VAL_ACC: list of val accuracy
NOTES: list of notes
CHECKPOINT: checkpoint name
"""
if not DEBUG:
df = pd.DataFrame({
'train_loss': TRAIN_LOSS,
'train_acc': TRAIN_ACC,
'val_loss': VAL_LOSS,
'val_acc': VAL_ACC,
'notes': NOTES
})
df.to_csv(f'results_csv/{TIME}#{MODEL}_CP-{CHECKPOINT}_P-{PRETRAINED}_F-{FREEZE}_{OPTIM}_LR({LR})_every-{STEP}-steps.csv')
def get_distillate_output(distillate_results, distillate_index, outputs_row):
return torch.from_numpy(distillate_results[distillate_index][:outputs_row, :]).float().cuda()
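# Added note (hedged): distillate_results is presumably an array of cached
# teacher outputs indexed by batch; this returns the entry for the current
# batch, truncated to the live batch size, as a float tensor on the GPU.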
def train_model(
model,
criterion,
optimizer,
optim_name,
LR,
num_epochs,
dataloaders,
dataset_sizes,
weight_decay,
scheduler,
debug_mode,
pretrained,
freeze,
checkpoint,
distillate_time):
"""
Fine tunes torchvision model to NIH CXR data.
Args:
model: torchvision model to be finetuned
criterion: loss criterion
optimizer: optimizer to use in training
optim_name: optimizer name (used when logging LR decay/drops)
LR: learning rate
num_epochs: continue training up to this many epochs
dataloaders: pytorch train and val dataloaders
dataset_sizes: length of train and val datasets
weight_decay: weight decay parameter
scheduler: set learning rate to drop after x steps
debug_mode: if true then no log and checkpoint will be created
pretrained: for logging name only
freeze: for logging name only
checkpoint: for logging name only
distillate_time: timestamp of the cached distillation results file (empty string to disable)
Returns:
model: trained torchvision model
best_epoch: epoch on which best model val loss was obtained
"""
checkpoint_path = ''
since = time.time()
start_epoch = 1
best_loss = 999999
best_epoch = -1
last_train_loss = -1
csv_time = datetime.datetime.now().isoformat()
csv_model = model.name
csv_optim = optim_name
csv_lr = LR
csv_step = scheduler.step_size
if not os.path.exists('results_csv'):
os.makedirs('results_csv')
# List used for csv later
list_train_loss = []
list_train_acc = []
list_val_loss = []
list_val_acc = []
list_notes = []
create_csv(debug_mode, pretrained, freeze, csv_time, csv_model, csv_optim, csv_lr, csv_step,
list_train_loss, list_train_acc, list_val_loss, list_val_acc,
list_notes, checkpoint)
# iterate over epochs
for epoch in range(start_epoch, num_epochs + 1):
print(f'Epoch {epoch}/{num_epochs}')
# training and val status
training_print = 0
val_print = 0
# loading bar
loading_bar = ''
dataloaders_length = len(dataloaders['train']) + len(dataloaders['val'])
for i in range(dataloaders_length):
loading_bar += '-'
distillate_index = 0
if distillate_time != '':
distillate_results = np.load(f'results_distillation/d-{distillate_time}.npy')
# set model to train or eval mode based on whether we are in train or
# val; necessary to get correct predictions given batchnorm
for phase in ['train', 'val']:
if phase == 'train':
model.train(True)
else:
model.train(False)
running_loss = 0.0
total_done = 0
# reset output_acc
output_acc = 0
num_of_steps = 0
# iterate over all data in train/val dataloader:
for data in dataloaders[phase]:
inputs, labels = data
batch_size = inputs.shape[0]
# loading bar progress
loading_bar = f'={loading_bar}'
loading_bar = loading_bar[:dataloaders_length]
print(f'Steps: {loading_bar}', end='\r')
# ===========================================================
# train datasets
if phase == "train":
for i in range(10):
optimizer.zero_grad()
inp = inputs.clone()[:, i]
inp = inp.cuda()
labels = labels.cuda()
num_of_steps += 1
outputs = model(inp)
if(isinstance(outputs, list)):
current_loss = []
# get output pred and get accuracy
for output_item in outputs:
if distillate_time != '':
current_loss.append(criterion(outputs, get_distillate_output(distillate_results, distillate_index, outputs.shape[0])))
else:
current_loss.append(criterion(output_item, labels))
combined_pred = torch.max(outputs[-1], dim=1)[1]
output_acc += accuracy_score(labels.cpu().data.numpy(), combined_pred.cpu().data.numpy())
loss = sum(current_loss[0:-1]) / len(current_loss[0:-1]) + (current_loss[-1] / 2)
else:
# get output pred and get accuracy
if distillate_time != '':
loss = criterion(outputs, get_distillate_output(distillate_results, distillate_index, outputs.shape[0]))
else:
loss = criterion(outputs, labels)
outputs_pred = torch.max(outputs, dim=1)[1]
output_acc += accuracy_score(labels.cpu().data.numpy(), outputs_pred.cpu().data.numpy())
loss.backward()
optimizer.step()
running_loss += loss.item() * batch_size
# ===========================================================
# val datasets
else:
optimizer.zero_grad()
inputs = inputs.cuda()
labels = labels.cuda()
outputs = model(inputs)
if(isinstance(outputs, list)):
current_loss = []
# get output pred and get accuracy
for output_item in outputs:
if distillate_time != '':
current_loss.append(criterion(outputs, get_distillate_output(distillate_results, distillate_index, outputs.shape[0])))
else:
current_loss.append(criterion(output_item, labels))
combined_pred = torch.max(outputs[-1], dim=1)[1]
output_acc += accuracy_score(labels.cpu().data.numpy(), combined_pred.cpu().data.numpy())
loss = sum(current_loss[0:-1]) / len(current_loss[0:-1]) + (current_loss[-1] / 2)
else:
if distillate_time != '':
loss = criterion(outputs, get_distillate_output(distillate_results, distillate_index, outputs.shape[0]))
else:
loss = criterion(outputs, labels)
# get output pred and get accuracy
outputs_pred = torch.max(outputs, dim=1)[1]
output_acc += accuracy_score(labels.cpu().data.numpy(), outputs_pred.cpu().data.numpy())
running_loss += loss.item() * batch_size
distillate_index += 1
# ===========================================================
if phase == 'train':
output_acc = output_acc / len(dataloaders[phase]) / 10
epoch_loss = running_loss / dataset_sizes[phase] / 10
last_train_loss = epoch_loss
training_print = f'{phase} epoch {epoch}: acc {output_acc:.2f} loss {epoch_loss:.4f} with data size {dataset_sizes[phase]}'
list_train_loss.append(f'{epoch_loss:.4f}')
list_train_acc.append(f'{output_acc:.2f}')
else:
output_acc = output_acc / len(dataloaders[phase])
epoch_loss = running_loss / dataset_sizes[phase]
val_print = f'{phase} epoch {epoch}: acc {output_acc:.2f} loss {epoch_loss:.4f} with data size {dataset_sizes[phase]}'
list_val_loss.append(f'{epoch_loss:.4f}')
list_val_acc.append(f'{output_acc:.2f}')
print('')
# checkpoint model if has best val loss yet
if phase == 'val' and epoch_loss < best_loss and not debug_mode:
best_loss = epoch_loss
best_epoch = epoch
checkpoint_path = create_checkpoint(model, best_loss, epoch, pretrained, freeze, since, csv_model, csv_optim, csv_lr, csv_step, list_train_loss, list_train_acc, list_val_loss, list_val_acc)
# log training and validation loss over each epoch
if phase == 'val':
with open("results/log_train", 'a') as logfile:
logwriter = csv.writer(logfile, delimiter=',')
if(epoch == 1):
logwriter.writerow(["epoch", "train_loss", "val_loss"])
logwriter.writerow([epoch, last_train_loss, epoch_loss])
# decay learning rate every x steps
scheduler.step()
if epoch % scheduler.step_size == 0:
lr_new = '{0:.20f}'.format(optimizer.param_groups[0]['lr']).rstrip('0')
optim_print = f'created new {optim_name} optimizer with LR: {lr_new}'
print(optim_print)
list_notes.append(optim_print)
else:
list_notes.append('')
# update csv report
create_csv(debug_mode, pretrained, freeze, csv_time, csv_model, csv_optim, csv_lr, csv_step,
list_train_loss, list_train_acc, list_val_loss, list_val_acc,
list_notes, checkpoint)
print(training_print)
print(val_print)
data = {'train_loss': list_train_loss, 'train_acc': list_train_acc}
total_done += batch_size
if(total_done % (100 * batch_size) == 0):
print("completed " + str(total_done) + " so far in epoch")
# break if no val loss improvement in 3 epochs
# if ((epoch - best_epoch) >= 3):
# print("no improvement in 3 epochs, break")
# break
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
# load best model weights to return
# checkpoint_best = torch.load(checkpoint_path)
# model = checkpoint_best['model']
return model, best_epoch
def train_cnn(MODEL_NAME, PRETRAINED, FREEZE, EPOCHS, BATCH_SIZE, N_LABELS, OPTIMIZERS, PATH_TO_IMAGES, LR, WEIGHT_DECAY, LR_DECAY_STEPS, DEBUG_MODE, CHECKPOINT_PATH = '', DISTILLATE_WITH = ''):
"""
Train torchvision model to NIH data given high level hyperparameters.
Args:
MODEL_NAME: model name
PRETRAINED: whether to use a pretrained model
FREEZE: whether to freeze the base layers
EPOCHS: number of training epochs
BATCH_SIZE: number of samples per training batch
N_LABELS: number of class labels
OPTIMIZERS: optimizer used
PATH_TO_IMAGES: path to NIH images
LR: learning rate
WEIGHT_DECAY: weight decay parameter for SGD
LR_DECAY_STEPS: number of epochs before the LR is decayed and dropped
DEBUG_MODE: if true then no log will be created
CHECKPOINT_PATH: path of a checkpoint to load (empty string to skip)
DISTILLATE_WITH: path of a checkpoint to distill the model with (empty string to disable)
Returns:
# preds: torchvision model predictions on test fold with ground truth for comparison
# aucs: AUCs for each train,test tuple
"""
if not os.path.exists('results'):
os.makedirs('results')
# use imagenet mean,std for normalization
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize = transforms.Normalize(mean=mean, std=std)
# define torchvision transforms
data_transforms = {
'train': transforms.Compose([
transforms.Resize(256),
transforms.TenCrop(224),
transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean, std)
]),
'test': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
}
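# Note (added for clarity, not in the original): TenCrop yields 10 crops per
# image, so the 'train' loader below produces batches shaped
# [batch, 10, 3, 224, 224]; the training loop indexes the crop dimension with
# inputs.clone()[:, i] for i in range(10).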
# create train/val dataloaders
transformed_datasets = {x: datasets.ImageFolder(os.path.join(PATH_TO_IMAGES, x), data_transforms[x]) for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(transformed_datasets[x], batch_size=BATCH_SIZE, shuffle=True, num_workers=0) for x in ['train', 'val']}
# please do not attempt to train without GPU as it will take excessively long
if not use_gpu:
raise ValueError("Error, requires GPU")
# Check model used
import modified_densenet
import modified_alexnet
model = (models.densenet121(pretrained=PRETRAINED) if MODEL_NAME == 'densenet'
else modified_densenet.densenet121(type=MODEL_NAME, pretrained=PRETRAINED)
if MODEL_NAME == 'va-densenet'
or MODEL_NAME == 'reva-densenet'
or MODEL_NAME == 'fp-densenet'
or MODEL_NAME == 'start-densenet'
or MODEL_NAME == 'every-densenet'
or MODEL_NAME == 'sedensenet'
or MODEL_NAME == 'triplelossdensenet'
else models.alexnet(pretrained=PRETRAINED) if MODEL_NAME == 'alexnet'
else modified_alexnet.alexnet(type=MODEL_NAME, pretrained=PRETRAINED)
if MODEL_NAME == 'va-alexnet'
or MODEL_NAME == 'reva-alexnet'
or MODEL_NAME == 'fp-alexnet'
or MODEL_NAME == 'start-alexnet'
else models.resnet152(pretrained=PRETRAINED) if MODEL_NAME == 'resnet'
else models.vgg16(pretrained=PRETRAINED) if MODEL_NAME == 'VGG'
else models.vgg16_bn(pretrained=PRETRAINED) if MODEL_NAME == 'VGG_Bn'
else '')
# get num_ftrs based on model name
num_ftrs = (model.classifier.in_features
if MODEL_NAME == 'densenet'
or MODEL_NAME == 'va-densenet'
or MODEL_NAME == 'reva-densenet'
or MODEL_NAME == 'fp-densenet'
or MODEL_NAME == 'start-densenet'
or MODEL_NAME == 'every-densenet'
or MODEL_NAME == 'sedensenet'
or MODEL_NAME == 'triplelossdensenet'
else model.classifier[6].in_features
if MODEL_NAME == 'alexnet'
or MODEL_NAME == 'va-alexnet'
or MODEL_NAME == 'reva-alexnet'
or MODEL_NAME == 'fp-alexnet'
or MODEL_NAME == 'start-alexnet'
or MODEL_NAME == 'VGG'
or MODEL_NAME == 'VGG_Bn'
else model.fc.in_features
if MODEL_NAME == 'resnet'
else model.fc3.in_features
if MODEL_NAME == 'small_va'
else '')
# change classifier class to N_LABELS
if (MODEL_NAME == 'densenet' or MODEL_NAME == 'va-densenet' or MODEL_NAME == 'reva-densenet' or MODEL_NAME == 'fp-densenet' or MODEL_NAME == 'start-densenet' or MODEL_NAME == 'every-densenet' or MODEL_NAME == 'sedensenet' or MODEL_NAME == 'triplelossdensenet'):
model.classifier = nn.Linear(num_ftrs, N_LABELS)
elif (MODEL_NAME == 'alexnet' or MODEL_NAME == 'va-alexnet' or MODEL_NAME == 'reva-alexnet' or MODEL_NAME == 'fp-alexnet' or MODEL_NAME == 'start-alexnet' or MODEL_NAME == 'VGG' or MODEL_NAME == 'VGG_Bn'):
model.classifier[6] = nn.Linear(num_ftrs, N_LABELS)
elif (MODEL_NAME == 'resnet'):
model.fc = nn.Linear(num_ftrs, N_LABELS)
else:
raise ValueError("Error model name")
if CHECKPOINT_PATH != '':
model = load_checkpoint(model, CHECKPOINT_PATH)
# show params to learn
if FREEZE:
for name, param in model.named_parameters():
attention_pattern = re.compile(r'^(conv2d1x1|valinear|transconv|start|every|se_).+$')
classifier_pattern = re.compile(r'^(classifier(?!\.\d)|classifier\.6|fc).+$')
if attention_pattern.match(name):
param.requires_grad = True
elif classifier_pattern.match(name) and CHECKPOINT_PATH == '':
param.requires_grad = True
else:
param.requires_grad = False
if FREEZE:
print('Params to learn:')
for name, param in model.named_parameters():
if param.requires_grad == True:
print(name)
print('==================================')
# Distillate
distillate_time = ''
if DISTILLATE_WITH != '':
print(f'Distillate with {DISTILLATE_WITH}')
distillate_time = datetime.datetime.now().isoformat()
model_distillate = models.densenet121(pretrained=PRETRAINED)
num_ftrs_distillate = model_distillate.classifier.in_features
model_distillate.classifier = nn.Linear(num_ftrs_distillate, N_LABELS)
model_distillate = load_checkpoint(model_distillate, DISTILLATE_WITH)
print('Loaded checkpoint for distillation')
model_distillate = model_distillate.cuda()
loading_bar = ''
dataloaders_length = len(dataloaders['train']) + len(dataloaders['val'])
for i in range(dataloaders_length):
loading_bar += '-'
for phase in ['train', 'val']:
for data in dataloaders[phase]:
loading_bar = f'={loading_bar}'
loading_bar = loading_bar[:dataloaders_length]
print(f'Distillating: {loading_bar}', end='\r')
inputs, labels = data
if phase == 'train':
for i in range(10):
inp = inputs.clone()[:, i]
inp = inp.cuda()
labels = labels.cuda()
outputs = model_distillate(inp).cpu().data.numpy()
if len(outputs) != BATCH_SIZE:
outputs_padding =
| np.zeros((BATCH_SIZE, N_LABELS)) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
last mod 5/14/19
"""
import numpy as np
import numba as nb
from time import time
@nb.njit(nb.i8(nb.b1[:,:,:,:], nb.i8[:], nb.i8, nb.i8, nb.i8[:]))
def orderIdxsBySplit(X, idxs, start, end, split):
"""
partition idxs[start:end] so that samples with any active voxel inside the split box come first; returns the boundary index
"""
j = start
for i in range(start, end):
xi = idxs[i]
if np.any(X[xi,split[0]:split[3],split[1]:split[4],split[2]:split[5]]):
idxs[i] = idxs[j]
idxs[j] = xi
j += 1
return j
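# Hedged usage sketch (synthetic data, not part of the original pipeline): partition
# four samples so that the ones with an active voxel inside the box
# [0:1, 0:1, 0:1] are moved to the front of idxs; the returned index is the boundary.
def _example_orderIdxsBySplit():
    X = np.zeros((4, 2, 2, 2), dtype=np.bool_)  # [n, L, W, H] binary grid
    X[1, 0, 0, 0] = True                        # only sample 1 hits the box
    idxs = np.arange(4, dtype=np.int64)
    split = np.array([0, 0, 0, 1, 1, 1], dtype=np.int64)
    boundary = orderIdxsBySplit(X, idxs, 0, 4, split)
    return idxs, boundary                       # idxs[:boundary] are the samples inside the box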
@nb.njit(nb.f8(nb.f8,nb.f8,nb.f8,nb.f8))
def calcScore(gradin,hessin,gradsum,hesssum):
gradout = gradsum - gradin
hessout = hesssum - hessin
return gradin*gradin/max(hessin, 1e-10) + gradout*gradout/max(hessout, 1e-10)
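# Hedged worked example for calcScore() (toy numbers, not from real data): with total
# gradient/hessian sums of 10.0/8.0 and an in-split share of 6.0/3.0, the split gain is
# 6**2/3 + (10-6)**2/(8-3) = 12.0 + 3.2 = 15.2.
def _example_calcScore():
    return calcScore(6.0, 3.0, 10.0, 8.0)  # -> 15.2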
#def makeIntegral(X2):
# X,Y,Z,N = X2.shape
# for x,y,n in np.ndindex(X,Y,N):
# count = 0
# for z in range(Z):
# count += X2[x,y,z,n]
# X2[x,y,z,n] = count
# for x,z,n in np.ndindex(X,Z,N):
# count = 0
# for y in range(Y):
# count += X2[x,y,z,n]
# X2[x,y,z,n] = count
# for y,z,n in np.ndindex(Y,Z,N):
# count = 0
# for x in range(X):
# count += X2[x,y,z,n]
# X2[x,y,z,n] = count
#@nb.njit(nb.void(nb.i2[:,:,:,:], nb.i2[:], nb.f8[:], nb.f8[:], nb.i8[:],
# nb.f8[:], nb.i8[:,:]))
#def findSplitIntegral(X, storage, grad, hess, leafidxs, scores, splits):
# xfull, yfull, zfull, n = X.shape
# gradsum = np.sum(grad[leafidxs])
# hesssum = np.sum(hess[leafidxs])
# K = scores.shape[0]
# worstbestscore = scores[0]
# for xmin, ymin, zmin in np.ndindex(xfull, yfull, zfull):
# xmaxcount = xfull - xmin
# ymaxcount = yfull - ymin
# zmaxcount = zfull - zmin
# for xmax, ymax, zmax in np.ndindex(xmaxcount, ymaxcount, zmaxcount):
# xmax += xmin+1
# ymax += ymin+1
# zmax += zmin+1
# gradin = 0.
# hessin = 0.
# storage[:] = (X[xmax,ymax,zmax] - X[xmax,ymax,zmin] -
# X[xmax,ymin,zmax] - X[xmin,ymax,zmax] +
# X[xmax,ymin,zmin] + X[xmin,ymax,zmin] +
# X[xmin,ymin,zmax] - X[xmin,ymin,zmin])
# gradin = np.sum(grad[storage>0])
# hessin = np.sum(hess[storage>0])
# if hessin > 0 and hessin < hesssum:
# gradout = gradsum - gradin
# hessout = hesssum - hessin
# score = gradin*gradin/hessin + gradout*gradout/hessout
# if score > worstbestscore:
# lastk = 0
# for k in range(1,K):
# if score < scores[k]:
# break
# scores[lastk] = scores[k]
# splits[lastk] = splits[k]
# lastk = k
# scores[lastk] = score
# splits[lastk] = (xmin,ymin,zmin,xmax,ymax,zmax)
# worstbestscore = scores[0]
@nb.njit(nb.void(nb.b1[:,:,:,:], nb.b1[:,:,:], nb.f8[:], nb.f8[:],
nb.i8[:], nb.f8[:], nb.i8[:,:]))
def findSplitInitial(X, storage, grad, hess, leafidxs, scores, splits):
"""
X = [LxWxHxn] binary feature grid
grad, hess = [n] float target values for GBM
tries all possible integer 3d boxes in LxWxH
"""
xfull, yfull, zfull, n = X.shape
# variables to perform efficient calculations of the GB split cost
gradsum = np.sum(grad[leafidxs])
hesssum = np.sum(hess[leafidxs])
# worstbestscore = gradsum*gradsum/hesssum
K = scores.shape[0]
worstbestscore = scores[0]
for xmin, ymin, zmin in np.ndindex(xfull, yfull, zfull):
xmaxcount = xfull - xmin
ymaxcount = yfull - ymin
zmaxcount = zfull - zmin
storage[ymin:yfull+1,zmin:zfull+1, leafidxs] = False
for xmax, ymax, zmax in np.ndindex(xmaxcount, ymaxcount, zmaxcount):
xmax += xmin+1
ymax += ymin+1
zmax += zmin+1
gradin = 0.
hessin = 0.
for xidx in leafidxs:
here = (storage[ymax,zmax,xidx] |
storage[ymax-1,zmax,xidx] |
storage[ymax,zmax-1,xidx] |
X[xmax-1,ymax-1,zmax-1,xidx])
# current storage[ymax,zmax] has info on :xmax-1,:ymax,:zmax
# will never use this again, can replace with :xmax,:ymax,:zmax
storage[ymax,zmax,xidx] = here
if here:
gradin += grad[xidx]
hessin += hess[xidx]
score = calcScore(gradin, hessin, gradsum, hesssum)
if score > worstbestscore:
# heapless O(k) update of k-best scores
lastk = 0
for k in range(1,K):
if score < scores[k]:
break
scores[lastk] = scores[k]
splits[lastk] = splits[k]
lastk = k
scores[lastk] = score
splits[lastk] = (xmin,ymin,zmin,xmax,ymax,zmax)
worstbestscore = scores[0]
@nb.njit(nb.void(nb.b1[:,:,:,:], nb.f8[:], nb.f8[:],
nb.i8[:], nb.i8[:,:], nb.f8[:], nb.i8[:,:]))
def findSplitPartial2(X, grad, hess, leafidxs, boxes, scores, splits):
"""
X = [nxLxWxH] binary feature grid
grad, hess = [n] float target values for GBM
tries various intervals
"""
n, xfull, yfull, zfull = X.shape
maxxmin, maxymin, maxzmin, minxmax, minymax, minzmax = boxes[0]
minxmin, minymin, minzmin, maxxmax, maxymax, maxzmax = boxes[1]
leaf_in = 0
prevgradin = 0.
prevhessin = 0.
for leafidx in range(leafidxs.shape[0]):
xidx = leafidxs[leafidx]
if np.any(X[xidx, maxxmin:minxmax, maxymin:minymax, maxzmin:minzmax]):
leafidxs[leafidx] = leafidxs[leaf_in]
leafidxs[leaf_in] = xidx
leaf_in += 1
prevgradin += grad[xidx]
prevhessin += hess[xidx]
leaf_out = leafidxs.shape[0]
for leafidx in range(leafidxs.shape[0]-1, leaf_in-1, -1):
xidx = leafidxs[leafidx]
if not np.any(X[xidx, minxmin:maxxmax, minymin:maxymax, minzmin:maxzmax]):
leaf_out -= 1
leafidxs[leafidx] = leafidxs[leaf_out]
leafidxs[leaf_out] = xidx
gradsum = np.sum(grad[leafidxs])
hesssum = np.sum(hess[leafidxs])
K = scores.shape[0]
worstbestscore = scores[0]
thissplit = np.zeros(6, dtype=np.int64)
xmin, ymin, zmin, xmax, ymax, zmax = boxes[0]
bestchangescore = calcScore(prevgradin, prevhessin, gradsum, hesssum)
for addition in range(25):
bestchange = 0
if xmin > minxmin:
gradin = prevgradin
hessin = prevhessin
for xidx in leafidxs[leaf_in:leaf_out]:
if np.any(X[xidx, xmin-1, ymin:ymax, zmin:zmax]):
gradin += grad[xidx]
hessin += hess[xidx]
changescore = calcScore(gradin, hessin, gradsum, hesssum)
if changescore > bestchangescore:
bestchange = 1
bestchangescore = changescore
if ymin > minymin:
gradin = prevgradin
hessin = prevhessin
for xidx in leafidxs[leaf_in:leaf_out]:
if np.any(X[xidx, xmin:xmax, ymin-1, zmin:zmax]):
gradin += grad[xidx]
hessin += hess[xidx]
changescore = calcScore(gradin, hessin, gradsum, hesssum)
if changescore > bestchangescore:
bestchange = 2
bestchangescore = changescore
if zmin > minzmin:
gradin = prevgradin
hessin = prevhessin
for xidx in leafidxs[leaf_in:leaf_out]:
if np.any(X[xidx, xmin:xmax, ymin:ymax, zmin-1]):
gradin += grad[xidx]
hessin += hess[xidx]
changescore = calcScore(gradin, hessin, gradsum, hesssum)
if changescore > bestchangescore:
bestchange = 3
bestchangescore = changescore
if xmax < maxxmax:
gradin = prevgradin
hessin = prevhessin
for xidx in leafidxs[leaf_in:leaf_out]:
if np.any(X[xidx, xmax, ymin:ymax, zmin:zmax]):
gradin += grad[xidx]
hessin += hess[xidx]
changescore = calcScore(gradin, hessin, gradsum, hesssum)
if changescore > bestchangescore:
bestchange = 4
bestchangescore = changescore
if ymax < maxymax:
gradin = prevgradin
hessin = prevhessin
for xidx in leafidxs[leaf_in:leaf_out]:
if
|
np.any(X[xidx, xmin:xmax, ymax, zmin:zmax])
|
numpy.any
|
# Experiment that generates several sets of networks of varying CH-divergence types
# then trains an msbm of a single type in a "consensus" type of way. Then we report the
# average rand_index and average entropy of the z variables, which are indicators of how well
# the algorithm is learning the true model.
import os, sys
import pickle
import pdb
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def main():
stats_url = os.path.join('stats', 'stats_' + 'detectability.pickle')
print("generating plots from: {}".format(stats_url))
statistics = pickle.load(open(stats_url, 'rb'), encoding='latin1')
#box plot CH-div vs Rand Index: collect ari_Z for N == 250 in eight CH-divergence bins
ari_Z = np.array(statistics['ari_Z'])
ch_div = np.array(statistics['CH_div'])
fil_n = np.array([n == 250 for n in statistics['N']])
ch_bins = [(0.55, 0.65), (0.65, 0.75), (0.75, 0.85), (0.85, 0.95), (0.95, 1.05), (1.05, 1.15), (1.15, 1.25), (1.25, 1.35)]
data = [ari_Z[(ch_div > lo) & (ch_div < hi) & fil_n].flatten() for lo, hi in ch_bins]
plt.boxplot(data)
plt.xticks(range(1,9),[0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3], fontsize=10)
plt.xlabel('CH Divergence')
plt.ylabel('Adj. Rand Index')
plt.title("CH-div vs Average Rand Index", fontsize= 16)
plot_file = os.path.join('plots', 'plot_' + 'boxplot_ch.svg')
plt.savefig(plot_file, format="svg")
plt.clf()
#we make a boxplot for N vs ari_Z
data1 = np.array(statistics['ari_Z'])
fil = np.array([n == 75 for n in statistics['N']])
fil2 = np.array([(chd>1.00)&(chd<1.10) for chd in statistics['CH_div']])
data1 = data1[fil&fil2].flatten()
data2 = np.array(statistics['ari_Z'])
fil =
|
np.array([n == 151 for n in statistics['N']])
|
numpy.array
|
# ------------------------------------------------------------------- #
# This Python code samples the sample space $\chi$ of the normalized #
# maximum likelihood (NML) for the Dirichlet model which is used to #
# plot Figure 2 in the text: #
# Minimum Description Length codes are critical #
# <NAME>; <NAME>; <NAME> #
# ------------------------------------------------------------------- #
# Some basic imports
from __future__ import division
import numpy as np
# Import necessary libraries
from collections import Counter
from mpmath import mp, log, loggamma, exp, fmul, fsum, power
# Configure mpmath to calculate quantities up to 100 decimal places
mp.dps = 100
mp.pretty = True
# Define function to calculate $\hat{H}[k]$ and $\hat{H}[s]$
def calculate_HofKS(mapping_ks):
ks_counts = np.asarray(Counter(mapping_ks).most_common())
positive_values = np.where(ks_counts[:,0]>0)[0]
kq, mq = ks_counts[:,0][positive_values], ks_counts[:,1][positive_values]
assert np.sum(kq*mq)==np.sum(mapping_ks)
M = float(np.sum(kq*mq))
return -np.sum(((kq*mq)/M)*np.log2((kq*mq)/M))/np.log2(M), -np.sum(((kq*mq)/M)*np.log2(kq/M))/np.log2(M)
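# Hedged usage sketch (toy frequencies, not the Dirichlet sample generated below):
# four states observed with counts k_s = [2, 2, 1, 1], i.e. M = 6 samples in total.
def example_calculate_HofKS():
    toy_ks = np.array([2, 2, 1, 1])
    H_of_k, H_of_s = calculate_HofKS(toy_ks)  # both normalized by log2(M)
    return H_of_k, H_of_s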
# Define function needed for the Monte Carlo step which takes care of $k=0$
def dk_term(k):
k = float(k)
if k!=0: return k*log(k) - loggamma(k+1)
else: return - loggamma(k+1)
# Fix the parameters of the Dirichlet model
# $\rho = M/S_size$ where $S_size$ is the size of the state space and $M$ is the number of samples
rho = 10
S_size = 1000
M = rho*S_size
output_name = 'dirichlet_M1e3_S1000'
# Initialize the vector $k_s$, which is the frequency of the state $s$ in the sample $\hat{s}$
# Here, we initialize by creating (almost) equally sampled states
k_sample = np.floor(M/S_size)*np.ones(S_size)
remainder = int(M -
|
np.sum(k_sample)
|
numpy.sum
|
from model_utils import get_det2_features
import os
import numpy as np
from utils import add_overlay
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import Visualizer
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
import json
import panoramic_camera as camera
from skimage.transform import PiecewiseAffineTransform, warp
from scipy.ndimage import map_coordinates
import sys
import cv2
import matplotlib
usage = '''
Re-run a history of actions of an agent.
Change DATA_PATH and DETECTRON2_YAML with your paths to those files.
argv[1] : json file for the history
argv[2] : 0 or 1 whether to use detectron for demo
argv[3] : degrees between successive FoVs; make sure it is equal to the agent's original FoV increment
examples:
PYTHONPATH=.. python simulate_history.py exp-random/samples/1575_randomagent.json 1 15
PYTHONPATH=.. python simulate_history.py exp-random/samples/2100_randomagent.json 0 15
'''
DATA_PATH = '../py_bottom_up_attention/demo/data/genome/1600-400-20'
DETECTRON2_YAML = '../py_bottom_up_attention/configs/VG-Detection/faster_rcnn_R_101_C4_caffe.yaml'
matplotlib.use('Agg')
if __name__ == '__main__':
if len(sys.argv) != 4:
print(usage)
quit(1)
history = json.load(open(sys.argv[1], 'r'))
detectron = int(sys.argv[2])
increment = int(sys.argv[3])
if detectron:
data_path = DATA_PATH
vg_classes = []
with open(os.path.join(data_path, 'objects_vocab.txt')) as f:
for line in f.readlines():
vg_classes.append(line.split(',')[0].lower().strip())
MetadataCatalog.get("vg").thing_classes = vg_classes
yaml_file = DETECTRON2_YAML
cfg = get_cfg()
cfg.merge_from_file(yaml_file
)
cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 300
cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.6
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.6
# VG Weight
cfg.MODEL.WEIGHTS = "http://nlp.cs.unc.edu/models/faster_rcnn_from_caffe.pkl"
predictor = DefaultPredictor(cfg)
refer = ".".join([" ".join(refexp) for refexp in history['refexps']])
n_steps = len(history['lng_diffs'])
image_path = history['img_src']
waldo_position_lng = history['gt_lng']
waldo_position_lat = history['gt_lat']
start_lng = history['start_lat']
start_lat = history['start_lng']
pred_lng = history['pred_lng']
pred_lat = history['pred_lat']
pred_x = history['pred_x']
pred_y = history['pred_y']
print("image_path", image_path)
print(refer)
print("start:", start_lat, start_lng)
print("gt:", waldo_position_lat, waldo_position_lng)
print("pred:", pred_lat, pred_lng)
print("pred xy:", pred_x, pred_y)
print('n_steps:', n_steps)
full_w, full_h = 4552, 2276
canvas = np.zeros((full_h, full_w, 3), dtype='uint8')
fov_canvas = np.ones((full_h, full_w, 3), dtype='uint8')*255
size = 20
x = int(full_w * ((waldo_position_lng + 180)/360.0))
y = int(full_h - full_h *
((waldo_position_lat + 90)/180.0))
print('lat lng:', waldo_position_lat, waldo_position_lng)
print('x y:', x, y)
fov_canvas[y-size:y+size, x-size:x+size, 2] = 255.
fov_canvas[y-size:y+size, x-size:x+size, :2] = 0
original = cv2.imread(image_path, cv2.IMREAD_COLOR)
cam = camera.PanoramicCamera()
cam.load_img(image_path, convert_color=False)
waldo_img = cv2.resize(cv2.imread(
'../data/waldo.png', cv2.IMREAD_COLOR), (60, 40), interpolation=cv2.INTER_AREA)
target_img = cv2.resize(cv2.imread(
'../data/target.png', cv2.IMREAD_COLOR), (60, 60), interpolation=cv2.INTER_AREA)
nn = 0
lng = -180 # start_lng
lat = 75 # start_lat
objects = []
THRESHOLD = 10
font = cv2.FONT_HERSHEY_SIMPLEX
while True:
cam.look(lng, lat)
img = cam.get_image()
lng_map, lat_map = cam.get_pixel_map()
mapping = np.stack((lng_map, lat_map), axis=2)
points = []
poly_transform = np.stack((lng_map.reshape(400*400, 1),
lat_map.reshape(400*400, 1)), axis=1).reshape(400*400, 2)
poly = []
w, h = mapping.shape[0], mapping.shape[1]
debug = []
for ii in range(h):
poly.append([mapping[ii][0][0], mapping[ii][0][1]])
debug.append(poly[0])
debug.append(poly[-1])
for ii in range(w):
poly.append([mapping[w-1][ii][0], mapping[w-1][ii][1]])
debug.append(poly[-1])
for ii in range(h):
poly.append([mapping[h-1-ii][w-1][0], mapping[h-1-ii][w-1][1]])
debug.append(poly[-1])
for ii in range(w):
poly.append([mapping[0][w-1-ii][0], mapping[0][w-1-ii][1]])
debug.append(poly[-1])
points.append(np.array(poly))
color = np.uint8(np.random.rand(3) * 255).tolist()
# orig_lng, orig_lat = lng, lat
# ylats = np.arange(-3, 3, 1)
# xlngs = np.arange(-3, 3, 1)
# for ylat in ylats:
# for xlng in xlngs:
# cam.look(lng + xlng, lat + ylat)
# cover_pixel_map = cam.get_map()
# c_lng_map, c_lat_map = cam.get_pixel_map()
# canvas[c_lat_map, c_lng_map, :] = original[c_lat_map, c_lng_map, :]
# canvas[lat_map, lng_map, :] = original[lat_map, lng_map, :]
#canvas = cv2.blur(canvas, (3, 3))
# tform = PiecewiseAffineTransform()
# tform.estimate(poly_transform, poly_transform)
# out_rows = original.shape[0]
# out_cols = original.shape[1]
# canvas = warp(original, tform, output_shape=(out_rows, out_cols))
inverse_mapx, inverse_mapy = cam.get_inverse_map()
dummy = np.zeros((full_h, full_w), dtype='uint8')
m1 = np.greater(inverse_mapx, dummy)
m2 = np.greater(inverse_mapy, dummy)
m = np.logical_or(m1, m2).astype(np.uint8)*255
mask =
|
np.stack((m,)*3, axis=-1)
|
numpy.stack
|
#!/usr/bin/env python3
import numpy as np
import re
import sys
import os
import subprocess
import logging
import argparse
from scipy.interpolate import UnivariateSpline
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from astropy.time import Time
from scipy.optimize import curve_fit
logger = logging.getLogger(__name__)
#---------------------------------------------------------------
class LittleClipError(Exception):
"""Raise when not enough data is clipped"""
pass
class LargeClipError(Exception):
"""Raise when too much data is clipped"""
pass
class NoComponentsError(Exception):
"""Raise when there are no feasible profile components"""
pass
class ProfileLengthError(Exception):
"""Raise when a profile's legnth is too small to be fit"""
pass
class NoFitError(Exception):
"""Raise when no gaussian fits have been found"""
pass
#---------------------------------------------------------------
def subprocess_pdv(archive, outfile="archive.txt", pdvops="-FTt"):
"""
Runs the pdv command from PSRCHIVE as a python subprocess
Parameters
----------
archive: string
The name of the archive file to run the command on
outfile: string
OPTIONAL - The name of the text file to write the output to. Default: archive.txt
pdvops: string
OPTIONAL - Additional options for the pdv. Default: -FTt
"""
myoutput = open(outfile,'w+')
commands=["pdv"]
commands.append(pdvops)
commands.append(archive)
subprocess.run(commands, stdout=myoutput)
myoutput.close()
#---------------------------------------------------------------
def get_from_bestprof(file_loc):
"""
Get info from a bestprof file
Parameters:
-----------
file_loc: string
The path to the bestprof file
Returns:
--------
[obsid, pulsar, dm, period, period_uncer, obsstart, obslength, profile, bin_num]: list
obsid: int
The observation ID
pulsar: string
The name of the pulsar
dm: float
The dispersion measure of the pulsar
period: float
The period of the pulsar
period_uncer: float
The uncertainty in the period measurement
obsstart: int
The beginning time of the observation
obslength: float
The length of the observation in seconds
profile: list
A list of floats containing the profile data
bin_num: int
The number of bins in the profile
"""
with open(file_loc,"r") as bestprof:
lines = bestprof.readlines()
# Find the obsid by finding a 10 digit int in the file name
obsid = re.findall(r'(\d{10})', lines[0])[0]
try:
obsid = int(obsid)
except ValueError:
obsid = None
pulsar = str(lines[1].split("_")[-1][:-1])
if not (pulsar.startswith('J') or pulsar.startswith('B')):
pulsar = 'J{0}'.format(pulsar)
dm = lines[14][22:-1]
period = lines[15][22:-1]
period, period_uncer = period.split(' +/- ')
mjdstart = Time(float(lines[3][22:-1]), format='mjd', scale='utc')
# Convert to gps time
obsstart = int(mjdstart.gps)
# Get obs length in seconds by multiplying samples by time per sample
obslength = float(lines[6][22:-1])*float(lines[5][22:-1])
# Get the pulse profile
orig_profile = []
for l in lines[27:]:
orig_profile.append(float(l.split()[-1]))
bin_num = len(orig_profile)
profile = np.zeros(bin_num)
# Remove min
min_prof = min(orig_profile)
for p, _ in enumerate(orig_profile):
profile[p] = orig_profile[p] - min_prof
return [obsid, pulsar, dm, period, period_uncer, obsstart, obslength, profile, bin_num]
#---------------------------------------------------------------
def get_from_ascii(file_loc):
"""
Retrieves the profile from an ascii file
Parameters:
-----------
file_loc: string
The location of the ascii file
Returns:
--------
[profile, len(profile)]: list
profile: list
A list of floats containing the profile data
len(profile): int
The number of bins in the profile
"""
f = open(file_loc)
lines = iter(f.readlines())
next(lines) #skip first line
f.close()
profile=[]
for line in lines:
thisline=line.split()
profile.append(float(thisline[3]))
return [profile, len(profile)]
#---------------------------------------------------------------
def get_stokes_from_ascii(file_loc):
"""
Retrieves the all stokes components from an ascii file
Parameters:
-----------
file_loc: string
The location of the ascii file
Returns:
--------
[I, Q, U, V, len(profile)]: list
I: list
Stokes I
Q: list
Stokes Q
U: list
Stokes U
V: list
Stokes V
len(profile): int
The number of bins in the profile
"""
f = open(file_loc)
lines = iter(f.readlines())
f.close()
next(lines) #skip first line
I=[]
Q=[]
U=[]
V=[]
for line in lines:
thisline=line.split()
I.append(float(thisline[3]))
Q.append(float(thisline[4]))
U.append(float(thisline[5]))
V.append(float(thisline[6]))
return [I, Q, U, V, len(I)]
#---------------------------------------------------------------
def sigmaClip(data, alpha=3., tol=0.1, ntrials=10):
"""
Sigma clipping operation:
Compute the data's median, m, and its standard deviation, sigma.
Keep only the data that falls in the range (m-alpha*sigma,m+alpha*sigma) for some value of alpha, and discard everything else.
This operation is repeated ntrials number of times or until the tolerance level is hit.
Parameters:
-----------
data: list
A list of floats - the data to clip
alpha: float
OPTIONAL - Determines the number of sigmas to use to determine the upper and lower limits. Default=3
tol: float
OPTIONAL - The fractional change in the standard deviation that determines when the tolerance is hit. Default=0.1
ntrials: int
OPTIONAL - The maximum number of times to apply the operation. Default=10
Returns:
--------
oldstd: float
The std of the clipped data
x: list
The data list that contains only noise, with nans in place of 'real' data
"""
x = np.copy(data)
oldstd = np.nanstd(x)
#When the x[x<lolim] and x[x>hilim] commands encounter a nan it produces a
#warning. This is expected because it is ignoring flagged data from a
#previous trial so the warning is suppressed.
old_settings = np.seterr(all='ignore')
for trial in range(ntrials):
median = np.nanmedian(x)
lolim = median - alpha * oldstd
hilim = median + alpha * oldstd
x[x<lolim] = np.nan
x[x>hilim] = np.nan
newstd = np.nanstd(x)
tollvl = (oldstd - newstd) / newstd
if tollvl <= tol:
logger.debug("Took {0} trials to reach tolerance".format(trial+1))
np.seterr(**old_settings)
return oldstd, x
if trial + 1 == ntrials:
logger.info("Reached number of trials without reaching tolerance level")
np.seterr(**old_settings)
return oldstd, x
oldstd = newstd
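# Hedged usage sketch (synthetic data, not part of the pulsar pipeline): clip a profile
# made of gaussian noise plus a block of bright on-pulse bins; bins identified as signal
# come back as NaN while the remaining noise is untouched.
def _example_sigmaClip():
    example_profile = np.concatenate([np.random.normal(0., 1., 90), np.full(10, 20.)])
    noise_std, noise_only = sigmaClip(example_profile, alpha=3., tol=0.01, ntrials=100)
    return noise_std, int(np.isnan(noise_only).sum())  # noise std and the number of clipped bins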
#---------------------------------------------------------------
def check_clip(clipped_prof, toomuch=0.8, toolittle_frac=0., toolittle_absolute=4):
"""
Determines whether a clipped profile from sigmaClip() has been appropriately clipped by checking the number of nans.
Raises a LittleClipError or a LargeClipError if too little or too much of the data has been clipped respectively.
Parameters:
-----------
clipped_prof: list
The clipped profile from sigmaClip()
toomuch: float
OPTIONAL - The fraction of the clipped profile beyond which is considered overclipped. Default: 0.8
toolittle_frac: float
OPTIONAL - The fraction of the clipped profile below which is considered underclipped. Default: 0.
toolittle_absolute: int
OPTIONAL - If a profile has this many or less on-pulse bins, it is deemed not sufficient. Default: 4
"""
num_nans = 0
for i in clipped_prof:
if np.isnan(i):
num_nans += 1
if num_nans <= toolittle_frac*len(clipped_prof) or num_nans <= toolittle_absolute:
raise LittleClipError("Not enough data has been clipped. Condsier trying a smaller alpha value when clipping.")
elif num_nans >= toomuch*len(clipped_prof):
raise LargeClipError("A large portion of the data has been clipped. Condsier trying a larger alpha value when clipping.")
#---------------------------------------------------------------
def fill_clipped_prof(clipped_prof, search_scope=None, nan_type=0.):
"""
Intended for use on noisy profiles. Fills nan values that are surrounded by non-nans to avoid discontinuities in the profile
Parameters:
-----------
clipped_prof: list
The on-pulse profile
nan_type: float
OPTIONAL - The value used to identify and fill gap bins. Default: 0.
search_scope: int
The number of bins to search for non-nan values. If None, will search 5% of the total bin number. Default:None.
Returns:
--------
clipped_prof: list
The clipped profile with nan gaps filled in
"""
length = len(clipped_prof)
if search_scope is None:
#Search 5% ahead for non-nans
search_scope = round(length*0.05)
search_scope = np.linspace(1, search_scope, search_scope, dtype=int)
#loop over all values in clipped profile
for i, val in enumerate(clipped_prof):
if val==nan_type and not (i+max(search_scope)) >= length:
#look 'search_scope' indices ahead for non-nans
for j in sorted(search_scope, reverse=True):
#fill in nans
if clipped_prof[i+j]==nan_type:
for k in range(j):
clipped_prof[i+k]=nan_type
break
return clipped_prof
#---------------------------------------------------------------
def find_components(profile, min_comp_len=5):
"""
Given a profile in which the noise is clipped to 0, finds the components that are clumped together.
Parameters:
-----------
profile: list
A list of floats describing the profile where the noise has been clipped to zero
min_comp_len: float
OPTIONAL - Minimum length of a component to be considered real. Measured in bins. Default: 5
Returns:
--------
component_dict: dictionary
dict["component_x"] contains an array of the component x
component_idx: dictionary
dict["component_x"] contains an array of indexes of the original profile corresponding to component x
"""
component_dict={}
component_idx={}
num_components=0
for i, val in enumerate(profile):
if val!=0.:
if profile[i-1]==0 or i==0:
num_components+=1
comp_key = "component_{}".format(num_components)
component_dict[comp_key]=[]
component_idx[comp_key]=[]
component_dict[comp_key].append(val)
component_idx[comp_key].append(i)
del_comps = []
for comp_key in component_dict.keys():
if len(component_dict[comp_key]) < min_comp_len or max(component_dict[comp_key]) < 0.:
del_comps.append(comp_key)
for i in del_comps:
del component_dict[i]
del component_idx[i]
if len(component_dict.keys()) == 0:
raise NoComponentsError("No profile components have been found")
return component_dict, component_idx
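# Hedged usage sketch (toy zero-clipped profile): two clumps of non-zero bins are
# returned as "component_1" and "component_2" together with their bin indices.
def _example_find_components():
    toy_profile = [0., 0., 1., 2., 1., 0., 0., 0., 3., 4., 3., 0.]
    comp_dict, comp_idx = find_components(toy_profile, min_comp_len=2)
    return sorted(comp_dict.keys()), comp_idx["component_1"]  # second item -> [2, 3, 4]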
#---------------------------------------------------------------
def find_minima_maxima(profile, ignore_threshold=0, min_comp_len=0):
"""
Finds all minima and maxima of the input profile. Assumes that the profile has noise zero-clipped.
profile: list
The profile with noise zero-clipped
ignore_threshold: float
OPTIONAL - Maxima with values below this number will be ignored. Default: 0
min_comp_len: float
OPTIONAL - Minimum length of a component to be considered real. Measured in bins. Default: 0
Returns:
--------
minima: list
A list of floats corresponding to the bin location of the profile minima
maxima: list
A list of floats corresponding to the bin location of the profile maxima
"""
#If there is more than one component, find each one
comp_dict, comp_idx = find_components(profile, min_comp_len)
maxima=[]
minima=[]
#loop over each profile component
for key in comp_dict.keys():
x = np.linspace(0, len(comp_dict[key])-1, len(comp_dict[key]), dtype=int)
spline = UnivariateSpline(x, comp_dict[key], s=0.0, k=4)
comp_roots = spline.derivative().roots()
# These are the roots, we want to split maxima and minima ^^
comp_maxima=[]
comp_minima=[]
for i, root in enumerate(comp_roots):
idx = int(root)
left = comp_dict[key][idx-1]
if left>comp_dict[key][idx]:
comp_minima.append(root)
else:
comp_maxima.append(root)
#Turn the root locations into locations on profile, not on component
for root in comp_minima:
abs_root = root + comp_idx[key][0]
minima.append(abs_root)
for root in comp_maxima:
abs_root = root + comp_idx[key][0]
maxima.append(abs_root)
ignore_idx = []
for i, mx in enumerate(maxima):
if max(profile[int(mx-1):int(mx+1)]) < ignore_threshold*max(profile):
ignore_idx.append(i)
for i in sorted(ignore_idx, reverse=True):
del maxima[i]
return minima, maxima
def find_minima_maxima_gauss(popt, pcov, x_length, min_comp_len=0):
"""
Finds all roots of a gaussian function
Parameters:
-----------
popt: list
A list of length 3N where N is the number of gaussians. This list contains the parameters amp, centre and width respectively
pcov: np.matrix
The covariance matrix corresponding to the parameters from popt
x_length: int
The length of the list used to fit the gaussian
Returns:
--------
minima: list
A list of the minimum points of the fit
maxima: list
A list of the maximum points of the fit
minima_e: list
The uncertainties in the minima locations
maxima_e: list
The uncertainties in the maxima locations
"""
#Create the derivative list and spline it to find roots
x = np.linspace(0, x_length-1, x_length)
dy = multi_gauss_ddx(x, *popt)
spline_dy = UnivariateSpline(x, dy, s=0)
roots = spline_dy.roots()
#Find which are max and min
maxima = []
minima = []
for root in roots:
idx = int(root + 0.5)
if dy[idx-1] > dy[idx]:
maxima.append(root)
else:
minima.append(root)
minima_e = find_x_err(minima, popt, pcov)
maxima_e = find_x_err(maxima, popt, pcov)
return minima, maxima, minima_e, maxima_e
#---------------------------------------------------------------
def find_x_err(x, popt, pcov):
"""
Finds the error in the horizontal position of a gaussian fit at the point x.
Uses the equation sigma_x = sigma_y/d2ydx2 where:
sigma_x = error in x
d2ydx2 = second derivative of the gaussian function at point x
sigma_y = J*C*J_T
J = Jacobian evaluated at point x
C = covariance matrix of gaussian fit
J_T = transposed jacobian
Parameters:
-----------
x: list
A list of points to evaluate the error at
popt: list
The parameters used to describe the gaussian fit
pcov: numpy.matrix
The covariance matrix corresponding to popt
Returns:
--------
x_err: list
The error evaluated at each point, x
"""
x_err = []
for _, point in enumerate(x):
J = jacobian_slope(point, *popt)
d2dx2 = multi_gauss_d2dx2(point, *popt)
JC = np.matmul(J, pcov)
sigma_y = np.sqrt( np.matmul(JC, np.transpose(J)).item(0) )
x_err.append(sigma_y / abs(d2dx2))
return x_err
#---------------------------------------------------------------
def find_widths(profile, popt, pcov, alpha=3):
"""
Attempts to find the W_10, W_50 and equivalent width of a profile by using a spline approach.
W10 and W50 errors are estimated by using: sigma_x = sigma_y/(dy/dx)
Weq errors are estimated by finding the average difference in Weq when you add and subtract the std from the on-pulse profile
Parameters:
-----------
profile: list
A list of floats that make up the profile
popt: list
The parameters that are used to create the multi-gaussian fit
pcov: np.matrix
The covariance matrix corresponding to the parameters from popt
alpha: float
OPTIONAL - The alpha value to be used in sigmaClip(). Default: 3
Returns:
--------
[W10, W50, Weq, Wscat, W10_e, W50_e, Weq_e, Wscat_e]: list
W10: float
The W10 width of the profile measured in number of bins
W50: float
The W50 width of the profile measured in number of bins
Weq: float
The equivalent width of the profile measured in number of bins
Wscat: float
The scattering width of the profile measured in number of bins
W10_e: float
The uncertainty in W10
W50_e: float
The uncertainty in W50
Weq_e: float
The uncertainty in Weq
Wscat_e: float
The uncertainty in Wscat
"""
def error_in_x_pos(pcov, popt, x):
J = jacobian_slope(x, *popt)
JC = np.matmul(J, pcov)
sigma_y = np.sqrt(np.matmul(JC, np.transpose(J)).item(0))
ddx = multi_gauss_ddx(x, *popt)
return sigma_y/ddx
#perform spline operations on the fit
x = np.linspace(0, len(profile)-1, len(profile))
fit = multi_gauss(x, *popt)
amp_fit = max(fit) - min(fit)
spline10 = UnivariateSpline(x, fit - np.full(len(x), 0.1*amp_fit), s=0)
spline50 = UnivariateSpline(x, fit - np.full(len(x), 0.5*amp_fit), s=0)
spline_s = UnivariateSpline(x, fit - np.full(len(x), 1/np.exp(1)*amp_fit), s=0)
#find Weq using the real profile
std, off_pulse = sigmaClip(profile, alpha=alpha)
check_clip(off_pulse)
on_pulse=[]
for i, data in enumerate(off_pulse):
if np.isnan(data):
on_pulse.append(profile[i])
x = np.linspace(0, len(on_pulse)-1, len(on_pulse))
spline0 = UnivariateSpline(x, on_pulse, s=0)
integral = spline0.integral(0, len(on_pulse)-1)
Weq = integral/max(on_pulse)
#find W10, W50 and Wscat
W10_roots = spline10.roots()
W50_roots = spline50.roots()
Wscat_roots = spline_s.roots()
W10 = W10_roots[-1] - W10_roots[0]
W50 = W50_roots[-1] - W50_roots[0]
Wscat = Wscat_roots[-1] - Wscat_roots[0]
#W10 root errors
err_10_1 = error_in_x_pos(pcov, popt, W10_roots[0])
err_10_2 = error_in_x_pos(pcov, popt, W10_roots[-1])
W10_e = np.sqrt(err_10_1**2 + err_10_2**2)
#W50 root errors
err_50_1 = error_in_x_pos(pcov, popt, W50_roots[0])
err_50_2 = error_in_x_pos(pcov, popt, W50_roots[-1])
W50_e = np.sqrt(err_50_1**2 + err_50_2**2)
#Wscat root errors
err_scat_1 = error_in_x_pos(pcov, popt, Wscat_roots[0])
err_scat_2 = error_in_x_pos(pcov, popt, Wscat_roots[-1])
Wscat_e = np.sqrt(err_scat_1**2 + err_scat_2**2)
#Weq errors - using covariance formula
on_pulse_less = (on_pulse - std).clip(min=0)
spline0 = UnivariateSpline(x, on_pulse_less, s=0)
integral = spline0.integral(0, len(profile)-1)
dwdint = 1/max(on_pulse)**2
dwdmax = -integral/max(on_pulse)**2
int_e = abs(integral/max(on_pulse - std) - integral/max(on_pulse))
max_e = std
Weq_e = np.sqrt( dwdint**2 * int_e**2 + dwdmax**2 * max_e**2 + 2*dwdint*dwdmax*int_e*max_e )
return [W10, W50, Weq, Wscat, W10_e, W50_e, Weq_e, Wscat_e]
#---------------------------------------------------------------
def est_sn_from_prof(prof_data, period, alpha=3.):
"""
Estimates the signal to noise ratio from a pulse profile
Based on code originally written by <NAME>.
Parameters:
-----------
prof_data: list
A list of floats that contains the pulse profile
period: float
The pulsar's period in ms
alpha: float
OPTIONAL - The alpha value to be used in sigmaClip(). Default: 3
Returns:
--------
[sn, sn_e, scattered]
sn: float
The estimated signal to noise ratio
sn_e: float
The uncertainty in sn
scattered: boolean
When true, the profile is highly scattered
"""
# Normalise the profile
prof_data = prof_data / max(prof_data)
#centre the profile around the max
shift = -int(np.argmax(prof_data))+int(len(prof_data))//2
prof_data = np.roll(prof_data, shift)
#find std and check if profile is scattered
sigma, flags = sigmaClip(prof_data, tol=0.01, ntrials=100, alpha=alpha)
check_clip(flags)
bot_prof_min = (max(prof_data) - min(prof_data)) * .1 + min(prof_data)
scattered=False
if (np.nanmin(flags) > bot_prof_min) or ( not np.isnan(flags).any() ):
logger.warning("The profile is highly scattered. S/N estimate cannot be calculated")
scattered=True
sn = sn_e = None
else:
#prof_e = 500. #this is when it's not normalised
prof_e = 0.0005 #this is an approximation
non_pulse_bins = 0
#work out the above parameters
for i, _ in enumerate(prof_data):
if not np.isnan(flags[i]):
non_pulse_bins += 1
sigma_e = sigma / np.sqrt(2 * non_pulse_bins - 2)
#now calc S/N
sn = max(prof_data)/sigma
sn_e = sn * np.sqrt(prof_e/max(prof_data)**2 + (sigma_e/sigma)**2)
logger.debug("max prof: {} +/- {} ".format(max(prof_data), prof_e ))
logger.debug("sigma : {} +/- {} ".format(sigma, sigma_e ))
logger.debug("sn : {} +/- {} ".format(sn, sn_e ))
return [sn, sn_e, scattered]
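# Hedged usage sketch (synthetic profile, hypothetical 100 ms period): a narrow gaussian
# pulse on top of weak noise should give a large, finite S/N estimate with scattered=False.
def _example_est_sn_from_prof():
    bins = np.arange(128)
    profile = np.exp(-0.5 * ((bins - 64) / 3.) ** 2) + np.random.normal(0., 0.01, 128)
    return est_sn_from_prof(profile, period=100., alpha=3.)  # [sn, sn_e, scattered]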
#---------------------------------------------------------------
def analyse_pulse_prof(prof_data, period, alpha=3):
"""
Estimates the signal to noise ratio and many other properties from a pulse profile.
Based on code originally written by <NAME>.
This is the old version of 'est_sn_from_prof' but is useful when we can't fit gaussians
Parameters:
-----------
prof_data: list
A list of floats that contains the pulse profile.
period: float
The pulsar's period in ms
alpha: float
OPTIONAL - The alpha value to use when clipping using sigmaClip(). Default: 3
Returns:
--------
prof_dict: dictionary
contains keys:
sn: float
The estimated signal to noise ratio
u_sn: float
The uncertainty in the estimated signal to noise ratio
flags: list
A list of flagged data points
w_equiv_bins: float
The equivalent width of the profile measured in bins
u_w_equiv_bins: float
The uncertainty in w_equiv_bins
w_equiv_ms: float
The equivalent width of the profile measured in ms
u_w_equiv_ms: float
The uncertainty in w_equiv_ms
scattering: float
The scattering width in ms
u_scattering: float
The uncertainty in the scattering width in ms
scattered: boolean
When true, the profile is highly scattered
"""
prof_dict = {}
nbins = len(prof_data)
#centre the profile around the max
shift = -int(np.argmax(prof_data))+int(nbins)//2
prof_data = np.roll(prof_data, shift)
#find sigma and check if profile is scattered
sigma, flags = sigmaClip(prof_data, alpha=alpha, tol=0.01, ntrials=100)
check_clip(flags)
bot_prof_min = (max(prof_data) - min(prof_data)) * .1 + min(prof_data)
scattered=False
if (np.nanmin(flags) > bot_prof_min) or ( not np.isnan(flags).any() ):
logger.info("The profile is highly scattered. S/N estimate cannot be calculated")
scattered=True
#making a new profile with the only bin being the lowest point
prof_min_i = np.argmin(prof_data)
flags = []
for fi, _ in enumerate(prof_data):
if fi == prof_min_i:
flags.append(prof_data[fi])
else:
flags.append(np.nan)
flags = np.array(flags)
prof_data -= min(prof_data)
#Assuming width is equal to pulsar period because of the scattering
w_equiv_ms = period
u_w_equiv_ms = period/nbins
sn = None
u_sn = None
else:
u_prof = 500. #this is an approximation
pulse_width_bins = 0
non_pulse_bins = 0
p_total = 0.
u_p = 0.
#work out the above parameters
for i, data in enumerate(prof_data):
if np.isnan(flags[i]):
pulse_width_bins += 1
p_total += data
u_p = np.sqrt(u_p**2 + u_prof**2)
else:
non_pulse_bins += 1
u_sigma = sigma / np.sqrt(2 * non_pulse_bins - 2)
#now calc S/N
sn = max(prof_data)/sigma
u_sn = sn * np.sqrt(u_prof/max(prof_data)**2 + (u_sigma/sigma)**2)
if not scattered:
off_pulse_mean = np.nanmean(flags)
prof_data -= off_pulse_mean
flags -= off_pulse_mean
prof_max = max(prof_data)
w_equiv_bins = p_total / prof_max
w_equiv_ms = w_equiv_bins / nbins * period # in ms
u_w_equiv_bins = np.sqrt(p_total /prof_max)**2 +\
(p_total * u_prof / (prof_max)**2)**2
u_w_equiv_ms = u_w_equiv_bins / nbins * period # in ms
else:
w_equiv_ms = period
u_w_equiv_ms = period/nbins
w_equiv_bins = w_equiv_ms/period*nbins
u_w_equiv_bins = (u_w_equiv_ms/w_equiv_ms)*w_equiv_bins
#calc scattering
scat_height = max(prof_data) / 2.71828
scat_bins = 0
for p in prof_data:
if p > scat_height:
scat_bins = scat_bins + 1
scattering = float(scat_bins + 1) * float(period) /1000. #in s
u_scattering = 1. * float(period) /1000. # assumes the uncertainty is one bin
prof_dict["sn"] = sn
prof_dict["sn_e"] = u_sn
prof_dict["flags"] = flags
prof_dict["w_equiv_bins"] = w_equiv_bins
prof_dict["w_equiv_bins_e"] = u_w_equiv_bins
prof_dict["w_equiv_ms"] = w_equiv_ms
prof_dict["w_equiv_ms_e"] = u_w_equiv_ms
prof_dict["scattering"] = scattering
prof_dict["scattering_e"] = u_scattering
prof_dict["scattered"] = scattered
return prof_dict
def auto_analyse_pulse_prof(prof_data, period):
"""
Automatically finds the best alpha value to use for analyse_pulse_prof() and returns the best resulting dictionary
Parameters:
-----------
prof_data: list
A list of floats that contains the pulse profile.
period: float
The period of the pulsar
Returns:
--------
fit_dict: dictionary
contains keys:
sn: float
The estimated signal to noise ratio
u_sn: float
The uncertainty in the estimated signal to noise ratio
flags: list
A list of flagged data points
w_equiv_bins: float
The equivalent width of the profile measured in bins
u_w_equiv_bins: float
The uncertainty in w_equiv_bins
w_equiv_ms: float
The equivalent width of the profile measured in ms
u_w_equiv_ms: float
The uncertainty in w_equiv_ms
scattering: float
The scattering width in ms
u_scattering: float
The uncertainty in the scattering width in ms
scattered: boolean
When true, the profile is highly scattered
"""
if not isinstance(period, float):
period = float(period)
alphas = np.linspace(1, 5, 9)
attempts_dict = {}
loglvl = logger.level
logger.setLevel(logging.WARNING) #squelch logging for the loop
#loop over the alpha values, calling the profile evaluation function and excepting its in-built errors
for alpha in alphas:
try:
prof_dict = analyse_pulse_prof(prof_data, period, alpha=alpha)
attempts_dict[alpha] = prof_dict
except(LittleClipError, LargeClipError, NoComponentsError, ProfileLengthError) as e:
logger.setLevel(loglvl)
logger.info(e)
logger.info("Skipping alpha value: {}".format(alpha))
logger.setLevel(logging.WARNING) #squelch logging for the loop
logger.setLevel(loglvl)
#Evaluate the best profile based on the SN error.
sne = []
sne_alphas = []
scattered_trials = []
if attempts_dict:
for alpha_key in attempts_dict.keys():
scattered_trials.append(attempts_dict[alpha_key]["scattered"])
if not attempts_dict[alpha_key]["scattered"]:
sne.append(attempts_dict[alpha_key]["sn_e"])
sne_alphas.append(alpha_key)
if sne: #there is an SN estimate available. Only look through these analyses as candidates
best_sne = min(sne)
best_alpha = sne_alphas[sne.index(best_sne)]
fit_dict = attempts_dict[best_alpha]
elif scattered_trials:
mse = []
scattered_alphas = []
for alpha_key in attempts_dict.keys():
if attempts_dict[alpha_key]["scattered"]:
#use mean square of width errors as a metric for the best fit
mse.append(np.sqrt(attempts_dict[alpha_key]["w_equiv_bins_e"]**2 + attempts_dict[alpha_key]["scattering_e"]**2))
scattered_alphas.append(alpha_key)
best_mse = min(mse)
best_alpha = scattered_alphas[mse.index(best_mse)]
fit_dict = attempts_dict[best_alpha]
if not attempts_dict: #sometimes things go wrong :/
logger.error("Profile could not be fit. Returning empty dictionary!")
return {}
logger.info("Best profile analysis using an alpha value of {}".format(best_alpha))
return fit_dict
#---------------------------------------------------------------
def integral_multi_gauss(*params):
y=0
for i in range(0, len(params), 3):
a = params[i]
c = params[i+2]
y = y + a*c*np.sqrt(2*np.pi)
return y
def multi_gauss(x, *params):
y = np.zeros_like(x)
for i in range(0, len(params), 3):
a = params[i]
b = params[i+1]
c = params[i+2]
y = y + a * np.exp( -(((x-b)**2) / (2*c**2)) )
return y
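# Hedged worked example: a single component with amp=2, centre=5 and width=1 evaluates
# to the full amplitude at its centre and to amp*exp(-0.5) one bin away.
def _example_multi_gauss():
    x = np.array([4., 5., 6.])
    return multi_gauss(x, 2., 5., 1.)  # -> [2*exp(-0.5), 2.0, 2*exp(-0.5)]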
def multi_gauss_ddx(x, *params):
#derivative of gaussian
y = np.zeros_like(x)
for i in range(0, len(params), 3):
a = params[i]
b = params[i+1]
c = params[i+2]
y = y - a/c**2 * (x - b) * np.exp( -(((x-b)**2) / (2*c**2)) )
return y
def multi_gauss_d2dx2(x, *params):
#double derivative of gaussian
y = np.zeros_like(x)
for i in range(0, len(params), 3):
a = params[i]
b = params[i+1]
c = params[i+2]
y = y + (multi_gauss(x, a, b, c) / c**2) * (((x - b)**2)/(c**2) - 1)
return y
def partial_gauss_dda(x, a, b, c):
return np.exp((-(b - x)**2)/(2*c**2))
def partial_gauss_ddb(x, a, b, c):
return a*(x - b) * np.exp((-(b - x)**2)/(2*c**2))/c**2
def partial_gauss_ddc(x, a, b, c):
return a*(x - b)**2 * np.exp((-(b - x)**2)/(2*c**2))/c**3
#---------------------------------------------------------------
def jacobian_slope(x, *params):
"""
Evaluates the Jacobian matrix of a gaussian slope at a single point, x
Parameters:
-----------
x: float
The point to evaluate
*params: list
A list containing three parameters per gaussian component in the order: Amp, Mean, Width
Returns:
--------
J: numpy.matrix
The Jacobian matrix
"""
def dda(a, b, c, x):
return -multi_gauss(x, a, b, c) * (x - b)/(c**2)/a
def ddb(a, b, c, x):
return multi_gauss(x, a, b, c) * (1 - (x - b)**2/(c**2))/c**2
def ddc(a, b, c, x):
return multi_gauss(x, a, b, c) * (x - b)/(c**3) * (2 - (x-b)**2/(c**2))
J = []
for i in range(0, len(params), 3):
a = params[i]
b = params[i+1]
c = params[i+2]
mypars = [a, b, c, x]
J.append(dda(*mypars))
J.append(ddb(*mypars))
J.append(ddc(*mypars))
J = np.asmatrix(J)
return J
#---------------------------------------------------------------
def plot_fit(plot_name, y, fit, popt, maxima=None, maxima_e=None):
x = np.linspace(0, len(y)-1, len(y))
plt.figure(figsize=(30, 18))
for j in range(0, len(popt), 3):
z = multi_gauss(x, *popt[j:j+3])
plt.plot(x, z, "--", label="Gaussian Component {}".format(int((j+3)/3)))
if maxima:
for i, mx in enumerate(maxima):
plt.axvline(x=(mx + maxima_e[i]), ls=":", lw=2, color="gray")
plt.axvline(x=(mx - maxima_e[i]), ls=":", lw=2, color="gray")
plt.title(plot_name.split("/")[-1].split(".")[0], fontsize=22)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.xlim(0, len(y))
plt.xlabel("Bins", fontsize=20)
plt.ylabel("Intensity", fontsize=20)
plt.plot(x, y, label="Original Profile", color="black")
plt.plot(x, fit, label="Gaussian Model", color="red")
plt.legend(loc="upper right", prop={'size': 16})
plt.savefig(plot_name)
plt.close()
#---------------------------------------------------------------
def fit_gaussian(profile, max_N=6, min_comp_len=0, plot_name=None, alpha=3.):
"""
Fits multiple gaussian components to a pulse profile and finds the best number to use for a fit.
Will always fit at least one gaussian per profile component.
Profile components are defined by find_components().
Each gaussian is defined by the following: y = amp * np.exp( -((x - ctr)**2) / (2*wid**2) )
Parameters:
-----------
profile: list
A list containing the profile data
max_N: int
OPTIONAL - The maximum number of gaussian components to attempt to fit. Default: 6
min_comp_len: float
OPTIONAL - Minimum length of a component to be considered real. Measured in bins. Default: 0
plot_name: string
OPTIONAL - If not none, will make a plot of the best fit with this name. Default: None
alpha: float
OPTIONAL - The alpha value to be used in sigmaClip(). Default: 3
Returns:
--------
[fit, redchisq, best_bic, popt, pcov]: list
fit: list
The data containing the multi-component gaussian fit to the input profile
redchisq: float
The reduced chi-squared value of the fit
best_bic: float
The bayesian information criterion for the fit
popt: list
A list of floats where each 3 numbers describes a single gaussian and are 'amp', 'ctr' and 'wid' respectively
pcov: numpy matrix
The covariance matrix generated by the curve_fit function
"""
#chi squared evaluation
def chsq(observed_values, expected_values, err):
test_statistic=0
for observed, expected in zip(observed_values, expected_values):
test_statistic+=((float(observed)-float(expected))/float(err))**2
return test_statistic
#Take noise mean and normalize the profile and check the clipped profile
_, clipped = sigmaClip(profile, alpha=alpha)
check_clip(clipped)
y = np.array(profile) - np.nanmean(np.array(clipped))
max_y = max(y)
y = np.array(y)/max_y
noise_std = np.nanstd(np.array(clipped)/max_y)
#Find profile components
clipped = fill_clipped_prof(clipped, search_scope=int(len(profile)/100))
on_pulse=[]
for i, val in enumerate(clipped):
if not np.isnan(val):
on_pulse.append(0)
else:
on_pulse.append(y[i])
comp_dict, comp_idx = find_components(on_pulse, min_comp_len=min_comp_len)
#Estimate gaussian parameters based on profile components
comp_centres = []
comp_max = []
comp_width = []
for i in range(max_N//len(comp_idx.keys())+1):
for key in comp_idx.keys():
comp_centres.append(np.mean(comp_idx[key]))
comp_max.append(max(comp_dict[key])*0.5)
comp_width.append((max(comp_idx[key])-min(comp_idx[key])))
centre_guess = iter(comp_centres)
width_guess=iter(comp_width)
max_guess=iter(comp_max)
n_comps=len(comp_dict.keys())
logger.debug("Number of profile components: {0} ({1})".format(n_comps, comp_centres[:n_comps]))
#Fit from 1 to max_N gaussians to the profile. Evaluate profile fit using bayesian information criterion
x=np.linspace(0, len(y)-1, len(y))
bounds_arr=[[],[]]
guess = []
fit_dict = {}
for num in range(1, max_N):
guess += [next(max_guess), next(centre_guess), next(width_guess)]
bounds_arr[0].append(0)
bounds_arr[0].append(0)
bounds_arr[0].append(0)
bounds_arr[1].append(max(y))
bounds_arr[1].append(len(y))
bounds_arr[1].append(len(y))
bounds_tuple=(tuple(bounds_arr[0]), tuple(bounds_arr[1]))
popt, pcov = curve_fit(multi_gauss, x, y, bounds=bounds_tuple, p0=guess, maxfev=100000)
fit = multi_gauss(x, *popt)
chisq = chsq(y, fit, noise_std)
#Bayesian information criterion for gaussian noise
k = 3*(num+1)
bic = chisq + k*np.log(len(y))
fit_dict[str(num+1)]={"popt":[], "pcov":[], "fit":[], "chisq":[], "bic":[]}
fit_dict[str(num+1)]["popt"] = popt
fit_dict[str(num+1)]["pcov"] = pcov
fit_dict[str(num+1)]["fit"] = fit
fit_dict[str(num+1)]["redchisq"] = chisq/(len(y)-1)
fit_dict[str(num+1)]["bic"] = bic
logger.debug("Reduced chi squared for {0} components: {1}".format(num+1, fit_dict[str(num+1)]["redchisq"]))
logger.debug("Bayesian Information Criterion for {0} components: {1}".format(num+1, fit_dict[str(num+1)]["bic"]))
#Find the best fit according to the BIC
best_bic = np.inf
best_fit = None
for n_components in fit_dict.keys():
if fit_dict[n_components]["bic"] < best_bic:
best_bic = fit_dict[n_components]["bic"]
best_fit = n_components
logger.info("Fit {0} gaussians for a reduced chi sqaured of {1}".format(best_fit, fit_dict[best_fit]["redchisq"]))
popt = fit_dict[best_fit]["popt"]
pcov = fit_dict[best_fit]["pcov"]
fit = fit_dict[best_fit]["fit"]
redchisq = fit_dict[best_fit]["redchisq"]
return [fit, redchisq, best_bic, popt, pcov, comp_dict, comp_idx]
#---------------------------------------------------------------
def prof_eval_gfit(profile, max_N=6, ignore_threshold=None, min_comp_len=None, plot_name=None, alpha=3., period=None):
"""
Fits multiple gaussians to a profile and subsequently finds W10, W50, Weq and maxima
Parameters:
-----------
profile: list
The pulse profile to evaluate
max_N: int
OPTIONAL - The maximum number of gaussian components to attempt to fit. Default: 6
ignore_threshold: float
OPTIONAL - Maxima with values below this number will be ignored. If none, will use 3*noise. Default: None
min_comp_len: float
OPTIONAL - Minimum length of a component to be considered real. Measured in bins. If None, will use 1% of total profile lengths + 2, max 50. Default: None
plot_name: string
OPTIONAL - If not none, will make a plot of the best fit with this name. Default: None
alpha: float
OPTIONAL - The alpha value passed to the sigmaClip() function. Default: 3
period: float
OPTIONAL - The pulsar's period in ms. If not none, will attempt a S/N calculation. Default: None
Returns:
--------
fit_dict: dictionary
contains the following keys:
W10: float
The W10 width of the profile measured in number of bins
W10_e: float
The uncertainty in the W10
W50: float
The W50 width of the profile measured in number of bins
W50_e: float
The uncertainty in the W50
Weq: float
The equivalent width of the profile measured in number of bins
Weq_e: float
The uncertainty in the equivalent width
Wscat: float
The scattering width of the profile measured in number of bins
Wscat_e: float
The uncertainty in the scattering width
maxima: list
A list of floats corresponding to the bin location of each maximum point
maxima_e: list
A list of floats, each corresponding to the error of the maxima of the same index. Measured in bins
redchisq: float
The reduced chi squared of the fit
num_gauss: int
The number of gaussian components used in the best fit
bic: float
The Bayesian Information Criterion for the best fit
gaussian_params: list
A list of length 3*N where N is num_gauss. Each set of 3 parameters corresponds to the amp, centre and width of a gaussian component
cov_mat: np.matrix
The covariance matrix from the fit
comp_dict: dictionary
dict["component_x"] contains an array of the component x
comp_idx: dictionary
dict["component_x"] contains an array of indexes of the original profile corresponding to component x
alpha: float
The alpha value used in sigmaClip()
profile: list
The input profile
fit: list
The best fit made into a list form
sn: float
The estimated signal to noise ratio, obtained from the profile. Will be None if period is unsupplied
sn_e: float
The uncertainty in sn. Will be None if period is unsupplied
scattered: boolean
True if the profile is scattered. Will be None if period is unsupplied
"""
#initialize minimum component length and ignore threshold
if min_comp_len is None:
min_comp_len = int(len(profile)/100 + 0.5) + 2
if min_comp_len > 100:
min_comp_len = 100
if min_comp_len < 3:
min_comp_len = 3
#Normalize, find the std
y = np.array(profile)/max(profile)
noise_std, clipped = sigmaClip(y, alpha=alpha)
check_clip(clipped)
if ignore_threshold is None:
ignore_threshold = 3 * noise_std
y = y -
|
np.nanmean(clipped)
|
numpy.nanmean
|
from scipy import stats
from numpy import linalg
import numpy as np
import sys
# NOTE: enable/disable smart quantization of weights and activations
smart_quantization = False
def quantize_arr(input_arr, min_val, max_val):
quantize_range = 256.0
input_range = max_val - min_val
mul_factor = input_range / quantize_range
v1 = np.subtract(input_arr, min_val)
v2 = np.divide(v1, mul_factor)
v3 = v2.astype(int)
v4 = np.multiply(v3, mul_factor)
v5 = np.add(v4, min_val)
v6 = np.clip(v5, min_val, max_val)
return v6
def compute_norm(a1, a2):
norm_inp = np.subtract(a1, a2)
#norm = linalg.norm(norm_inp, ord = 1)
norm = np.sum(np.abs(norm_inp))
print ("*** norm = ", norm)
return norm
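# Hedged usage sketch (toy values, not a real layer): map a small array onto 256 levels
# spanning [-1.0, 1.0] and report the L1 reconstruction error via compute_norm().
def example_quantize_arr():
    arr = np.array([-0.9, -0.3, 0.05, 0.42, 0.97])
    quantized = quantize_arr(arr, -1.0, 1.0)
    return compute_norm(quantized, arr)  # small for a well-chosen range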
def get_best_quant_range(input_arr):
# For disabled smart quantization, skip expensive quant range computation
if smart_quantization == False:
min_val = np.percentile(input_arr, 0.1)
max_val = np.percentile(input_arr, 99.9)
return (min_val, max_val)
# Trying different threshold values for INT8 quantization
min_percentiles = [0.0]
max_percentiles = [99.9, 99.8, 99.7, 99.5]
min_norm = 100000000
min_pair = (0, 100)
range_vals = (0, 0)
for i in min_percentiles:
for j in max_percentiles:
print (" i = ", i, " j = ", j, " \n")
min_val = np.percentile(input_arr, i)
max_val = np.percentile(input_arr, j)
res = quantize_arr(input_arr, min_val, max_val)
norm = compute_norm(res, input_arr)
if norm < min_norm:
min_norm = norm
min_pair = (i, j)
range_vals = (min_val, max_val)
print ("--- min_norm = ", min_norm, " , min_pair = ", min_pair , " range_vals = ", range_vals)
return range_vals
def dumpQuantizeRanges(weights_dir, input_min, input_max, w_max, w_min, \
b_max, b_min, output_min, output_max):
outfile_path = weights_dir + "/quant_ranges.txt"
f = open(outfile_path, "a+")
f.write(str(input_min) + " " + str(input_max) + " " + str(w_min) + " " + str(w_max) + " " + \
str(b_min) + " " + str(b_max) + " " + str(output_min) + " " + str(output_max) + "\n")
f.close()
if __name__ == "__main__":
vals = np.zeros((2,3))
vals[0][0] = 1.2
vals[0][1] = 0.48
vals[0][2] = 0.5
vals[1][0] = -0.3
vals[1][1] = 0.25
vals[1][2] = 0.46
input_arr =
|
np.array(vals)
|
numpy.array
|
from __future__ import print_function, division, absolute_import
import copy
import time
import numpy as np
import sys
class Bridge(object):
def __init__(self, initial_components, available_components):
self.components = list(initial_components)
self.score = sum([sum(tup) for tup in self.components])
self.available_components = available_components
def next_required_number(self):
if len(self.components) == 1:
c = self.components[0]
nrn = c[0] if c.index(0) == 1 else c[1]
else:
c1 = self.components[-1]
c2 = self.components[-2]
nrn = c1[0] if c1[1] in c2 else c1[1]
return nrn
def add_component(self, c):
nrn = self.next_required_number()
if nrn not in c:
raise ValueError('Invalid connection, wrong port. Needed: {0} Got: {1}'.format(nrn, str(c)))
if c not in self.available_components:
raise ValueError('Component unavailable:', c)
self.components.append(c)
self.score += sum(c)
self.available_components.remove(c)
# def score(self):
# return sum([sum(tup) for tup in self.components])
def length(self):
return len(self.components)
def assemble_next(self):
"""
Find the next required number in the bridge. Return
a *new* list of bridges each with a different valid
component on the end, depending on the available components.
Returns
-------
"""
nrn = self.next_required_number()
next_components = [c for c in self.available_components if nrn in c]
new_bridges = []
for nx in next_components:
b = Bridge(initial_components=tuple(self.components),
available_components=self.available_components.copy())
b.add_component(nx)
new_bridges.append(b)
return new_bridges
def __str__(self):
s = '--'.join(['{0}/{1}'.format(*c) for c in self.components])
return s
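# Hedged usage sketch (tiny hypothetical component set): start from the 0/2 part and
# grow one new Bridge per available component that matches the open port.
def example_bridge():
    components = {(0, 2), (2, 2), (2, 3)}
    start = (0, 2)
    bridge = Bridge((start,), components - {start})
    extended = bridge.assemble_next()  # one bridge ending in 2/2, one ending in 2/3
    return [(str(b), b.score) for b in extended]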
def solve(inp):
components = [(int(line.split('/')[0]), int(line.split('/')[1])) for line in inp]
starting_comps = [c for c in components if 0 in c]
bridges = []
for sc in starting_comps:
bridges.append(Bridge((sc,), set(components)-set((sc,))))
complete_bridges = []
complete_bridges.extend(bridges)
for i in range(1000):
print('.', end='')
sys.stdout.flush()
new_bridges = []
for b in bridges:
new_bridges.extend(b.assemble_next())
if not new_bridges:
# Terminate once no new bridges can be built
break
bridges = new_bridges
complete_bridges.extend(new_bridges)
strongest_bridge = complete_bridges[np.argmax([b.score for b in complete_bridges])]
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Created on Thu Mar 30 9:30:00 2017
@author: gsutanto
"""
import re
import numpy as np
import os
import sys
import copy
import glob
import pickle
import shutil
from scipy import signal
from scipy.interpolate import interp1d
import pyplot_util as pypl_util
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
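# Example: list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]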
def computeMSEVarGTNMSE(predictions, ground_truth, axis=0):
mse = np.mean(
np.square(predictions - ground_truth),
axis=axis) # Mean-Squared Error (MSE)
var_ground_truth = np.var(
ground_truth, axis=axis) # Variance of the Ground-Truth
nmse = np.divide(mse,
var_ground_truth) # Normalized Mean-Squared Error (NMSE)
return mse, var_ground_truth, nmse
def computeNMSE(predictions, ground_truth, axis=0):
[_, _, nmse] = computeMSEVarGTNMSE(predictions, ground_truth, axis)
return nmse
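# Interpretation sketch (hypothetical data): NMSE divides the MSE by the
# ground-truth variance, so perfect predictions give NMSE == 0 and predicting
# the ground-truth mean everywhere gives NMSE close to 1, e.g.
#   computeNMSE(np.array([[1.1], [1.9]]), np.array([[1.0], [2.0]]))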
def computeWNMSE(predictions, ground_truth, weight, axis=0):
N_data = ground_truth.shape[0]
N_dims = ground_truth.shape[1]
wmse = np.mean(
np.multiply(
np.tile(weight, (1, N_dims)), np.square(predictions - ground_truth)),
axis=axis) # Weighted Mean-Squared Error (WMSE)
mean_gt = np.mean(ground_truth, axis=axis)
zero_mean_gt = ground_truth - np.tile(mean_gt, (N_data, 1))
wvar_gt = (1.0 / (N_data - 1)) * np.sum(
np.multiply(np.tile(weight, (1, N_dims)), np.square(zero_mean_gt)),
axis=axis) # Weighted Variance of the Ground-Truth
wnmse = np.divide(wmse,
wvar_gt) # Weighted Normalized Mean-Squared Error (WNMSE)
return wnmse
def computeSumSquaredL2Norm(matrix, axis=None):
return np.sum(np.square(matrix), axis=axis)
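# Example: computeSumSquaredL2Norm(np.array([[3.0, 4.0]])) -> 25.0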
def compareTwoNumericFiles(file_1_path,
file_2_path,
scalar_max_abs_diff_threshold=1.001e-5,
scalar_max_rel_abs_diff_threshold=1.001e-5,
is_relaxed_comparison=False):
file_1 = np.loadtxt(file_1_path)
file_2 = np.loadtxt(file_2_path)
return compareTwoMatrices(file_1, file_2, scalar_max_abs_diff_threshold,
scalar_max_rel_abs_diff_threshold, file_1_path,
file_2_path, is_relaxed_comparison)
def compareTwoMatrices(matrix1,
matrix2,
scalar_max_abs_diff_threshold=1.001e-5,
scalar_max_rel_abs_diff_threshold=1.001e-5,
name1="",
name2="",
is_relaxed_comparison=False):
assert (matrix1.shape == matrix2.shape
), "File dimension mis-match! %s vs %s" % (str(
matrix1.shape), str(matrix2.shape))
file_diff = matrix1 - matrix2
abs_diff = np.abs(file_diff)
rowvec_max_abs_diff = np.max(abs_diff, axis=0)
rowvec_max_idx_abs_diff = np.argmax(abs_diff, axis=0)
scalar_max_abs_diff = np.max(rowvec_max_abs_diff)
import numpy as np
from pandas import DataFrame
import matplotlib.pyplot as plt
def plot_train_curve(L, labels=["Learning Curve"], title='Training Curve'):
"""
Plot model training curve
Parameters
----------
L: list
Records list during training.
labels: list
Labels of different datasets.
title: str
Title of figure.
"""
if type(L[0]) != tuple:
x = range(1, len(L) + 1)
plt.plot(x, L, label=labels[0])
else:
datasets_size = len(L[0])
for i in range(datasets_size):
x = range(1, len(L) + 1)
v = [m_L[i] for m_L in L]
plt.plot(x, v, label=labels[i])
# no ticks
plt.xlabel("Steps")
plt.legend(loc="best")
plt.title(title)
plt.show()
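# Usage sketch (hypothetical values):
#   plot_train_curve([0.9, 0.6, 0.4, 0.3])                          # single curve
#   plot_train_curve([(0.9, 1.0), (0.6, 0.7)], ["train", "valid"])  # one curve per dataset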
def plot_surv_curve(df_survf, title="Survival Curve"):
"""
Plot survival curve.
Parameters
----------
df_survf: pandas.DataFrame or numpy.ndarray
Survival function of samples, shape of which is (n, #Time_Points).
`Time_Points` indicates the time point presented in columns of DataFrame.
title: str
Title of figure.
"""
if isinstance(df_survf, DataFrame):
plt.plot(df_survf.columns.values, np.transpose(df_survf.values))
"""
Copyright 2018 Defense Innovation Unit Experimental
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import json
import os
import time
import scipy.io
import numpy as np
from tqdm import tqdm
from scoring.matching import Matching
from scoring.rectangle import Rectangle
"""
Scoring code to calculate per-class precision and mean average precision.
Args:
predictions: a folder path of prediction files.
Prediction files should have filename format 'XYZ.tif.txt',
where 'XYZ.tif' is the xView TIFF file being predicted on.
Prediction files should be in space-delimited csv format, with each
line like (xmin ymin xmax ymax class_prediction score_prediction).
ie ("predictions/")
groundtruth: a filepath to ground truth labels (GeoJSON format)
ie ("ground_truth.geojson")
output (-o): a folder path where output metrics are saved
ie ("scores/")
Outputs:
Writes two files to the 'output' parameter folder: 'score.txt' and 'metrics.txt'
'score.txt' contains a single floating point value output: mAP
'metrics.txt' contains the remaining metrics in per-line format (metric/class_num: score_float)
"""
#@profile
def get_labels(fname):
"""
Processes a WorldView3 GEOJSON file
Args:
fname: filepath to the GeoJson file.
Outputs:
Bounding box coordinate array, Chip-name array, and Classes array
"""
with open(fname) as f:
data = json.load(f)
coords = np.zeros((len(data['features']), 4))
chips = np.zeros((len(data['features'])), dtype="object")
classes = np.zeros((len(data['features'])))
for i in tqdm(range(len(data['features']))):
if data['features'][i]['properties']['bounds_imcoords']:
b_id = data['features'][i]['properties']['image_id']
val = np.array([int(num) for num in data['features'][i]['properties']['bounds_imcoords'].split(",")])
chips[i] = b_id
classes[i] = data['features'][i]['properties']['type_id']
if val.shape[0] != 4:
raise ValueError('A bounding box should have 4 entries!')
else:
coords[i] = val
else:
chips[i] = 'None'
return coords, chips, classes
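# The three returned arrays are aligned by feature index: coords is (N, 4)
# with (xmin, ymin, xmax, ymax), chips is (N,) holding the TIFF chip name
# (or 'None' when a feature has no bounding box), and classes is (N,) of type_id.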
def convert_to_rectangle_list(coordinates):
"""
Converts a list of coordinates to a list of rectangles
Args:
coordinates: a flattened list of bounding box coordinates in format
(xmin,ymin,xmax,ymax)
Outputs:
A list of rectangles
"""
rectangle_list = []
number_of_rects = int(len(coordinates) / 4)
for i in range(number_of_rects):
rectangle_list.append(Rectangle(
coordinates[4 * i], coordinates[4 * i + 1], coordinates[4 * i + 2],
coordinates[4 * i + 3]))
return rectangle_list
def ap_from_pr(p, r):
"""
Calculates AP from precision and recall values as specified in
the PASCAL VOC devkit.
Args:
p: an array of precision values
r: an array of recall values
Outputs:
An average precision value
"""
r = np.concatenate([[0], r, [1]])
p = np.concatenate([[0], p, [0]])
for i in range(p.shape[0] - 2, 0, -1):
if p[i] > p[i - 1]:
p[i - 1] = p[i]
i = np.where(r[1:] != r[:len(r) - 1])[0] + 1
ap = np.sum(
(r[i] - r[i - 1]) * p[i])
return ap
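# Worked example: ap_from_pr(np.array([1.0, 0.5]), np.array([0.5, 1.0]))
# interpolates the precision envelope to [1.0, 1.0, 0.5, 0.0] over recall
# [0.0, 0.5, 1.0, 1.0] and returns 0.5*1.0 + 0.5*0.5 = 0.75.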
# @profile
def score(path_predictions, path_groundtruth, path_output, iou_threshold=.5):
"""
Compute metrics on a number of prediction files, given a folder of prediction files
and a ground truth. Primary metric is mean average precision (mAP).
Args:
path_predictions: a folder path of prediction files.
Prediction files should have filename format 'XYZ.tif.txt',
where 'XYZ.tif' is the xView TIFF file being predicted on.
Prediction files should be in space-delimited csv format, with each
line like (xmin ymin xmax ymax class_prediction score_prediction)
path_groundtruth: a file path to a single ground truth geojson
path_output: a folder path for output scoring files
iou_threshold: a float between 0 and 1 indicating the percentage
iou required to count a prediction as a true positive
Outputs:
Writes two files to the 'path_output' parameter folder: 'score.txt' and 'metrics.txt'
'score.txt' contains a single floating point value output: mAP
'metrics.txt' contains the remaining metrics in per-line format (metric/class_num: score_float)
Raises:
ValueError: if there are files in the prediction folder that are not in the ground truth geojson.
EG a prediction file is titled '15.tif.txt', but the file '15.tif' is not in the ground truth.
"""
assert (iou_threshold < 1 and iou_threshold > 0)
ttime = time.time()
boxes_dict = {}
pchips = []
stclasses = []
num_preds = 0
for file in tqdm(os.listdir(path_predictions)):
fname = file.split(".txt")[0]
pchips.append(fname)
with open(path_predictions + file, 'r') as f:
arr = np.array(list(csv.reader(f, delimiter=" ")))
if arr.shape[0] == 0:
# If the file is empty, we fill it in with an array of zeros
boxes_dict[fname] = np.array([[0, 0, 0, 0, 0, 0]])
num_preds += 1
else:
arr = arr[:, :6].astype(np.float64)
threshold = 0
arr = arr[arr[:, 5] > threshold]
stclasses += list(arr[:, 4])
num_preds += arr.shape[0]
if np.any(arr[:, :4] < 0):
raise ValueError('Bounding boxes cannot be negative.')
if np.any(arr[:, 5] < 0) or np.any(arr[:, 5] > 1):
raise ValueError('Confidence scores should be between 0 and 1.')
boxes_dict[fname] = arr[:, :6]
pchips = sorted(pchips)
stclasses = np.unique(stclasses).astype(np.int64)
# gt_coords, gt_chips, gt_classes = get_labels(path_groundtruth)
# scipy.io.savemat('ground_truth.mat',{'gt_coords':gt_coords,'gt_chips':gt_chips,'gt_classes':gt_classes})
mat = scipy.io.loadmat('scoring/ground_truth.mat')
gt_coords, gt_chips, gt_classes = mat['gt_coords'], mat['gt_chips'], mat['gt_classes']
gt_unique = np.unique(gt_classes.astype(np.int64))
max_gt_cls = 100
# every prediction file must correspond to a chip present in the ground truth
if not set(pchips).issubset(set(np.unique(gt_chips))):
raise ValueError('The prediction files {%s} are not in the ground truth.' % str(set(pchips) - set(np.unique(gt_chips))))
print("Number of Predictions: %d" % num_preds)
print("Number of GT: %d" % np.sum(gt_classes.shape))
per_file_class_data = {}
for i in gt_unique:
per_file_class_data[i] = [[], []]
num_gt_per_cls = np.zeros((max_gt_cls))
attempted = np.zeros(max_gt_cls)
for file_ind in range(len(pchips)):
print(pchips[file_ind])
det_box = boxes_dict[pchips[file_ind]][:, :4]
det_scores = boxes_dict[pchips[file_ind]][:, 5]
det_cls = boxes_dict[pchips[file_ind]][:, 4]
gt_box = gt_coords[(gt_chips == pchips[file_ind]).flatten()]
gt_cls = gt_classes[(gt_chips == pchips[file_ind])]
for i in gt_unique:
s = det_scores[det_cls == i]
ssort = np.argsort(s)[::-1]
per_file_class_data[i][0] += s[ssort].tolist()
gt_box_i_cls = gt_box[gt_cls == i].flatten().tolist()
det_box_i_cls = det_box[det_cls == i]
det_box_i_cls = det_box_i_cls[ssort].flatten().tolist()
gt_rects = convert_to_rectangle_list(gt_box_i_cls)
rects = convert_to_rectangle_list(det_box_i_cls)
attempted[i] += len(rects)
matching = Matching(gt_rects, rects)
rects_matched, gt_matched = matching.greedy_match(iou_threshold)
# we aggregate confidence scores, rectangles, and num_gt across classes
# per_file_class_data[i][0] += det_scores[det_cls == i].tolist()
per_file_class_data[i][1] += rects_matched
num_gt_per_cls[i] += len(gt_matched)
average_precision_per_class = np.ones(max_gt_cls) * float('nan')
per_class_p = np.ones(max_gt_cls) * float('nan')
per_class_r = np.ones(max_gt_cls) * float('nan')
per_class_rcount = np.ones(max_gt_cls) * float('nan')
for i in gt_unique:
scores = np.array(per_file_class_data[i][0])
rects_matched = np.array(per_file_class_data[i][1])
if num_gt_per_cls[i] != 0:
sorted_indices = np.argsort(scores)[::-1]
tp_sum = np.cumsum(rects_matched[sorted_indices])
fp_sum = np.cumsum(np.logical_not(rects_matched[sorted_indices]))
precision = tp_sum / (tp_sum + fp_sum + np.spacing(1))
recall = tp_sum / num_gt_per_cls[i]
per_class_p[i] = np.sum(rects_matched) / len(rects_matched)
per_class_r[i] = np.sum(rects_matched) / num_gt_per_cls[i]
per_class_rcount[i] = np.sum(rects_matched)
ap = ap_from_pr(precision, recall)
else:
ap = 0
average_precision_per_class[i] = ap
# metric splits
metric_keys = ['map', 'map/small', 'map/medium', 'map/large',
'map/common', 'map/rare']
splits = {
'map/small': [17, 18, 19, 20, 21, 23, 24, 26, 27, 28, 32, 41, 60,
62, 63, 64, 65, 66, 91],
'map/medium': [11, 12, 15, 25, 29, 33, 34, 35, 36, 37, 38, 42, 44,
47, 50, 53, 56, 59, 61, 71, 72, 73, 76, 84, 86, 93, 94],
'map/large': [13, 40, 45, 49, 51, 52, 54, 55, 57, 74, 77, 79, 83, 89],
'map/common': [13, 17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 34, 35, 41,
47, 60, 63, 64, 71, 72, 73, 76, 77, 79, 83, 86, 89, 91],
'map/rare': [11, 12, 15, 29, 32, 33, 36, 37, 38, 40, 42, 44, 45, 49, 50,
51, 52, 53, 54, 55, 56, 57, 59, 61, 62, 65, 66, 74, 84, 93, 94]
}
n = [ 11.0, 12.0, 13.0, 15.0, 17.0, 18.0, 19.0, 20.0, 21.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 40.0, 41.0, 42.0, 44.0, 45.0, 47.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 71.0, 72.0, 73.0, 74.0, 76.0, 77.0, 79.0, 83.0, 84.0, 86.0, 89.0, 91.0, 93.0, 94.0]
with open('data/xview.names') as f:
lines = f.readlines()
map_dict = {}
for i in range(60):
map_dict[lines[i].replace('\n','')] = average_precision_per_class[int(n[i])]
print(np.nansum(per_class_rcount), map_dict)
vals = {}
vals['map'] = np.nanmean(average_precision_per_class)
'''
LDM with AA on embeddings vs. RAA:
Run with synthetic data K = 3
Synthetic alphas = 0.2
N = 100 (wip)
K = 2 .. 10
D = 2
Inits = 5 #Number of inits.
seed = 1999
sample_size = 1
Lr = 0.01
Iterations = 10,000
'''
from src.models.train_DRRAA_module import DRRAA
from src.models.train_LSM_module import LSM, LSMAA
from src.models.train_KAA_module import KAA
from src.models.calcNMI import calcNMI
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import matplotlib as mpl
from src.data.synthetic_data import main
from src.data.synthetic_data import ideal_prediction
import networkx as nx
import archetypes as arch
import warnings
warnings.filterwarnings("ignore")
np.random.seed(42)
torch.manual_seed(42)
rand=False
#set test and train split seed. We want the same train and test split in order to know that differences
#are because of inits.
seed_split = 42
#Run 10 different seeds with 10 different inits. Take the best of the 10 inits and save as best in seed.
#Then plot the auc and nmi with errorbars on the 10 best in seeds.
alphas = [0.2,1,5]
num_init = 10
raa_best_in_seed_aucs = np.zeros((len(alphas),num_init))
lsm_best_in_seed_aucs = np.zeros((len(alphas),num_init))
lsmaa_best_in_seed_aucs = np.zeros((len(alphas),num_init))
kaa_best_in_seed_aucs = np.zeros((len(alphas),num_init))
raainit_best_in_seed_aucs = np.zeros((len(alphas),num_init))
raa_best_in_seed_nmis = np.zeros((len(alphas),num_init))
lsm_best_in_seed_nmis = np.zeros((len(alphas),num_init))
lsmaa_best_in_seed_nmis = np.zeros((len(alphas),num_init))
kaa_best_in_seed_nmis = np.zeros((len(alphas),num_init))
raainit_best_in_seed_nmis = np.zeros((len(alphas),num_init))
seed_init = 0
####################################
## Synthetic model comparison ##
## RAA and all RAAs without stuff ##
####################################
#Defining models
iter = 10
for alpha_idx, alpha in enumerate(alphas):
if rand:
np.random.seed(1)
torch.manual_seed(1)
else:
np.random.seed(42)
torch.manual_seed(42)
k=3
n = 100
d = 2
adj_m, z, A, Z_true, beta, partition = main(alpha=alpha, k=k, dim=d, nsamples=n, rand=rand)
G = nx.from_numpy_matrix(adj_m.numpy())
temp = [x for x in nx.generate_edgelist(G, data=False)]
edge_list = np.zeros((2, len(temp)))
for i in range(len(temp)):
edge_list[0, i] = temp[i].split()[0]
edge_list[1, i] = temp[i].split()[1]
raa_models = []
lsm_models = []
lsmaa_models = []
kaa_models = []
raainit_models = []
raa_nmi_models = []
lsm_nmi_models = []
lsmaa_nmi_models = []
kaa_nmi_models = []
raainit_nmi_models = []
for init in range(num_init):
raa = DRRAA(k=k,
d=d,
sample_size=1,
data=edge_list,
data_type = "edge list",
link_pred = True,
seed_split=seed_split,
seed_init=seed_init
)
raa.train(iterations=iter)
raa_models.append(raa)
lsm = LSM(d=d,
sample_size=1,
data=edge_list,
data_type="edge list",
link_pred=True,
seed_init=seed_init,
seed_split = seed_split
)
lsm.train(iterations=iter)
lsm_models.append(lsm)
lsmaa = LSMAA(d=d,
k=k,
sample_size=1,
data = edge_list,
data_type = "Edge list",
link_pred = True,
seed_split=seed_split,
seed_init=seed_init
)
lsmaa.train(iterations=iter)
lsmaa_models.append(lsmaa)
kaa = KAA(k=k,
data=edge_list,
data_type='edge list',
type='jaccard',
link_pred=True,
seed_split = seed_split,
seed_init = seed_init
)
kaa.train(iterations=iter)
kaa_models.append(kaa)
kaainit = KAA(k=k,
data=edge_list,
data_type="edge list",
link_pred=True,
seed_split=seed_split,
seed_init=seed_init
)
kaainit.train(iterations=1000)
raainit = DRRAA(init_Z=kaainit.S.detach(),
k=k,
d=d,
sample_size=1,
data=edge_list,
data_type="edge list",
link_pred=True,
seed_split=seed_split,
seed_init=seed_init
)
raainit.train(iterations=iter)
raainit_models.append(raainit)
#############################################################################
#NMIs - require full data, so link_pred=False, else everything is the same :)
raa_nmi = DRRAA(k=k,
d=d,
sample_size=1,
data=edge_list,
data_type="edge list",
link_pred=False,
seed_init=seed_init
)
raa_nmi.train(iterations=iter)
raa_nmi_models.append(raa_nmi)
lsm_nmi = LSM(d=d,
sample_size=1,
data=edge_list,
data_type="edge list",
link_pred=False,
seed_init=seed_init
)
lsm_nmi.train(iterations=iter)
lsm_nmi_models.append(lsm_nmi)
lsmaa_nmi = LSMAA(d=d,
k=k,
sample_size=1,
data = edge_list,
data_type = "Edge list",
link_pred = False,
seed_split=seed_split
)
lsmaa_nmi.train(iterations=iter)
lsmaa_nmi_models.append(lsmaa_nmi)
kaa_nmi = KAA(k=k,
data=adj_m.numpy(),
type='jaccard',
link_pred=False,
seed_split = seed_split
)
kaa_nmi.train(iterations=iter)
kaa_nmi_models.append(kaa_nmi)
kaainit = KAA(k=k,
data=adj_m.numpy(),
link_pred=False,
seed_init=seed_init
)
kaainit.train(iterations=1000)
raainit_nmi = DRRAA(init_Z=kaainit.S.detach(),
k=k,
d=d,
sample_size=1,
data=edge_list,
data_type="edge list",
link_pred=False,
seed_init=seed_init
)
raainit_nmi.train(iterations=iter)
raainit_nmi_models.append(raainit_nmi)
#make sure to increase the initialisation-seed ;)
seed_init += 1
print(seed_init)
raa_aucs = []
lsm_aucs = []
lsmaa_aucs = []
kaa_aucs = []
raainit_aucs = []
raa_nmis = []
lsm_nmis = []
lsmaa_nmis = []
kaa_nmis = []
raainit_nmis = []
for i in range(num_init):
#calc aucs
raa_auc, _, _ = raa_models[i].link_prediction()
lsm_auc, _, _ = lsm_models[i].link_prediction()
lsmaa_auc, _, _ = lsmaa_models[i].link_prediction()
kaa_auc, _, _ = kaa_models[i].link_prediction()
raainit_auc, _, _ = raainit_models[i].link_prediction()
raa_aucs.append(raa_auc)
lsm_aucs.append(lsm_auc)
lsmaa_aucs.append(lsmaa_auc)
kaa_aucs.append(kaa_auc)
raainit_aucs.append(raainit_auc)
#calc nmis
raa_nmi = calcNMI(F.softmax(raa_nmi_models[i].Z.detach(),dim=0), Z_true)
lsm_nmi = calcNMI(F.softmax(lsm_nmi_models[i].latent_Z.detach().T,dim=0), Z_true)
aa = arch.AA(n_archetypes=k)
Z = aa.fit_transform(lsmaa_nmi_models[i].latent_Z.detach().numpy())
lsmaa_nmi = calcNMI(torch.from_numpy(Z).T.float(), Z_true)
kaa_nmi = calcNMI(F.softmax(kaa_nmi_models[i].S.detach(),dim=0), Z_true)
raainit_nmi = calcNMI(F.softmax(raainit_nmi_models[i].Z.detach(),dim=0), Z_true)
raa_nmis.append(raa_nmi)
lsm_nmis.append(lsm_nmi)
lsmaa_nmis.append(lsmaa_nmi)
kaa_nmis.append(kaa_nmi)
raainit_nmis.append(raainit_nmi)
#append aucs and NMIs
raa_best_in_seed_aucs[alpha_idx,:] = raa_aucs
lsm_best_in_seed_aucs[alpha_idx,:] = lsm_aucs
lsmaa_best_in_seed_aucs[alpha_idx,:] = lsmaa_aucs
kaa_best_in_seed_aucs[alpha_idx,:] = kaa_aucs
raainit_best_in_seed_aucs[alpha_idx, :] = raainit_aucs
raa_best_in_seed_nmis[alpha_idx,:] = raa_nmis
lsm_best_in_seed_nmis[alpha_idx,:] = lsm_nmis
lsmaa_best_in_seed_nmis[alpha_idx,:] = lsmaa_nmis
kaa_best_in_seed_nmis[alpha_idx,:] = kaa_nmis
raainit_best_in_seed_nmis[alpha_idx, :] = raainit_nmis
avg_raa_aucs = np.mean(raa_best_in_seed_aucs,1)
avg_lsm_aucs = np.mean(lsm_best_in_seed_aucs,1)
avg_lsmaa_aucs = np.mean(lsmaa_best_in_seed_aucs,1)
avg_kaa_aucs = np.mean(kaa_best_in_seed_aucs,1)
from tensordash.torchdash import Torchdash
import torch
import torchvision
from torchvision import transforms, datasets
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
train = datasets.MNIST('', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()
]))
test = datasets.MNIST('', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor()
]))
trainset = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)
testset = torch.utils.data.DataLoader(test, batch_size=10, shuffle=False)
class Net(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(28*28, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, 64)
self.fc4 = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return F.log_softmax(x, dim=1)
net = Net()
import torch.optim as optim
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
my_cb = Torchdash(ModelName='<YOUR_MODEL_NAME>', email = '<YOUR_EMAIL_ID>', password= '<<PASSWORD>>')
epochs = 10
try:
for epoch in range(epochs):
losses = []
for data in trainset:
X, y = data
net.zero_grad()
output = net(X.view(-1,784))
loss = F.nll_loss(output, y)
losses.append(loss.item())
loss.backward()
optimizer.step()
losses = np.asarray(losses)
print("Send Value")
my_cb.sendLoss(loss = np.mean(losses))
# This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains tests for the orbital.py PyRate module.
"""
import os
import shutil
import tempfile
import unittest
from itertools import product
from numpy import empty, dot, concatenate, float32
from numpy import nan, isnan, array
from os.path import join
import numpy as np
from numpy.linalg import pinv, inv
from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.linalg import lstsq
from .common import small5_mock_ifgs, MockIfg
from pyrate.core import algorithm, config as cf
from pyrate.core.orbital import INDEPENDENT_METHOD, NETWORK_METHOD, PLANAR, \
QUADRATIC, PART_CUBIC
from pyrate.core.orbital import OrbitalError, _orbital_correction
from pyrate.core.orbital import get_design_matrix, get_network_design_matrix
from pyrate.core.orbital import _get_num_params, remove_orbital_error
from pyrate.core.shared import Ifg
from pyrate.core.shared import nanmedian
from tests.common import TEST_CONF_ROIPAC, IFMS16
from tests.common import SML_TEST_LEGACY_ORBITAL_DIR
from tests.common import SML_TEST_TIF, small_data_setup
from tests.common import small_ifg_file_list
# Lookup from number of model coefficients (excluding offset) to orbital model degree
DEG_LOOKUP = {
2: PLANAR,
5: QUADRATIC,
6: PART_CUBIC}
NUM_COEF_LOOKUP = {
PLANAR: 2,
QUADRATIC: 5,
PART_CUBIC: 6}
class SingleDesignMatrixTests(unittest.TestCase):
"""
Tests to verify correctness of basic planar & quadratic design matrices, or
DMs. This class serves two purposes: first, ensuring the independent method
DMs are produced correctly; secondly, checking that these individual DMs are
subsets of the larger DM 'grid' required for the networked orbital correction
method.
"""
def setUp(self):
# faked cell sizes
self.xs = 0.75
self.ys = 0.8
self.ifg = Ifg(join(SML_TEST_TIF, 'geo_060619-061002_unw.tif'))
self.ifg.open()
self.ifg.nodata_value = 0
self.m = MockIfg(self.ifg, 3, 4)
self.m.x_size = self.xs
self.m.y_size = self.ys
# tests for planar model
def test_create_planar_dm(self):
offset = False
act = get_design_matrix(self.m, PLANAR, offset)
self.assertEqual(act.shape, (self.m.num_cells, 2))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, PLANAR, offset)
assert_array_equal(act, exp)
def test_create_planar_dm_offsets(self):
offset = True
act = get_design_matrix(self.m, PLANAR, offset)
self.assertEqual(act.shape, (self.m.num_cells, 3))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, PLANAR, offset)
assert_array_almost_equal(act, exp)
# tests for quadratic model
def test_create_quadratic_dm(self):
offset = False
act = get_design_matrix(self.m, QUADRATIC, offset)
self.assertEqual(act.shape, (self.m.num_cells, 5))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, QUADRATIC, offset)
assert_array_equal(act, exp)
def test_create_quadratic_dm_offsets(self):
offset = True
act = get_design_matrix(self.m, QUADRATIC, offset)
self.assertEqual(act.shape, (self.m.num_cells, 6))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, QUADRATIC, offset)
assert_array_equal(act, exp)
# tests for partial cubic model
def test_create_partcubic_dm(self):
offset = False
act = get_design_matrix(self.m, PART_CUBIC, offset)
self.assertEqual(act.shape, (self.m.num_cells, 6))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, PART_CUBIC, offset)
assert_array_equal(act, exp)
def test_create_partcubic_dm_offsets(self):
offset = True
act = get_design_matrix(self.m, PART_CUBIC, offset)
self.assertEqual(act.shape, (self.m.num_cells, 7))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, PART_CUBIC, offset)
assert_array_equal(act, exp)
# tests for unittest_dm() assuming network method
def test_create_planar_dm_network(self):
# networked method planar version should not have offsets col
ncol_exp = 2
exp = unittest_dm(self.m, NETWORK_METHOD, PLANAR, False)
self.assertEqual(exp.shape, (self.m.num_cells, ncol_exp))
exp2 = unittest_dm(self.m, NETWORK_METHOD, PLANAR, True)
self.assertEqual(exp2.shape, (self.m.num_cells, ncol_exp))
assert_array_equal(exp, exp2)
def test_create_quadratic_dm_network(self):
# quadratic version with networked method does not have offsets col
ncol_exp = 5
exp = unittest_dm(self.m, NETWORK_METHOD, QUADRATIC, False)
self.assertEqual(exp.shape, (self.m.num_cells, ncol_exp))
exp2 = unittest_dm(self.m, NETWORK_METHOD, QUADRATIC, True)
self.assertEqual(exp2.shape, (self.m.num_cells, ncol_exp))
assert_array_equal(exp, exp2)
def test_create_partcubic_dm_network(self):
# partial cubic version with networked method does not have offsets col
ncol_exp = 6
exp = unittest_dm(self.m, NETWORK_METHOD, PART_CUBIC, False)
self.assertEqual(exp.shape, (self.m.num_cells, ncol_exp))
exp2 = unittest_dm(self.m, NETWORK_METHOD, PART_CUBIC, True)
self.assertEqual(exp2.shape, (self.m.num_cells, ncol_exp))
assert_array_equal(exp, exp2)
class IndependentCorrectionTests(unittest.TestCase):
"""Test cases for the orbital correction component of PyRate."""
def setUp(self):
self.ifgs = small5_mock_ifgs()
_add_nodata(self.ifgs)
for ifg in self.ifgs:
ifg.x_size = 90.0
ifg.y_size = 89.5
ifg.open()
def alt_orbital_correction(self, ifg, deg, offset):
data = ifg.phase_data.reshape(ifg.num_cells)
dm = get_design_matrix(ifg, deg, offset)[~isnan(data)]
fd = data[~isnan(data)].reshape((dm.shape[0], 1))
dmt = dm.T
invNbb = inv(dmt.dot(dm))
orbparams = invNbb.dot(dmt.dot(fd))
alt_params = lstsq(dm, fd)[0]
# FIXME: precision
assert_array_almost_equal(orbparams, alt_params, decimal=2)
dm2 = get_design_matrix(ifg, deg, offset)
if offset:
fullorb = np.reshape(np.dot(dm2[:, :-1], orbparams[:-1]),
ifg.phase_data.shape)
else:
fullorb = np.reshape(np.dot(dm2, orbparams), ifg.phase_data.shape)
offset_removal = nanmedian(
np.reshape(ifg.phase_data - fullorb, (1, -1)))
fwd_correction = fullorb - offset_removal
# ifg.phase_data -= (fullorb - offset_removal)
return ifg.phase_data - fwd_correction
def check_correction(self, degree, method, offset, decimal=2):
orig = array([c.phase_data.copy() for c in self.ifgs])
exp = [self.alt_orbital_correction(i, degree, offset) for i in self.ifgs]
params = dict()
params[cf.ORBITAL_FIT_METHOD] = method
params[cf.ORBITAL_FIT_DEGREE] = degree
params[cf.PARALLEL] = False
params[cf.NO_DATA_VALUE] = 0
params[cf.NAN_CONVERSION] = False
for i in self.ifgs:
i.mm_converted = True
_orbital_correction(self.ifgs, params, None, offset)
corrected = array([c.phase_data for c in self.ifgs])
self.assertFalse((orig == corrected).all())
self.check_results(self.ifgs, orig) # test shape, data is non zero
# FIXME: is decimal=2 close enough?
for i, (e, a) in enumerate(zip(exp, corrected)):
assert_array_almost_equal(e, a, decimal=decimal)
def check_results(self, ifgs, corrections):
"""Helper method for result verification"""
for i, c in zip(ifgs, corrections):
ys, xs = c.shape
self.assertEqual(i.nrows, ys)
self.assertEqual(i.ncols, xs)
# ensure there is real data
self.assertFalse(isnan(i.phase_data).all())
self.assertFalse(isnan(c).all())
self.assertTrue(c.ptp() != 0) # ensure range of values in grid
def test_independent_correction_planar(self):
self.check_correction(PLANAR, INDEPENDENT_METHOD, False)
def test_independent_correction_planar_offsets(self):
self.check_correction(PLANAR, INDEPENDENT_METHOD, True)
def test_independent_correction_quadratic(self):
self.check_correction(QUADRATIC, INDEPENDENT_METHOD, False)
def test_independent_correction_quadratic_offsets(self):
self.check_correction(QUADRATIC, INDEPENDENT_METHOD, True)
def test_independent_correction_partcubic(self):
self.check_correction(PART_CUBIC, INDEPENDENT_METHOD, False)
def test_independent_correction_partcubic_offsets(self):
self.check_correction(PART_CUBIC, INDEPENDENT_METHOD, True, decimal=1)
class ErrorTests(unittest.TestCase):
"""Tests for the networked correction method"""
def test_invalid_ifgs_arg(self):
# min requirement is 1 ifg, can still subtract one epoch from the other
self.assertRaises(OrbitalError, get_network_design_matrix, [], PLANAR, True)
def test_invalid_degree_arg(self):
# test failure of a few different args for 'degree'
ifgs = small5_mock_ifgs()
for d in range(-5, 1):
self.assertRaises(OrbitalError, get_network_design_matrix, ifgs, d, True)
for d in range(4, 7):
self.assertRaises(OrbitalError, get_network_design_matrix, ifgs, d, True)
def test_invalid_method(self):
# test failure of a few different args for 'method'
ifgs = small5_mock_ifgs()
params = dict()
params[cf.ORBITAL_FIT_DEGREE] = PLANAR
params[cf.PARALLEL] = False
for m in [None, 5, -1, -3, 45.8]:
params[cf.ORBITAL_FIT_METHOD] = m
self.assertRaises(OrbitalError, _orbital_correction, ifgs, params, None)
def test_multilooked_ifgs_arg(self):
# check some bad args for network method with multilooked ifgs
ifgs = small5_mock_ifgs()
args = [[None, None, None, None, None], ["X"] * 5]
params = dict()
params[cf.ORBITAL_FIT_METHOD] = NETWORK_METHOD
params[cf.PARALLEL] = False
params[cf.ORBITAL_FIT_DEGREE] = PLANAR
for a in args:
args = (ifgs, params, a)
self.assertRaises(OrbitalError, _orbital_correction, *args)
# ensure failure if # ifgs doesn't match # mlooked ifgs
args = (ifgs, params, ifgs[:4])
self.assertRaises(OrbitalError, _orbital_correction, *args)
class NetworkDesignMatrixTests(unittest.TestCase):
"""Contains tests verifying creation of sparse network design matrix."""
def setUp(self):
self.ifgs = small5_mock_ifgs()
_add_nodata(self.ifgs)
self.nifgs = len(self.ifgs)
self.ncells = self.ifgs[0].num_cells
self.date_ids = get_date_ids(self.ifgs)
self.nepochs = len(self.date_ids)
assert self.nepochs == 6
for ifg in self.ifgs:
ifg.X_SIZE = 90.0
ifg.Y_SIZE = 89.5
def test_planar_network_dm(self):
ncoef = 2
offset = False
act = get_network_design_matrix(self.ifgs, PLANAR, offset)
self.assertEqual(act.shape, (self.ncells * self.nifgs, ncoef * self.nepochs))
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_planar_network_dm_offset(self):
ncoef = 2 # NB: doesn't include offset col
offset = True
act = get_network_design_matrix(self.ifgs, PLANAR, offset)
self.assertEqual(act.shape[0], self.ncells * self.nifgs)
self.assertEqual(act.shape[1], (self.nepochs * ncoef) + self.nifgs)
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_quadratic_network_dm(self):
ncoef = 5
offset = False
act = get_network_design_matrix(self.ifgs, QUADRATIC, offset)
self.assertEqual(act.shape, (self.ncells * self.nifgs, ncoef * self.nepochs))
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_quadratic_network_dm_offset(self):
ncoef = 5
offset = True
act = get_network_design_matrix(self.ifgs, QUADRATIC, offset)
self.assertEqual(act.shape[0], self.ncells * self.nifgs)
self.assertEqual(act.shape[1], (self.nepochs * ncoef) + self.nifgs)
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_partcubic_network_dm(self):
ncoef = 6
offset = False
act = get_network_design_matrix(self.ifgs, PART_CUBIC, offset)
self.assertEqual(act.shape, (self.ncells * self.nifgs, ncoef * self.nepochs))
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_partcubic_network_dm_offset(self):
ncoef = 6
offset = True
act = get_network_design_matrix(self.ifgs, PART_CUBIC, offset)
self.assertEqual(act.shape[0], self.ncells * self.nifgs)
self.assertEqual(act.shape[1], (self.nepochs * ncoef) + self.nifgs)
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def check_equality(self, ncoef, dm, ifgs, offset):
"""
Internal test function to check subsets against network design matrix
ncoef - base number of coefficients, without extra col for offsets
dm - network design matrix to check the results
ifgs - sequence of Ifg objs
offset - boolean to include extra parameters for model offsets
"""
deg = DEG_LOOKUP[ncoef]
nparams = ncoef * self.nepochs # index of 1st offset col (renamed from 'np' to avoid shadowing the numpy alias)
for i, ifg in enumerate(ifgs):
exp = unittest_dm(ifg, NETWORK_METHOD, deg, offset)
self.assertEqual(exp.shape, (ifg.num_cells, ncoef))
ib1, ib2 = [x * self.ncells for x in (i, i+1)] # row start/end
jbm = ncoef * self.date_ids[ifg.master] # starting col index for master
jbs = ncoef * self.date_ids[ifg.slave] # col start for slave
assert_array_almost_equal(-exp, dm[ib1:ib2, jbm:jbm+ncoef])
assert_array_almost_equal( exp, dm[ib1:ib2, jbs:jbs+ncoef])
# ensure remaining rows/cols are zero for this ifg NOT inc offsets
assert_array_equal(0, dm[ib1:ib2, :jbm]) # all cols leading up to master
assert_array_equal(0, dm[ib1:ib2, jbm + ncoef:jbs]) # cols btwn mas/slv
assert_array_equal(0, dm[ib1:ib2, jbs + ncoef:nparams]) # to end of non offsets
# check offset cols for 1s and 0s
if offset is True:
ip1 = i + nparams # offset column index
assert_array_equal(1, dm[ib1:ib2, ip1])
assert_array_equal(0, dm[ib1:ib2, nparams:ip1]) # cols before offset col
assert_array_equal(0, dm[ib1:ib2, ip1 + 1:]) # cols after offset col
# components for network correction testing
def network_correction(ifgs, deg, off, ml_ifgs=None, tol=1e-6):
"""
Compares results of orbital_correction() to alternate implementation.
deg - PLANAR, QUADRATIC or PART_CUBIC
off - True/False to calculate correction with offsets
"""
ncells = ifgs[0].num_cells
if ml_ifgs:
ml_nc = ml_ifgs[0].num_cells
ml_data = concatenate([i.phase_data.reshape(ml_nc) for i in ml_ifgs])
dm = get_network_design_matrix(ml_ifgs, deg, off)[~isnan(ml_data)]
fd = ml_data[~isnan(ml_data)].reshape((dm.shape[0], 1))
else:
data = concatenate([i.phase_data.reshape(ncells) for i in ifgs])
dm = get_network_design_matrix(ifgs, deg, off)[~isnan(data)]
# encoding: latin2
"""Algorithm utilities
G{packagetree core}
"""
__author__ = "<NAME>"
__credits__ = "Copyright (c) 2009-11 <NAME>"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "<EMAIL>"
from copy import deepcopy
import numpy as np
from objFunctions import makeObjDict, objectiveFunctionTypeDispatcher
from selectionTypeFunctions import selectionTypeDispatcher
from warnings import warn
from time import time
from memory import ExtendedMemory as ExtMem
from areacl import AreaCl
from helperfunctions import sortedKeys
from os import getpid
class RegionMaker:
"""
This class deals with the large number of methods required during both the
construction and local search phases. It takes the area instances and
coordinates them during the solution process. It also sends information to
Memory when needed.
"""
def __init__(self, am, pRegions=2, initialSolution=[],
seedSelection = "kmeans",
distanceType = "EuclideanSquared",
distanceStat = "Centroid",
selectionType = "Minimum",
alpha = 0.2,
numRegionsType = "Exogenous",
objectiveFunctionType = "SS",
threshold = 0.0,
weightsDistanceStat = [],
weightsObjectiveFunctionType = [],
indexDataStat = [],
indexDataOF = []):
"""
@type am: AreaManager
@param am: Area manager object.
@type pRegions: integer
@keyword pRegions: Number of regions in scheme
@type initialSolution: list
@keyword initialSolution: List of area IDs forming an initial solution (seeds).
@type distanceType: string
@keyword distanceType: Type of distance to be used, by default "EuclideanSquared"
@type distanceStat: string
@keyword distanceStat: Type of conversion used for summarizing distance, by default "Centroid"
@type selectionType: string
@keyword selectionType: Type of selection criterion for construction phase, by default "Minimum"
@type alpha: float
@keyword alpha: float in the interval [0, 1]; used for GRASP selection only.
@type numRegionsType: string
@keyword numRegionsType: Type of constructive method (Exogenous, EndogenousThreshold,
EndogenousRange), by default "Exogenous"
@type objectiveFunctionType: string
@keyword objectiveFunctionType: Method to calculate the objective function, by default "Total"
@type threshold: float
@keyword threshold: Minimum population threshold to be satisfied for each region
@type weightsDistanceStat: list
@keyword weightsDistanceStat: Weights applied when summarizing distances.
@type weightsObjectiveFunctionType: list
@keyword weightsObjectiveFunctionType: Weights applied when combining several objective functions.
@type indexDataStat: list
@keyword indexDataStat: Indices of the data columns used by the distance statistic.
@type indexDataOF: list
@keyword indexDataOF: Indices of the data columns used by the objective function.
"""
self.am = am
self.areas = deepcopy(am.areas)
self.distanceType = distanceType
self.distanceStat = distanceStat
self.weightsDistanceStat = weightsDistanceStat
self.indexDataStat = indexDataStat
self.weightsObjectiveFunctionType = weightsObjectiveFunctionType
self.indexDataOF = indexDataOF
self.selectionType = selectionType
self.objectiveFunctionType = objectiveFunctionType
self.n = len(self.areas)
self.unassignedAreas = self.areas.keys()
self.assignedAreas = []
self.area2Region = {}
self.region2Area = {}
self.potentialRegions4Area = {}
self.intraBorderingAreas = {}
self.candidateInfo = {}
self.externalNeighs = set()
self.alpha = alpha
self.numRegionsType = numRegionsType
self.neighSolutions = {(0,0): 9999}
self.regionMoves = set()
self.distances = {}
self.NRegion = []
self.N = 0
self.data = {}
self.objInfo = -1
self.assignAreasNoNeighs()
# PREDEFINED NUMBER OF REGIONS
seeds = []
regions2createKeys = []
emptyList = []
c = 0
lenUnassAreas = len(self.unassignedAreas)
s = 0
i = 0
lseeds = 0
if numRegionsType == "Exogenous":
if not initialSolution:
self.pRegions = pRegions
seeds = self.kmeansInit()
self.setSeeds(seeds)
c = 0
while lenUnassAreas > 0:
self.constructRegions()
lenUnassAreas = len(self.unassignedAreas)
c += 1
self.objInfo = self.getObj()
else:
uniqueInitSolution = set(initialSolution)
self.pRegions = len(uniqueInitSolution)
seeds = []
for s in uniqueInitSolution:
seeds.append(initialSolution.index(s))
self.setSeeds(seeds)
regions2create = {}
c = 0
for i in initialSolution:
regions2create.setdefault(i, []).append(c)
c += 1
c = 0
regions2createKeys = regions2create.keys()
for i in regions2createKeys:
self.unassignedAreas = regions2create[i][1:]
lenUnassAreas = len(self.unassignedAreas)
while lenUnassAreas > 0:
self.constructRegions(filteredCandidates=self.unassignedAreas,
filteredReg=i)
lenUnassAreas = len(self.unassignedAreas)
c += 1
self.objInfo = self.getObj()
# NUMBER OF REGIONS IS ENDOGENOUS WITH A THRESHOLD VALUE
if self.numRegionsType == "EndogenousThreshold":
self.constructionStage = "growing"
try:
self.areas[self.areas.keys()[0]].thresholdVar
except:
self.extractThresholdVar()
self.regionalThreshold = threshold
c = 0
self.feasibleRegions = {}
self.regionValue = {}
seeds = []
for aID in self.areas:
if self.areas[aID].thresholdVar >= self.regionalThreshold:
seed = aID
seeds = seeds + [seed]
self.regionValue[c] = self.areas[seed].thresholdVar
self.feasibleRegions[c] = [seed]
self.removeRegionAsCandidate()
c += 1
self.setSeeds(seeds)
while len(self.unassignedAreas) != 0:
np.random.shuffle(self.unassignedAreas)
vals = []
for index in self.unassignedAreas:
vals += [self.areas[index].thresholdVar]
seed = self.unassignedAreas[0]
self.setSeeds([seed], c)
self.regionValue[c] = self.areas[seed].thresholdVar
if self.regionValue[c] >= self.regionalThreshold:
self.feasibleRegions[c] = [seed]
self.removeRegionAsCandidate()
c += 1
else:
feasibleThreshold = 1
while self.regionValue[c] < self.regionalThreshold:
self.addedArea = -1
try:
self.constructRegions()
self.regionValue[c] += self.areas[self.addedArea].thresholdVar
except:
feasibleThreshold = 0
break
if feasibleThreshold == 1:
self.feasibleRegions[c] = self.region2Area[c]
self.removeRegionAsCandidate()
c += 1
# NUMBER OF REGIONS IS ENDOGENOUS WITH A RANGE VALUE
if self.numRegionsType == "EndogenousRange":
self.constructionStage = "growing" # there are two values for constructionStage: "growing" and "enclaves"
try:
self.areas[self.areas.keys()[0]].thresholdVar
except:
self.extractThresholdVar()
self.regionalThreshold = threshold
c = 0
self.feasibleRegions = {}
while len(self.unassignedAreas) != 0:
# select seed
np.random.shuffle(self.unassignedAreas)
seed = self.unassignedAreas[0]
self.setSeeds([seed],c)
# regionRange contains the current range per region
# regionalThreshold is the predefined threshold value
self.regionRange = {}
maxValue = self.areas[seed].thresholdVar
minValue = self.areas[seed].thresholdVar
currentRange = maxValue - minValue
self.regionRange[c] = currentRange
# grow region if possible
stop = 0
while stop == 0:
upplim = maxValue + self.regionalThreshold - currentRange
lowlim = minValue - self.regionalThreshold + currentRange
feasibleNeigh = 0
toRemove = []
for ext in self.externalNeighs:
if self.areas[ext].thresholdVar <= upplim and self.areas[ext].thresholdVar >= lowlim:
feasibleNeigh = 1
if self.areas[ext].thresholdVar > upplim or self.areas[ext].thresholdVar < lowlim:
toRemove.append(ext)
self.toRemove = toRemove
if feasibleNeigh == 0:
stop = 1
if feasibleNeigh == 1:
try:
self.constructRegions()
if self.areas[self.addedArea].thresholdVar > maxValue:
maxValue = self.areas[self.addedArea].thresholdVar
if self.areas[self.addedArea].thresholdVar < minValue:
minValue = self.areas[self.addedArea].thresholdVar
currentRange = maxValue - minValue
self.regionRange[c] = currentRange
except:
stop = 1
self.feasibleRegions[c] = self.region2Area[c]
self.removeRegionAsCandidate()
c += 1
self.getIntraBorderingAreas()
def kmeansInit(self):
cachedDistances = {}
y = self.am.y
n = len(y)
distances = np.ones(n)
total = sum(distances)
probabilities = map(lambda x: x / float(total), distances)
seeds = []
localDistanceType = self.distanceType
returnDistance2Area = AreaCl.returnDistance2Area
np.random.seed(int(time() * getpid()) % 4294967295)
for k in xrange(self.pRegions):
random = np.random.uniform(0, 1)
find = False
acum = 0
cont = 0
while not find:
inf = acum
sup = acum + probabilities[cont]
if inf <= random <= sup:
find = True
seeds += [cont]
selfAmAreas = self.am.areas
for area in selfAmAreas:
currentArea = selfAmAreas[area]
tempMap = []
for x in seeds:
if x < area:
k = (x, area)
elif x > area:
k = (area, x)
else:
k = (0,0)
cached = cachedDistances.get(k, -1)
if cached < 0:
newDist = returnDistance2Area(currentArea,
selfAmAreas[x],
distanceType = localDistanceType)
tempMap.append(newDist)
cachedDistances[k] = newDist
else:
tempMap.append(cached)
distancei = min(tempMap)
distances[area] = distancei
total = sum(distances)
probabilities = map(lambda x: x / float(total), distances)
else:
cont += 1
acum = sup
del cachedDistances
return seeds
def extractThresholdVar(self):
"""
Separate aggregation variables (data) from the variable selected
to satisfy a threshold value (thresholdVar)
"""
self.totalThresholdVar = 0.0
for areaId in self.areas.keys():
self.areas[areaId].thresholdVar = self.areas[areaId].data[-1]
self.areas[areaId].data = self.areas[areaId].data[0: -1]
self.totalThresholdVar += self.areas[areaId].thresholdVar
def removeRegionAsCandidate(self):
"""
Remove a region from candidates
"""
for i in self.candidateInfo.keys():
a, r = i
if r in self.feasibleRegions:
self.candidateInfo.pop(i)
def returnRegions(self):
"""
Return regions created
"""
areasId = self.area2Region.keys()
areasId = np.sort(areasId).tolist()
return [self.area2Region[area] for area in areasId]
def resetNow(self):
"""
Reset all variables
"""
self.unassignedAreas = self.areas.keys()
self.assignedAreas = []
self.area2Region = {}
self.region2Area = {}
self.potentialRegions4Area = {}
self.intraBorderingAreas = {}
self.candidateInfo = {}
self.externalNeighs = set([])
self.neighsMinusAssigned = set([])
def setSeeds(self, seeds, c=0):
"""
Sets the initial seeds for clustering
"""
if self.numRegionsType == "Exogenous" and len(seeds) <= self.pRegions:
idx = range(self.n)
didx = list((set(idx) - set(seeds)) - self.am.noNeighs)
np.random.shuffle(didx)
self.seeds = seeds + didx[0:(self.pRegions - len(seeds))]
else:
self.seeds = seeds
for seed in self.seeds:
self.NRegion += [0]
self.assignSeeds(seed, c)
c += 1
def assignAreaStep1(self, areaID, regionID):
"""
Assign an area to a region
"""
a = self.areas[areaID]
neighs = a.neighs
try:
self.region2Area[regionID].append(areaID)
if self.objectiveFunctionType == "GWalt":
try:
self.NRegion[regionID] += a.data[0]
for index in range(1,len(a.data)):
self.data[regionID][index - 1] += a.data[index] * a.data[0]
except:
self.NRegion[regionID] = a.data[0]
for index in range(1, len(a.data)):
self.data[regionID][index - 1] = a.data[index] * a.data[0]
self.N += a.data[0]
except:
self.region2Area[regionID] = [areaID]
if self.objectiveFunctionType == "GWalt":
self.NRegion[regionID] = a.data[0]
for index in range(1, len(a.data)):
if index == 1:
self.data[regionID] = [a.data[index] * a.data[0]]
else:
self.data[regionID] += [a.data[index] * a.data[0]]
self.N += a.data[0]
self.area2Region[areaID] = regionID
try:
aid = self.unassignedAreas.remove(areaID)
except:
pass
self.assignedAreas.append(areaID)
setNeighs = set(neighs)
setAssigned = set(self.assignedAreas)
self.oldExternal = self.externalNeighs
self.externalNeighs.update(setNeighs)
self.externalNeighs.difference_update(setAssigned)
self.newExternal = self.externalNeighs - self.oldExternal
self.neighsMinusAssigned = setNeighs - setAssigned
def assignSeeds(self, areaID, regionID):
"""
Assign an area to a region and updates potential regions for the neighs
Parameters
"""
self.assignAreaStep1(areaID, regionID)
for neigh in self.neighsMinusAssigned:
self.potentialRegions4Area.setdefault(neigh, set()).add(regionID)
try:
self.potentialRegions4Area.pop(areaID)
except:
pass
self.changedRegion = 'null'
self.newExternal = self.potentialRegions4Area.keys()
def assignAreasNoNeighs(self):
"""
Assign to the region "-1" for the areas without neighbours
"""
noNeighs = list(self.am.noNeighs)
nr = -1
for areaID in noNeighs:
self.area2Region[areaID] = nr
try:
aid = self.unassignedAreas.remove(areaID)
except:
pass
self.assignedAreas.append(areaID)
setAssigned = set(self.assignedAreas)
nr = nr - 1
def assignArea(self, areaID, regionID):
"""
Assign an area to a region and updates potential regions for neighs
"""
self.changedRegion = regionID
self.addedArea = areaID
self.assignAreaStep1(areaID, regionID)
for neigh in self.neighsMinusAssigned:
self.potentialRegions4Area.setdefault(neigh, set()).add(regionID)
try:
self.potentialRegions4Area.pop(areaID)
except:
pass
def returnBorderingAreas(self, regionID):
"""
Returns bordering areas of a region
"""
areas2Eval = self.region2Area[regionID]
borderingAreas = set()
for area in areas2Eval:
try:
if len(self.intraBorderingAreas[area]) > 0:
borderingAreas.add(area)
except:
pass
return borderingAreas
def getIntraBorderingAreas(self):
"""
Gets the intrabordering areas
"""
self.intraBorderingAreas = {}
if self.numRegionsType == "Exogenous":
nr = range(self.pRegions)
else:
nr = self.feasibleRegions
for regionID in nr:
setNeighsNoRegion = set()
try:
areas2Eval = self.region2Area[regionID]
except:
areas2Eval = []
for area in areas2Eval:
setNeighsNoRegion.update(self.areas[area].neighs)
setNeighsNoRegion.difference_update(areas2Eval)
for neigh in setNeighsNoRegion:
self.intraBorderingAreas.setdefault(neigh, set()).add(regionID)
def constructRegions(self, filteredCandidates=-99, filteredReg=-99):
"""
Construct potential regions per area
"""
_d_stat = self.distanceStat
_wd_stat = self.weightsDistanceStat
_ida_stat = self.indexDataStat
_fun_am_d2r = self.am.getDistance2Region
lastRegion = 0
for areaID in self.potentialRegions4Area.keys():
a = self.areas[areaID]
regionIDs = list(self.potentialRegions4Area[areaID])
for region in regionIDs:
if (self.numRegionsType != "Exogenous" and
self.constructionStage == "growing"
and region in self.feasibleRegions):
# Once a region reaches the threshold, the grow is
# rejected until the assignation of enclaves
continue
else:
if filteredCandidates == -99:
if (areaID not in self.newExternal and
region != self.changedRegion):
lastRegion = region
pass
else:
_reg_dist = 0.0
if self.selectionType != "FullRandom":
_reg_dist = _fun_am_d2r(self.areas[areaID],
self.region2Area[region],
distanceStat = _d_stat,
weights = _wd_stat,
indexData = _ida_stat)
self.candidateInfo[(areaID, region)] = _reg_dist
elif (filteredCandidates != -99 and
areaID in filteredCandidates and
region == filteredReg):
_reg_dist = _fun_am_d2r(self.areas[areaID],
self.region2Area[region],
distanceStat = _d_stat,
weights = _wd_stat,
indexData = _ida_stat)
self.candidateInfo[(areaID, region)] = _reg_dist
if len(self.candidateInfo) == 0:
self.changedRegion = lastRegion
if self.numRegionsType == "EndogenousRange":
self.filterCandidate(self.toRemove)
selectionTypeDispatcher[self.selectionType](self)
def filterCandidate(self, removeCandidate=[]):
"""
Filter candidates
"""
if len(removeCandidate) > 0:
toRemove = []
for _id in removeCandidate:
for cand, reg in self.candidateInfo.keys():
if cand == _id:
toRemove.append((cand, reg))
for remov in toRemove:
self.candidateInfo.pop(remov)
def graspList(self, xList, alpha=0.0):
"""
Return random index of values with specified range.
"""
maxX = max(xList)
minX = min(xList)
xRangeMax = minX + ((maxX - minX) * alpha)
candidates = [i <= xRangeMax for i in xList]
indices = indexMultiple(candidates, 1)
nCandidates = len(indices)
idx = range(nCandidates)
np.random.shuffle(idx)
random = idx[0]
index4Grasp = indices[random]
return index4Grasp
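# Illustrative sketch (assuming indexMultiple returns the positions of matching
# entries): with xList = [3, 1, 2, 5] and alpha = 0.5 the cutoff is
# minX + 0.5*(maxX - minX) = 3, so a random position among the values
# 3, 1 and 2 (indices 0, 1, 2) is returned.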
def getObjective(self, region2AreaDict):
"""
Return the value of the objective function from regions2area dictionary
This function acts as a proxy function since the idea behind the
getObjective and getObjectiveFast is the same. When the non-fast
approach is needed, this function will call getObjectiveFast with the
extra parameter as None. This way the fast function will execute as the
non-fast would have.
"""
return self.getObjectiveFast(region2AreaDict, modifiedRegions=None)
def getObjectiveFast(self, region2AreaDict, modifiedRegions=[]):
"""
Return the value of the objective function from regions2area dictionary
When this function gets called, the objectiveFunctionType property
could be either a String representing the type of the function (the
common case), or could be a list of function types, in which case it's
necessary to iterate over all the functions.
"""
distance = 0.0
_objFunType = self.objectiveFunctionType
if isinstance(_objFunType, "".__class__):
if len(self.indexDataOF) == 0:
indexData = range(len(self.areas[0].data))
else:
indexData = self.indexDataOF
_fun = None
if modifiedRegions is None:
_fun = objectiveFunctionTypeDispatcher[_objFunType]
distance = _fun(self, region2AreaDict, indexData)
else:
_fun = objectiveFunctionTypeDispatcher[_objFunType+'f']
distance=_fun(self, region2AreaDict, modifiedRegions, indexData)
else:
i = 0
for oFT in _objFunType:
if len(self.indexDataOF) == 0:
indexData = range(len(self.areas[0].data))
else:
indexData = self.indexDataOF[i]
if len(self.weightsObjectiveFunctionType) > 0:
_fun = objectiveFunctionTypeDispatcher[oFT]
distance += (self.weightsObjectiveFunctionType[i] *
_fun(self, region2AreaDict, indexData))
i += 1
else:
_fun = objectiveFunctionTypeDispatcher[oFT]
distance += _fun(self, region2AreaDict, indexData)
return distance
def getLambda(self):
L = np.matrix(np.identity(self.pRegions))
for r in range(self.pRegions):
L[r, r] = 1.0 * self.NRegion[r] / self.N
return L
def getB(self):
"""
Return matrix of parameters of all regions
"""
B = np.matrix(np.zeros(len(self.data[0]) * self.pRegions)).T
index = 0
for r in range(self.pRegions):
for i in range(len(self.data[0])):
B[index, 0] = self.data[r][i] / self.NRegion[r]
index += 1
return B
def getY(self):
"""
Return matrix of the average variance-covariance of all regions
"""
Y = np.matrix(np.identity(len(self.data[0])))
centroids = {}
for r in range(self.pRegions):
centroids[r] = calculateCentroid([self.areas[aID] for aID in self.region2Area[r]])
for r in range(self.pRegions):
Y += centroids[r].var * np.power(self.NRegion[r] / self.N, 2)
import numpy as np, pandas as pd
from scipy.sparse import csc_matrix, csr_matrix, issparse, isspmatrix_csc, isspmatrix_csr, vstack as sp_vstack
import warnings
import multiprocessing
import ctypes
import json
import os
from copy import deepcopy
from ._cpp_interface import isoforest_cpp_obj, _sort_csc_indices, _reconstruct_csr_sliced, _reconstruct_csr_with_categ, _get_has_openmp
__all__ = ["IsolationForest"]
### Helpers
def _get_num_dtype(X_num=None, sample_weights=None, column_weights=None):
if X_num is not None:
return np.empty(0, dtype=X_num.dtype)
elif sample_weights is not None:
return np.empty(0, dtype=sample_weights.dtype)
elif column_weights is not None:
return np.empty(0, dtype=column_weights.dtype)
else:
return np.empty(0, dtype=ctypes.c_double)
def _get_int_dtype(X_num):
if (X_num is not None) and (issparse(X_num)):
return np.empty(0, dtype=X_num.indices.dtype)
else:
return np.empty(0, dtype=ctypes.c_size_t)
def _is_row_major(X_num):
if (X_num is None) or (issparse(X_num)):
return False
else:
return X_num.strides[1] == X_num.dtype.itemsize
def _is_col_major(X_num):
if (X_num is None) or (issparse(X_num)):
return False
else:
return X_num.strides[0] == X_num.dtype.itemsize
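# Example: _is_row_major(np.zeros((3, 2))) -> True, while
# _is_col_major(np.asfortranarray(np.zeros((3, 2)))) -> True.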
def _copy_if_subview(X_num, prefer_row_major=False):
### TODO: the C++ functions should accept a 'leading dimension'
### parameter so as to avoid copying the data here
if (X_num is not None) and (not issparse(X_num)):
col_major = _is_col_major(X_num)
leading_dimension = int(X_num.strides[1 if col_major else 0] / X_num.dtype.itemsize)
if (
(leading_dimension != X_num.shape[0 if col_major else 1]) or
(len(X_num.strides) != 2) or
(not X_num.flags.aligned) or
(not _is_row_major(X_num) and not _is_col_major(X_num))
):
X_num = X_num.copy()
if _is_col_major(X_num) != col_major:
if prefer_row_major:
X_num = np.ascontiguousarray(X_num)
else:
X_num = np.asfortranarray(X_num)
return X_num
def _all_equal(x, y):
if x.shape[0] != y.shape[0]:
return False
return np.all(x == y)
def _encode_categorical(cl, categories):
if (cl.shape[0] >= 100) and (cl.dtype.name == "category"):
if _all_equal(cl.cat.categories, categories):
return cl.cat.codes
return pd.Categorical(cl, categories).codes
class IsolationForest:
"""
Isolation Forest model
Isolation Forest is an algorithm originally developed for outlier detection that consists of splitting
sub-samples of the data according to some attribute/feature/column at random. The idea is that the rarer
the observation, the more likely it is that a random uniform split on some feature would put outliers alone
in one branch, and the fewer splits it will take to isolate an outlier observation like this. The concept
is extended to splitting hyperplanes in the extended model (i.e. splitting by more than one column at a time), and to
guided (not entirely random) splits in the SCiForest model that aim at isolating outliers faster and
finding clustered outliers.
This version adds heuristics to handle missing data and categorical variables. It can be used to approximate pairwise
distances by checking the depth after which two observations become separated, and to approximate densities by fitting
trees beyond the balanced-tree limit. It also offers options to vary between randomized and deterministic splits.
Note
----
The default parameters in this software do not correspond to the suggested parameters in
any of the references.
In particular, the following default values are likely to cause huge differences when compared to the
defaults in other software: ``ndim``, ``sample_size``, ``ntrees``. The defaults here are
nevertheless more likely to result in better models. In order to mimic scikit-learn for example, one
would need to pass ``ndim=1``, ``sample_size=256``, ``ntrees=100``, ``missing_action="fail"``, ``nthreads=1``.
Note
----
Shorthands for parameter combinations that match some of the references:
'iForest' (reference [1]_):
``ndim=1``, ``sample_size=256``, ``max_depth=8``, ``ntrees=100``, ``missing_action="fail"``.
'EIF' (reference [3]_):
``ndim=2``, ``sample_size=256``, ``max_depth=8``, ``ntrees=100``, ``missing_action="fail"``,
``coefs="uniform"``, ``standardize_data=False`` (plus standardizing the data **before** passing it).
'SCiForest' (reference [4]_):
``ndim=2``, ``sample_size=256``, ``max_depth=8``, ``ntrees=100``, ``missing_action="fail"``,
``coefs="normal"``, ``ntry=10``, ``prob_pick_avg_gain=1``, ``penalize_range=True``.
Might provide much better results with ``max_depth=None`` despite the reference's recommendation.
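    As a quick sketch (not a recommendation), these shorthands translate into constructor calls
    along the following lines, using only parameters documented below:

    >>> iforest_like = IsolationForest(ndim=1, sample_size=256, max_depth=8,
    ...                                ntrees=100, missing_action="fail")
    >>> eif_like = IsolationForest(ndim=2, sample_size=256, max_depth=8, ntrees=100,
    ...                            missing_action="fail", coefs="uniform",
    ...                            standardize_data=False)
    >>> sciforest_like = IsolationForest(ndim=2, sample_size=256, max_depth=8, ntrees=100,
    ...                                  missing_action="fail", coefs="normal", ntry=10,
    ...                                  prob_pick_avg_gain=1, penalize_range=True)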
Note
----
The model offers many tunable parameters. The most likely candidate to tune is
``prob_pick_pooled_gain``, for which higher values tend to
result in a better ability to flag outliers in the training data at the expense of hindered
performance when making predictions (calling method ``predict``) on new data (including out-of-bag
samples for each tree) and poorer
generalizability to inputs with values outside the variables' ranges to which the model was fit
    (see plots generated from the examples in the GitHub notebook for a better idea of the difference). The next candidate to tune is
``sample_size`` - the default is to use all rows, but in some datasets introducing sub-sampling can help,
especially for the single-variable model. In smaller datasets, one might also want to experiment
with ``weigh_by_kurtosis`` and perhaps lower ``ndim``. If using ``prob_pick_pooled_gain``, models
are likely to benefit from deeper trees (controlled by ``max_depth``), but using large samples
and/or deeper trees can result in significantly slower model fitting and predictions - in such cases,
using ``min_gain`` (with a value like 0.25) with ``max_depth=None`` can offer a better speed/performance
trade-off than changing ``max_depth``.
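    As a hedged example of the advice above, one possible starting configuration when flagging
    outliers in the training data is the priority might look like this:

    >>> model = IsolationForest(prob_pick_pooled_gain=1.0, max_depth=None, min_gain=0.25)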
Note
----
The default parameters will not scale to large datasets. In particular,
if the amount of data is large, it's suggested to set a smaller sample size for each tree (parameter ``sample_size``)
and to fit fewer of them (parameter ``ntrees``).
As well, the default option for 'missing_action' might slow things down significantly.
See the documentation of the parameters for more details.
These defaults can also result in very big model sizes in memory and as serialized
    files (e.g. models that weigh over 10GB) when the number of rows in the data is large.
Using fewer trees, smaller sample sizes, and shallower trees can help to reduce model
sizes if that becomes a problem.
Note
----
See the documentation of ``predict`` for some considerations when serving models generated through
this library.
Parameters
----------
sample_size : str "auto", int, float(0,1), or None
Sample size of the data sub-samples with which each binary tree will be built. If passing 'None', each
tree will be built using the full data. Recommended value in [1]_, [2]_, [3]_ is 256, while
        the default in the author's code in [5]_ corresponds to passing 'None' here.
If passing "auto", will use the full number of rows in the data, up to 10,000 (i.e.
will take 'sample_size=min(nrows(X), 10000)') **when calling fit**, and the full amount
of rows in the data **when calling the variants** ``fit_predict`` or ``fit_transform``.
If passing ``None``, will take the full number of rows in the data (no sub-sampling).
If passing a number between zero and one, will assume it means taking a sample size that represents
that proportion of the rows in the data.
Hint: seeing a distribution of scores which is on average too far below 0.5 could mean that the
model needs more trees and/or bigger samples to reach convergence (unless using non-random
splits, in which case the distribution is likely to be centered around a much lower number),
or that the distributions in the data are too skewed for random uniform splits.
ntrees : int
Number of binary trees to build for the model. Recommended value in [1]_ is 100, while the default value in the
author's code in [5]_ is 10. In general, the number of trees required for good results
is higher when (a) there are many columns, (b) there are categorical variables, (c) categorical variables have many
categories, (d) `ndim` is high, (e) ``prob_pick_pooled_gain`` is used.
Hint: seeing a distribution of scores which is on average too far below 0.5 could mean that the
model needs more trees and/or bigger samples to reach convergence (unless using non-random
splits, in which case the distribution is likely to be centered around a much lower number),
or that the distributions in the data are too skewed for random uniform splits.
ndim : int
Number of columns to combine to produce a split. If passing 1, will produce the single-variable model described
in [1]_ and [2]_, while if passing values greater than 1, will produce the extended model described in [3]_ and [4]_.
Recommended value in [4]_ is 2, while [3]_ recommends a low value such as 2 or 3. Models with values higher than 1
        are referred to hereafter as the extended model (as in [3]_).
Note that, when using ``ndim>1`` plus ``standardize_data=True``, the variables are standardized at
each step as suggested in [4]_, which makes the models slightly different than in [3]_.
ntry : int
In the extended model with non-random splits, how many random combinations to try for determining the best gain.
Only used when deciding splits by gain (see documentation for parameters 'prob_pick_avg_gain' and 'prob_pick_pooled_gain').
Recommended value in [4]_ is 10. Ignored for single-variable model.
categ_cols : None or array-like
Columns that hold categorical features, when the data is passed as an array or matrix.
Categorical columns should contain only integer values with a continuous numeration starting at zero,
with negative values and NaN taken as missing,
and the array or list passed here should correspond to the column numbers, with numeration starting
at zero. The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`).
This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.
This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as
categorical depending on their dtype (see the documentation for ``fit`` for details).
max_depth : int, None, or str "auto"
Maximum depth of the binary trees to grow. If passing None, will build trees until each observation ends alone
in a terminal node or until no further split is possible. If using "auto", will limit it to the corresponding
depth of a balanced binary tree with number of terminal nodes corresponding to the sub-sample size (the reason
being that, if trying to detect outliers, an outlier will only be so if it turns out to be isolated with shorter average
depth than usual, which corresponds to a balanced tree depth). When a terminal node has more than 1 observation, the
remaining isolation depth for them is estimated assuming the data and splits are both uniformly random (separation depth
follows a similar process with expected value calculated as in [6]_). Default setting for [1]_, [2]_, [3]_, [4]_ is "auto",
but it's recommended to pass higher values if using the model for purposes other than outlier detection.
Note that models that use ``prob_pick_pooled_gain`` or ``prob_pick_avg_gain`` are likely to benefit from
deeper trees (larger ``max_depth``), but deeper trees can result in much slower model fitting and
predictions.
If using pooled gain, one might want to substitute ``max_depth`` with ``min_gain``.
ncols_per_tree : None, int, or float(0,1]
Number of columns to use (have as potential candidates for splitting at each iteration) in each tree,
somewhat similar to the 'mtry' parameter of random forests.
In general, this is only relevant when using non-random splits and/or weighting by kurtosis.
If passing a number between zero and one, will assume it means taking a sample size that represents
that proportion of the columns in the data. If passing exactly 1, will assume it means taking
100% of the columns rather than taking 1 column.
If passing ``None`` (the default) or zero, will use the full number of available columns.
prob_pick_avg_gain : float(0, 1)
* For the single-variable model (``ndim=1``), this parameter indicates the probability
of making each split by choosing a column and split point in that
same column as both the column and split point that gives the largest averaged gain (as proposed in [4]_) across
all available columns and possible splits in each column. Note that this implies evaluating every single column
in the sample data when this type of split happens, which will potentially make the model fitting much slower,
but has no impact on prediction time. For categorical variables, will take the expected standard deviation that
        would be obtained if the column were converted to numerical by assigning to each category a random number ~ Unif(0, 1)
and calculate gain with those assumed standard deviations.
* For the extended model, this parameter indicates the probability that the
split point in the chosen linear combination of variables will be decided by this averaged gain criterion.
Compared to a pooled average, this tends to result in more cases in which a single observation or very few of them
are put into one branch. Recommended to use sub-samples (parameter 'sample_size') when passing this parameter.
Note that, since this will create isolated nodes faster, the resulting object will be lighter (use less memory).
When splits are
not made according to any of 'prob_pick_avg_gain', 'prob_pick_pooled_gain', 'prob_split_avg_gain',
'prob_split_pooled_gain', both the column and the split point are decided at random. Default setting for [1]_, [2]_, [3]_ is
zero, and default for [4]_ is 1. This is the randomization parameter that can be passed to the author's original code in [5]_,
but note that the code in [5]_ suffers from a mathematical error in the calculation of running standard deviations,
so the results from it might not match with this library's.
Note that, if passing a value of 1 (100%) with no sub-sampling and using the single-variable model, every single tree will have
the exact same splits.
Under this option, models are likely to produce better results when increasing ``max_depth``.
Important detail: if using either ``prob_pick_avg_gain`` or ``prob_pick_pooled_gain``, the distribution of
outlier scores is unlikely to be centered around 0.5.
prob_pick_pooled_gain : float(0, 1)
* For the single-variable model (``ndim=1``), this parameter indicates the probability
of making each split by choosing a column and split point in that
same column as both the column and split point that gives the largest pooled gain (as used in decision tree
classifiers such as C4.5 in [7]_) across all available columns and possible splits in each column. Note
that this implies evaluating every single column in the sample data when this type of split happens, which
will potentially make the model fitting much slower, but has no impact on prediction time. For categorical
        variables, will use Shannon entropy instead (like in [7]_).
* For the extended model, this parameter indicates the probability
that the split point in the chosen linear combination of variables will be decided by this pooled gain
criterion.
Compared to a simple average, this tends to result in more evenly-divided splits and more clustered
groups when they are smaller. Recommended to pass higher values when used for imputation of missing values.
When used for outlier detection, higher values of this parameter result in models that are able to better flag
outliers in the training data of each tree, but generalize poorly to outliers in new data (including
out-of-bag samples for each tree) and to values of variables
outside of the ranges from the training data. Passing small 'sample_size' and high values of this parameter will
tend to flag too many outliers.
Note that, since this makes the trees more even and thus it takes more steps to produce isolated nodes,
the resulting object will be heavier. When splits are not made according to any of 'prob_pick_avg_gain',
'prob_pick_pooled_gain', 'prob_split_avg_gain', 'prob_split_pooled_gain', both the column and the split point
are decided at random. Note that, if passing value 1 (100%) with no sub-sampling and using the single-variable model,
every single tree will have the exact same splits.
Be aware that ``penalize_range`` can also have a large impact when using ``prob_pick_pooled_gain``.
Under this option, models are likely to produce better results when increasing ``max_depth``.
Alternatively, one can also control the depth through ``min_gain`` (for which one might want to
set ``max_depth=None``).
Important detail: if using either ``prob_pick_avg_gain`` or ``prob_pick_pooled_gain``, the distribution of
outlier scores is unlikely to be centered around 0.5.
prob_split_avg_gain : float(0, 1)
Probability of making each split by selecting a column at random and determining the split point as
that which gives the highest averaged gain. Not supported for the extended model as the splits are on
linear combinations of variables. See the documentation for parameter 'prob_pick_avg_gain' for more details.
prob_split_pooled_gain : float(0, 1)
Probability of making each split by selecting a column at random and determining the split point as
that which gives the highest pooled gain. Not supported for the extended model as the splits are on
linear combinations of variables. See the documentation for parameter 'prob_pick_pooled_gain' for more details.
min_gain : float > 0
Minimum gain that a split threshold needs to produce in order to proceed with a split. Only used when the splits
are decided by a gain criterion (either pooled or averaged). If the highest possible gain in the evaluated
splits at a node is below this threshold, that node becomes a terminal node.
This can be used as a more sophisticated depth control when using pooled gain (note that ``max_depth``
still applies on top of this heuristic).
missing_action : str, one of "divide" (single-variable only), "impute", "fail", "auto"
How to handle missing data at both fitting and prediction time. Options are:
``"divide"``:
(For the single-variable model only, recommended) Will follow both branches and combine the result with the
weight given by the fraction of the data that went to each branch when fitting the model.
``"impute"``:
Will assign observations to the branch with the most observations in the single-variable model, or fill in
missing values with the median of each column of the sample from which the split was made in the extended
model (recommended for the extended model).
``"fail"``:
Will assume there are no missing values and will trigger undefined behavior if it encounters any.
``"auto"``:
Will use "divide" for the single-variable model and "impute" for the extended model.
In the extended model, infinite values will be treated as missing.
Passing "fail" will produce faster fitting and prediction times along with decreased
model object sizes.
Models from [1]_, [2]_, [3]_, [4]_ correspond to "fail" here.
    new_categ_action : str, one of "weighted" (single-variable only), "impute" (extended only), "smallest", "random", "auto"
What to do after splitting a categorical feature when new data that reaches that split has categories that
the sub-sample from which the split was done did not have. Options are:
``"weighted"``:
(For the single-variable model only, recommended) Will follow both branches and combine the result with weight given
by the fraction of the data that went to each branch when fitting the model.
``"impute"``:
(For the extended model only, recommended) Will assign them the median value for that column that was added to the linear
combination of features.
``"smallest"``:
In the single-variable case will assign all observations with unseen categories in the split to the branch that had
fewer observations when fitting the model, and in the extended case will assign them the coefficient of the least
common category.
``"random"``:
            Will assign a branch (coefficient in the extended model) at random for each category beforehand, even if no observations
had that category when fitting the model. Note that this can produce biased results when deciding
splits by a gain criterion.
Important: under this option, if the model is fitted to a ``DataFrame``, when calling ``predict``
on new data which contains new categories (unseen in the data to which the model was fitted),
they will be added to the model's state on-the-fly. This means that, if calling ``predict`` on data
which has new categories, there might be inconsistencies in the results if predictions are done in
parallel or if passing the same data in batches or with different row orders. It also means that
the ``predict`` function will not be thread-safe (e.g. cannot be used alongside ``joblib`` with a
backend that uses shared memory).
``"auto"``:
Will select "weighted" for the single-variable model and "impute" for the extended model.
Ignored when passing 'categ_split_type' = 'single_categ'.
categ_split_type : str, one of "subset" or "single_categ"
Whether to split categorical features by assigning sub-sets of them to each branch, or by assigning
a single category to a branch and the rest to the other branch. For the extended model, whether to
give each category a coefficient, or only one while the rest get zero.
all_perm : bool
When doing categorical variable splits by pooled gain with ``ndim=1`` (regular model),
whether to consider all possible permutations of variables to assign to each branch or not. If ``False``,
will sort the categories by their frequency and make a grouping in this sorted order. Note that the
number of combinations evaluated (if ``True``) is the factorial of the number of present categories in
a given column (minus 2). For averaged gain, the best split is always to put the second most-frequent
category in a separate branch, so not evaluating all permutations (passing ``False``) will make it
possible to select other splits that respect the sorted frequency order.
Ignored when not using categorical variables or not doing splits by pooled gain or using ``ndim > 1``.
coef_by_prop : bool
In the extended model, whether to sort the randomly-generated coefficients for categories
according to their relative frequency in the tree node. This might provide better results when using
categorical variables with too many categories, but is not recommended, and not reflective of
real "categorical-ness". Ignored for the regular model (``ndim=1``) and/or when not using categorical
variables.
recode_categ : bool
Whether to re-encode categorical variables even in case they are already passed
as ``pd.Categorical``. This is recommended as it will eliminate potentially redundant categorical levels if
they have no observations, but if the categorical variables are already of type ``pd.Categorical`` with only
the levels that are present, it can be skipped for slightly faster fitting times. You'll likely
want to pass ``False`` here if merging several models into one through ``append_trees``.
weights_as_sample_prob : bool
If passing sample (row) weights when fitting the model, whether to consider those weights as row
sampling weights (i.e. the higher the weights, the more likely the observation will end up included
in each tree sub-sample), or as distribution density weights (i.e. putting a weight of two is the same
as if the row appeared twice, thus higher weight makes it less of an outlier). Note that sampling weight
is only used when sub-sampling data for each tree, which is not the default in this implementation.
sample_with_replacement : bool
Whether to sample rows with replacement or not (not recommended). Note that distance calculations,
if desired, don't work well with duplicate rows.
penalize_range : bool
Whether to penalize (add -1 to the terminal depth) observations at prediction time that have a value
of the chosen split variable (linear combination in extended model) that falls outside of a pre-determined
reasonable range in the data being split (given by 2 * range in data and centered around the split point),
as proposed in [4]_ and implemented in the authors' original code in [5]_. Not used in single-variable model
when splitting by categorical variables.
It's recommended to turn this off for faster predictions on sparse CSC matrices.
Note that this can make a very large difference in the results when using ``prob_pick_pooled_gain``.
Be aware that this option can make the distribution of outlier scores a bit different
        (i.e. not centered around 0.5).
standardize_data : bool
        Whether to standardize the features at each node before creating a linear combination of them as suggested
in [4]_. This is ignored when using ``ndim=1``.
weigh_by_kurtosis : bool
Whether to weigh each column according to the kurtosis obtained in the sub-sample that is selected
for each tree as briefly proposed in [1]_. Note that this is only done at the beginning of each tree
sample, so if not using sub-samples, it's better to pass column weights calculated externally. For
categorical columns, will calculate expected kurtosis if the column was converted to numerical by
assigning to each category a random number ~ Unif(0, 1).
Note that when using sparse matrices, the calculation of kurtosis will rely on a procedure that
uses sums of squares and higher-power numbers, which has less numerical precision than the
calculation used for dense inputs, and as such, the results might differ slightly.
Using this option makes the model more likely to pick the columns that have anomalous values
when viewed as a 1-d distribution, and can bring a large improvement in some datasets.
coefs : str, one of "normal" or "uniform"
For the extended model, whether to sample random coefficients according to a normal distribution ~ N(0, 1)
(as proposed in [4]_) or according to a uniform distribution ~ Unif(-1, +1) as proposed in [3]_. Ignored for the
        single-variable model. Note that, for categorical variables, the coefficients will be sampled ~ N(0, 1)
regardless - in order for both types of variables to have transformations in similar ranges (which will tend
to boost the importance of categorical variables), pass ``"uniform"`` here.
assume_full_distr : bool
When calculating pairwise distances (see [8]_), whether to assume that the fitted model represents
a full population distribution (will use a standardizing criterion assuming infinite sample,
and the results of the similarity between two points at prediction time will not depend on the
        presence of any third point that is similar to them, but will differ more compared to the pairwise
distances between points from which the model was fit). If passing 'False', will calculate pairwise distances
as if the new observations at prediction time were added to the sample to which each tree was fit, which
will make the distances between two points potentially vary according to other newly introduced points.
This will not be assumed when the distances are calculated as the model is being fit (see documentation
for method 'fit_transform').
build_imputer : bool
Whether to construct missing-value imputers so that later this same model could be used to impute
missing values of new (or the same) observations. Be aware that this will significantly increase the memory
requirements and serialized object sizes. Note that this is not related to 'missing_action' as missing
values inside the model are treated differently and follow their own imputation or division strategy.
min_imp_obs : int
Minimum number of observations with which an imputation value can be produced. Ignored if passing
'build_imputer' = 'False'.
depth_imp : str, one of "higher", "lower", "same"
How to weight observations according to their depth when used for imputing missing values. Passing
"higher" will weigh observations higher the further down the tree (away from the root node) the
terminal node is, while "lower" will do the opposite, and "same" will not modify the weights according
to node depth in the tree. Implemented for testing purposes and not recommended to change
from the default. Ignored when passing 'build_imputer' = 'False'.
weigh_imp_rows : str, one of "inverse", "prop", "flat"
How to weight node sizes when used for imputing missing values. Passing "inverse" will weigh
        a node inversely proportional to the number of observations that end up there, while "prop"
        will weigh them heavier the more observations there are, and "flat" will weigh all nodes the same
in this regard regardless of how many observations end up there. Implemented for testing purposes
and not recommended to change from the default. Ignored when passing 'build_imputer' = 'False'.
random_seed : int
Seed that will be used for random number generation.
nthreads : int
Number of parallel threads to use. If passing a negative number, will use
the same formula as joblib does for calculating number of threads (which is
n_cpus + 1 + n_jobs - i.e. pass -1 to use all available threads). Note that, the more threads,
the more memory will be allocated, even if the thread does not end up being used.
Be aware that most of the operations are bound by memory bandwidth, which means that
adding more threads will not result in a linear speed-up. For some types of data
(e.g. large sparse matrices with small sample sizes), adding more threads might result
in only a very modest speed up (e.g. 1.5x faster with 4x more threads),
even if all threads look fully utilized.
n_estimators : None or int
Synonym for ``ntrees``, kept for better compatibility with scikit-learn.
max_samples : None or int
Synonym for ``sample_size``, kept for better compatibility with scikit-learn.
n_jobs : None or int
Synonym for ``nthreads``, kept for better compatibility with scikit-learn.
random_state : None, int, or RandomState
Synonym for ``random_seed``, kept for better compatibility with scikit-learn.
bootstrap : None or bool
Synonym for ``sample_with_replacement``, kept for better compatibility with scikit-learn.
Attributes
----------
cols_numeric_ : array(n_num_features,)
Array with the names of the columns that were taken as numerical
(Only when fitting the model to a DataFrame object).
cols_categ_ : array(n_categ_features,)
Array with the names of the columns that were taken as categorical
(Only when fitting the model to a DataFrame object).
is_fitted_ : bool
Indicator telling whether the model has been fit to data or not.
References
----------
    .. [1] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou. "Isolation forest."
           2008 Eighth IEEE International Conference on Data Mining. IEEE, 2008.
    .. [2] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou. "Isolation-based anomaly detection."
           ACM Transactions on Knowledge Discovery from Data (TKDD) 6.1 (2012): 3.
    .. [3] Hariri, Sahand, Matias Carrasco Kind, and Robert J. Brunner. "Extended Isolation Forest."
           arXiv preprint arXiv:1811.02141 (2018).
    .. [4] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou. "On detecting clustered anomalies using SCiForest."
           Joint European Conference on Machine Learning and Knowledge Discovery in Databases. Springer, Berlin, Heidelberg, 2010.
    .. [5] https://sourceforge.net/projects/iforest/
    .. [6] https://math.stackexchange.com/questions/3388518/expected-number-of-paths-required-to-separate-elements-in-a-binary-tree
    .. [7] Quinlan, J. Ross. C4.5: programs for machine learning. Elsevier, 2014.
    .. [8] Cortes, David. "Distance approximation using Isolation Forests."
           arXiv preprint arXiv:1910.12362 (2019).
    .. [9] Cortes, David. "Imputing missing values with unsupervised random trees."
           arXiv preprint arXiv:1911.06646 (2019).
.. [10] https://math.stackexchange.com/questions/3333220/expected-average-depth-in-random-binary-tree-constructed-top-to-bottom
"""
def __init__(self, sample_size = "auto", ntrees = 500, ndim = 3, ntry = 3,
categ_cols = None, max_depth = "auto", ncols_per_tree = None,
prob_pick_avg_gain = 0.0, prob_pick_pooled_gain = 0.0,
prob_split_avg_gain = 0.0, prob_split_pooled_gain = 0.0,
min_gain = 0., missing_action = "auto", new_categ_action = "auto",
categ_split_type = "subset", all_perm = False,
coef_by_prop = False, recode_categ = False,
weights_as_sample_prob = True, sample_with_replacement = False,
penalize_range = False, standardize_data = True, weigh_by_kurtosis = False,
coefs = "normal", assume_full_distr = True,
build_imputer = False, min_imp_obs = 3,
depth_imp = "higher", weigh_imp_rows = "inverse",
random_seed = 1, nthreads = -1,
n_estimators = None, max_samples = None,
n_jobs = None, random_state = None, bootstrap = None):
self.sample_size = sample_size
self.ntrees = ntrees
self.ndim = ndim
self.ntry = ntry
self.categ_cols = categ_cols
self.max_depth = max_depth
self.ncols_per_tree = ncols_per_tree
self.prob_pick_avg_gain = prob_pick_avg_gain
self.prob_pick_pooled_gain = prob_pick_pooled_gain
self.prob_split_avg_gain = prob_split_avg_gain
self.prob_split_pooled_gain = prob_split_pooled_gain
self.min_gain = min_gain
self.missing_action = missing_action
self.new_categ_action = new_categ_action
self.categ_split_type = categ_split_type
self.all_perm = all_perm
self.coef_by_prop = coef_by_prop
self.recode_categ = recode_categ
self.weights_as_sample_prob = weights_as_sample_prob
self.sample_with_replacement = sample_with_replacement
self.penalize_range = penalize_range
self.standardize_data = standardize_data
self.weigh_by_kurtosis = weigh_by_kurtosis
self.coefs = coefs
self.assume_full_distr = assume_full_distr
self.build_imputer = build_imputer
self.min_imp_obs = min_imp_obs
self.depth_imp = depth_imp
self.weigh_imp_rows = weigh_imp_rows
self.random_seed = random_seed
self.nthreads = nthreads
self.n_estimators = n_estimators
self.max_samples = max_samples
self.n_jobs = n_jobs
self.random_state = random_state
self.bootstrap = bootstrap
self._reset_obj()
def _init(self, categ_cols = None):
if categ_cols is not None:
if self.categ_cols is not None:
warnings.warn("Passed 'categ_cols' in constructor and fit method. Will take the latter.")
self.categ_cols = categ_cols
self._initialize_full(
sample_size = self.sample_size if (self.max_samples is None) else self.max_samples,
ntrees = self.ntrees if (self.n_estimators is None) else self.n_estimators,
ndim = self.ndim, ntry = self.ntry,
categ_cols = self.categ_cols,
max_depth = self.max_depth, ncols_per_tree = self.ncols_per_tree,
prob_pick_avg_gain = self.prob_pick_avg_gain, prob_pick_pooled_gain = self.prob_pick_pooled_gain,
prob_split_avg_gain = self.prob_split_avg_gain, prob_split_pooled_gain = self.prob_split_pooled_gain,
min_gain = self.min_gain, missing_action = self.missing_action, new_categ_action = self.new_categ_action,
categ_split_type = self.categ_split_type, all_perm = self.all_perm,
coef_by_prop = self.coef_by_prop, recode_categ = self.recode_categ,
weights_as_sample_prob = self.weights_as_sample_prob,
sample_with_replacement = self.sample_with_replacement if (self.bootstrap is None) else self.bootstrap,
penalize_range = self.penalize_range, standardize_data = self.standardize_data,
weigh_by_kurtosis = self.weigh_by_kurtosis,
coefs = self.coefs, assume_full_distr = self.assume_full_distr,
build_imputer = self.build_imputer, min_imp_obs = self.min_imp_obs,
depth_imp = self.depth_imp, weigh_imp_rows = self.weigh_imp_rows,
random_seed = self.random_seed if (self.random_state is None) else self.random_state,
nthreads = self.nthreads if (self.n_jobs is None) else self.n_jobs)
def _initialize_full(self, sample_size = None, ntrees = 500, ndim = 3, ntry = 3,
categ_cols = None, max_depth = "auto", ncols_per_tree = None,
prob_pick_avg_gain = 0.0, prob_pick_pooled_gain = 0.0,
prob_split_avg_gain = 0.0, prob_split_pooled_gain = 0.0,
min_gain = 0., missing_action = "auto", new_categ_action = "auto",
categ_split_type = "subset", all_perm = False,
coef_by_prop = False, recode_categ = True,
weights_as_sample_prob = True, sample_with_replacement = False,
penalize_range = True, standardize_data = True, weigh_by_kurtosis = False,
coefs = "normal", assume_full_distr = True,
build_imputer = False, min_imp_obs = 3,
depth_imp = "higher", weigh_imp_rows = "inverse",
random_seed = 1, nthreads = -1):
if (sample_size is not None) and (sample_size != "auto"):
assert sample_size > 0
if sample_size > 1:
assert isinstance(sample_size, int)
elif sample_size == 1:
sample_size = None
if ncols_per_tree is not None:
assert ncols_per_tree > 0
if ncols_per_tree > 1:
assert isinstance(ncols_per_tree, int)
elif ncols_per_tree == 1:
ncols_per_tree = None
assert ntrees > 0
assert isinstance(ntrees, int)
if (max_depth != "auto") and (max_depth is not None):
assert max_depth > 0
assert isinstance(max_depth, int)
if (sample_size is not None) and (sample_size != "auto"):
assert max_depth < sample_size
assert ndim >= 1
assert isinstance(ndim, int)
assert ntry >= 1
assert isinstance(ntry, int)
if isinstance(random_seed, np.random.RandomState):
random_seed = random_seed.randint(np.iinfo(np.int32).max)
if isinstance(random_seed, np.random.Generator):
random_seed = random_seed.integers(np.iinfo(np.int32).max)
random_seed = int(random_seed)
assert random_seed >= 0
assert isinstance(min_imp_obs, int)
assert min_imp_obs >= 1
assert missing_action in ["divide", "impute", "fail", "auto"]
assert new_categ_action in ["weighted", "smallest", "random", "impute", "auto"]
assert categ_split_type in ["single_categ", "subset"]
assert coefs in ["normal", "uniform"]
assert depth_imp in ["lower", "higher", "same"]
assert weigh_imp_rows in ["inverse", "prop", "flat"]
assert prob_pick_avg_gain >= 0
assert prob_pick_pooled_gain >= 0
assert prob_split_avg_gain >= 0
assert prob_split_pooled_gain >= 0
assert min_gain >= 0
s = prob_pick_avg_gain + prob_pick_pooled_gain + prob_split_avg_gain + prob_split_pooled_gain
if s > 1:
warnings.warn("Split type probabilities sum to more than 1, will standardize them")
prob_pick_avg_gain /= s
prob_pick_pooled_gain /= s
prob_split_avg_gain /= s
prob_split_pooled_gain /= s
if (ndim == 1) and ((sample_size is None) or (sample_size == "auto")) and ((prob_pick_avg_gain >= 1) or (prob_pick_pooled_gain >= 1)) and (not sample_with_replacement):
msg = "Passed parameters for deterministic single-variable splits"
msg += " with no sub-sampling. "
msg += "Every tree fitted will end up doing exactly the same splits. "
msg += "It's recommended to set 'prob_pick_avg_gain' < 1, 'prob_pick_pooled_gain' < 1, "
msg += "or to use the extended model (ndim > 1)."
warnings.warn(msg)
if missing_action == "auto":
if ndim == 1:
missing_action = "divide"
else:
missing_action = "impute"
if new_categ_action == "auto":
if ndim == 1:
new_categ_action = "weighted"
else:
new_categ_action = "impute"
if (build_imputer) and (missing_action == "fail"):
raise ValueError("Cannot impute missing values when passing 'missing_action' = 'fail'.")
if ndim == 1:
if (categ_split_type != "single_categ") and (new_categ_action == "impute"):
raise ValueError("'new_categ_action' = 'impute' not supported in single-variable model.")
else:
if (prob_split_avg_gain > 0) or (prob_split_pooled_gain > 0):
msg = "Non-zero values for 'prob_split_avg_gain' "
msg += "and 'prob_split_pooled_gain' not meaningful in "
msg += "extended model."
raise ValueError(msg)
if missing_action == "divide":
raise ValueError("'missing_action' = 'divide' not supported in extended model.")
if (categ_split_type != "single_categ") and (new_categ_action == "weighted"):
raise ValueError("'new_categ_action' = 'weighted' not supported in extended model.")
if (weigh_by_kurtosis) and (ndim == 1) and (prob_pick_pooled_gain + prob_split_avg_gain) >= 1:
msg = "'weigh_by_kurtosis' is incompatible with deterministic column selection"
msg += " ('prob_pick_pooled_gain' and ' prob_split_avg_gain'). Will be forced to 'False'."
warnings.warn(msg)
weigh_by_kurtosis = False
if nthreads is None:
nthreads = 1
elif nthreads < 0:
nthreads = multiprocessing.cpu_count() + 1 + nthreads
assert nthreads > 0
assert isinstance(nthreads, int)
if (nthreads > 1) and (not _get_has_openmp()):
msg_omp = "Attempting to use more than 1 thread, but "
msg_omp += "package was built without multi-threading "
msg_omp += "support - see the project's GitHub page for "
msg_omp += "more information."
warnings.warn(msg_omp)
if categ_cols is not None:
categ_cols = np.array(categ_cols).reshape(-1).astype(int)
categ_cols.sort()
self.sample_size = sample_size
self.ntrees = ntrees
self.ndim = ndim
self.ntry = ntry
self.categ_cols = categ_cols
self.max_depth = max_depth
self.ncols_per_tree = ncols_per_tree
self.prob_pick_avg_gain = prob_pick_avg_gain
self.prob_pick_pooled_gain = prob_pick_pooled_gain
self.prob_split_avg_gain = prob_split_avg_gain
self.prob_split_pooled_gain = prob_split_pooled_gain
self.min_gain = min_gain
self.missing_action = missing_action
self.new_categ_action = new_categ_action
self.categ_split_type = categ_split_type
self.coefs = coefs
self.depth_imp = depth_imp
self.weigh_imp_rows = weigh_imp_rows
self.min_imp_obs = min_imp_obs
self.random_seed = random_seed
self.nthreads = nthreads
self.all_perm = bool(all_perm)
self.recode_categ = bool(recode_categ)
self.coef_by_prop = bool(coef_by_prop)
self.weights_as_sample_prob = bool(weights_as_sample_prob)
self.sample_with_replacement = bool(sample_with_replacement)
self.penalize_range = bool(penalize_range)
self.standardize_data = bool(standardize_data)
self.weigh_by_kurtosis = bool(weigh_by_kurtosis)
self.assume_full_distr = bool(assume_full_distr)
self.build_imputer = bool(build_imputer)
self._reset_obj()
def _reset_obj(self):
self.cols_numeric_ = np.array([])
self.cols_categ_ = np.array([])
self._cat_mapping = list()
self._cat_max_lev = np.array([])
self._ncols_numeric = 0
self._ncols_categ = 0
self.is_fitted_ = False
self._ntrees = 0
self._cpp_obj = isoforest_cpp_obj()
self._is_extended_ = self.ndim > 1
def copy(self):
"""
Get a deep copy of this object
Returns
-------
copied : obj
A deep copy of this object
"""
if not self.is_fitted_:
self._cpp_obj = isoforest_cpp_obj()
return deepcopy(self)
else:
obj_restore = self._cpp_obj
obj_new = self._cpp_obj.deepcopy()
try:
self._cpp_obj = None
out = deepcopy(self)
finally:
self._cpp_obj = obj_restore
out._cpp_obj = obj_new
return out
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Kept for compatibility with scikit-learn.
Parameters
----------
deep : bool
Ignored.
Returns
-------
params : dict
Parameter names mapped to their values.
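        Examples
        --------
        A minimal sketch of the returned mapping (only constructor parameters are included):

        >>> model = IsolationForest(ntrees=10)
        >>> model.get_params()["ntrees"]
        10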
"""
import inspect
return {param.name:getattr(self, param.name) for param in inspect.signature(self.__init__).parameters.values()}
def set_params(self, **params):
"""
Set the parameters of this estimator.
Kept for compatibility with scikit-learn.
Note
----
Setting any parameter other than the number of threads will reset the model
- that is, if it was fitted to some data, the fitted model will be lost,
and it will need to be refitted before being able to make predictions.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
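        Examples
        --------
        A minimal sketch (note that setting a non-thread parameter marks the model as unfitted):

        >>> model = IsolationForest(ntrees=100).set_params(ntrees=200)
        >>> model.ntrees
        200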
"""
if not (len(params) == 1 and ("nthreads" in params or "n_jobs" in params)):
self.is_fitted_ = False
valid_params = self.get_params(deep=False)
for k,v in params.items():
if k not in valid_params:
raise ValueError("Invalid parameter: ", k)
setattr(self, k, v)
return self
def __str__(self):
msg = ""
if self._is_extended_:
msg += "Extended "
msg += "Isolation Forest model"
if (self.prob_pick_avg_gain + self.prob_pick_pooled_gain) > 0 or \
(self.ndim == 1 and (self.prob_split_avg_gain + self.prob_split_pooled_gain) > 0):
msg += " (using guided splits)"
msg += "\n"
if self.ndim > 1:
msg += "Splitting by %d variables at a time\n" % self.ndim
if self.is_fitted_:
msg += "Consisting of %d trees\n" % self._ntrees
if self._ncols_numeric > 0:
msg += "Numeric columns: %d\n" % self._ncols_numeric
if self._ncols_categ:
msg += "Categorical columns: %d\n" % self._ncols_categ
return msg
def __repr__(self):
return self.__str__()
def _get_model_obj(self):
return self._cpp_obj.get_cpp_obj(self._is_extended_)
def _get_imputer_obj(self):
return self._cpp_obj.get_imputer()
def _check_can_use_imputer(self, X_cat):
if (self.build_imputer) and (self.ndim == 1) and (X_cat is not None) and (X_cat.shape[1]):
if (self.categ_split_type != "single_categ") and (self.new_categ_action == "weighted"):
raise ValueError("Cannot build imputer with 'ndim=1' + 'new_categ_action=weighted'.")
if self.missing_action == "divide":
raise ValueError("Cannot build imputer with 'ndim=1' + 'missing_action=divide'.")
def fit(self, X, y = None, sample_weights = None, column_weights = None, categ_cols = None):
"""
Fit isolation forest model to data
Parameters
----------
X : array or array-like (n_samples, n_features)
Data to which to fit the model. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.
If passing a DataFrame, will assume that columns are:
- Numeric, if their dtype is a subtype of NumPy's 'number' or 'datetime64'.
- Categorical, if their dtype is 'object', 'Categorical', or 'bool'. Note that,
if `Categorical` dtypes are ordered, the order will be ignored here.
Other dtypes are not supported.
Note that, if passing NumPy arrays, they are used in column-major order (a.k.a. "Fortran arrays"),
and if they are not already in column-major format, will need to create a copy of the data.
y : None
Not used. Kept as argument for compatibility with SciKit-learn pipelining.
sample_weights : None or array(n_samples,)
Sample observation weights for each row of 'X', with higher weights indicating either higher sampling
probability (i.e. the observation has a larger effect on the fitted model, if using sub-samples), or
distribution density (i.e. if the weight is two, it has the same effect of including the same data
point twice), according to parameter 'weights_as_sample_prob' in the model constructor method.
column_weights : None or array(n_features,)
Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion.
If passing None, each column will have a uniform weight. Cannot be used when weighting by kurtosis.
categ_cols : None or array-like
Columns that hold categorical features, when the data is passed as an array or matrix.
Categorical columns should contain only integer values with a continuous numeration starting at zero,
with negative values and NaN taken as missing,
and the array or list passed here should correspond to the column numbers, with numeration starting
at zero. The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`).
This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.
This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as
categorical depending on their dtype.
Returns
-------
self : obj
This object.
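        Examples
        --------
        A minimal sketch with a mixed-type ``DataFrame`` (column names and values here are
        made up purely for illustration; 'num' will be taken as numeric and 'cat' as categorical):

        >>> import numpy as np, pandas as pd
        >>> df = pd.DataFrame({
        ...     "num" : np.random.default_rng(1).standard_normal(100),
        ...     "cat" : ["a", "b", "c", "d"] * 25,
        ... })
        >>> model = IsolationForest(ntrees=10, ndim=1, nthreads=1).fit(df)
        >>> model.is_fitted_
        True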
"""
self._init(categ_cols)
if (
self.sample_size is None
and (sample_weights is not None)
and (self.weights_as_sample_prob)
):
raise ValueError("Sampling weights are only supported when using sub-samples for each tree.")
if column_weights is not None and self.weigh_by_kurtosis:
raise ValueError("Cannot pass column weights when weighting columns by kurtosis.")
self._reset_obj()
X_num, X_cat, ncat, sample_weights, column_weights, nrows = self._process_data(X, sample_weights, column_weights)
self._check_can_use_imputer(X_cat)
if self.sample_size is None:
sample_size = nrows
elif self.sample_size == "auto":
sample_size = min(nrows, 10000)
if (sample_weights is not None) and (self.weights_as_sample_prob):
raise ValueError("Sampling weights are only supported when using sub-samples for each tree.")
elif self.sample_size <= 1:
sample_size = int(np.ceil(self.sample_size * nrows))
if sample_size < 2:
raise ValueError("Sampling proportion amounts to a single row or less.")
else:
sample_size = self.sample_size
if self.max_depth == "auto":
max_depth = 0
limit_depth = True
elif self.max_depth is None:
max_depth = nrows - 1
limit_depth = False
else:
max_depth = self.max_depth
limit_depth = False
if self.ncols_per_tree is None:
ncols_per_tree = 0
elif self.ncols_per_tree <= 1:
ncols_tot = 0
if X_num is not None:
ncols_tot += X_num.shape[1]
if X_cat is not None:
ncols_tot += X_cat.shape[1]
ncols_per_tree = int(np.ceil(self.ncols_per_tree * ncols_tot))
else:
ncols_per_tree = self.ncols_per_tree
if isinstance(self.random_state, np.random.RandomState):
seed = self.random_state.randint(np.iinfo(np.int32).max)
else:
seed = self.random_seed
self._cpp_obj.fit_model(_get_num_dtype(X_num, sample_weights, column_weights),
_get_int_dtype(X_num),
X_num, X_cat, ncat, sample_weights, column_weights,
ctypes.c_size_t(nrows).value,
ctypes.c_size_t(self._ncols_numeric).value,
ctypes.c_size_t(self._ncols_categ).value,
ctypes.c_size_t(self.ndim).value,
ctypes.c_size_t(self.ntry).value,
self.coefs,
ctypes.c_bool(self.coef_by_prop).value,
ctypes.c_bool(self.sample_with_replacement).value,
ctypes.c_bool(self.weights_as_sample_prob).value,
ctypes.c_size_t(sample_size).value,
ctypes.c_size_t(self.ntrees).value,
ctypes.c_size_t(max_depth).value,
ctypes.c_size_t(ncols_per_tree).value,
ctypes.c_bool(limit_depth).value,
ctypes.c_bool(self.penalize_range).value,
ctypes.c_bool(self.standardize_data).value,
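                                # NOTE: judging by the matching call in 'fit_predict' below, the five 'False'
                                # flags that follow appear to control the optional distance/outlierness outputs,
                                # which are not requested when only fitting the model.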
ctypes.c_bool(False).value,
ctypes.c_bool(False).value,
ctypes.c_bool(False).value,
ctypes.c_bool(False).value,
ctypes.c_bool(False).value,
ctypes.c_bool(self.weigh_by_kurtosis).value,
ctypes.c_double(self.prob_pick_avg_gain).value,
ctypes.c_double(self.prob_split_avg_gain).value,
ctypes.c_double(self.prob_pick_pooled_gain).value,
ctypes.c_double(self.prob_split_pooled_gain).value,
ctypes.c_double(self.min_gain).value,
self.missing_action,
self.categ_split_type,
self.new_categ_action,
ctypes.c_bool(self.build_imputer).value,
ctypes.c_size_t(self.min_imp_obs).value,
self.depth_imp,
self.weigh_imp_rows,
ctypes.c_bool(self.build_imputer).value,
ctypes.c_bool(False).value,
ctypes.c_uint64(seed).value,
ctypes.c_int(self.nthreads).value)
self.is_fitted_ = True
self._ntrees = self.ntrees
return self
def fit_predict(self, X, column_weights = None, output_outlierness = "score",
output_distance = None, square_mat = False, output_imputed = False,
categ_cols = None):
"""
Fit the model in-place and produce isolation or separation depths along the way
See the documentation of other methods ('init', 'fit', 'predict', 'predict_distance')
for details.
Note
----
The data must NOT contain any duplicate rows.
Note
----
This function will be faster at predicting average depths than calling 'fit' + 'predict'
separately when using full row samples.
Note
----
If using 'penalize_range' = 'True', the resulting scores/depths from this function might differ a bit
from those of 'fit' + 'predict' ran separately.
Note
----
Sample weights are not supported for this method.
Note
----
When using multiple threads, there can be small differences in the predicted scores or
average depth or separation/distance between runs due to roundoff error.
Parameters
----------
X : array or array-like (n_samples, n_features)
Data to which to fit the model. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.
If passing a DataFrame, will assume that columns are:
- Numeric, if their dtype is a subtype of NumPy's 'number' or 'datetime64'.
- Categorical, if their dtype is 'object', 'Categorical', or 'bool'. Note that,
if `Categorical` dtypes are ordered, the order will be ignored here.
Other dtypes are not supported.
column_weights : None or array(n_features,)
Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion.
If passing None, each column will have a uniform weight. Cannot be used when weighting by kurtosis.
Note that, if passing a DataFrame with both numeric and categorical columns, the column names must
not be repeated, otherwise the column weights passed here will not end up matching.
output_outlierness : None or str in ["score", "avg_depth"]
Desired type of outlierness output. If passing "score", will output standardized outlier score.
If passing "avg_depth" will output average isolation depth without standardizing.
If passing 'None', will skip outlierness calculations.
output_distance : None or str in ["dist", "avg_sep"]
Type of distance output to produce. If passing "dist", will standardize the average separation
depths. If passing "avg_sep", will output the average separation depth without standardizing it
(note that lower separation depth means furthest distance). If passing 'None', will skip distance calculations.
square_mat : bool
Whether to produce a full square matrix with the distances. If passing 'False', will output
only the upper triangular part as a 1-d array in which entry (i,j) with 0 <= i < j < n is located at
position p(i,j) = (i * (n - (i+1)/2) + j - i - 1).
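            For example, with n = 4 the pairs (0,1), (0,2), (0,3), (1,2), (1,3), (2,3) are stored
            at positions 0 through 5, so entry (1,3) is found at p(1,3) = 1 * (4 - (1+1)/2) + 3 - 1 - 1 = 4.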
Ignored when passing 'output_distance' = 'None'.
output_imputed : bool
Whether to output the data with imputed missing values. Model object must have been initialized
with 'build_imputer' = 'True'.
categ_cols : None or array-like
Columns that hold categorical features, when the data is passed as an array or matrix.
Categorical columns should contain only integer values with a continuous numeration starting at zero,
with negative values and NaN taken as missing,
and the array or list passed here should correspond to the column numbers, with numeration starting
at zero. The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`).
This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.
This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as
categorical depending on their dtype.
Returns
-------
output : array(n_samples,), or dict
Requested outputs about isolation depth (outlierness), pairwise separation depth (distance), and/or
imputed missing values. If passing either 'output_distance' or 'output_imputed', will return a dictionary
with keys "pred" (array(n_samples,)), "dist" (array(n_samples * (n_samples - 1) / 2,) or array(n_samples, n_samples)),
"imputed" (array-like(n_samples, n_columns)), according to whether each output type is present.
"""
self._init(categ_cols)
if (self.sample_size is not None) and (self.sample_size != "auto"):
raise ValueError("Cannot use 'fit_predict' when the sample size is limited.")
if self.sample_with_replacement:
raise ValueError("Cannot use 'fit_predict' or 'fit_transform' when sampling with replacement.")
if column_weights is not None and self.weigh_by_kurtosis:
raise ValueError("Cannot pass column weights when weighting columns by kurtosis.")
if (output_outlierness is None) and (output_distance is None):
raise ValueError("Must pass at least one of 'output_outlierness' or 'output_distance'.")
if output_outlierness is not None:
assert output_outlierness in ["score", "avg_depth"]
if output_distance is not None:
assert output_distance in ["dist", "avg_sep"]
if output_imputed:
if self.missing_action == "fail":
raise ValueError("Cannot impute missing values when using 'missing_action' = 'fail'.")
if not self.build_imputer:
msg = "Trying to impute missing values from object "
msg += "that was initialized with 'build_imputer' = 'False' "
msg += "- will force 'build_imputer' to 'True'."
warnings.warn(msg)
self.build_imputer = True
self._reset_obj()
X_num, X_cat, ncat, sample_weights, column_weights, nrows = self._process_data(X, None, column_weights)
self._check_can_use_imputer(X_cat)
if (output_imputed) and (issparse(X_num)):
msg = "Imputing missing values from CSC matrix on-the-fly can be very slow, "
msg += "it's recommended if possible to fit the model first and then pass the "
msg += "same matrix as CSR to 'transform'."
warnings.warn(msg)
if self.max_depth == "auto":
max_depth = 0
limit_depth = True
        elif self.max_depth is None:
            max_depth = nrows - 1
            limit_depth = False
        else:
            max_depth = self.max_depth
            limit_depth = False
if self.ncols_per_tree is None:
ncols_per_tree = 0
elif self.ncols_per_tree <= 1:
ncols_tot = 0
if X_num is not None:
ncols_tot += X_num.shape[1]
if X_cat is not None:
ncols_tot += X_cat.shape[1]
ncols_per_tree = int(np.ceil(self.ncols_per_tree * ncols_tot))
else:
ncols_per_tree = self.ncols_per_tree
if isinstance(self.random_state, np.random.RandomState):
seed = self.random_state.randint(np.iinfo(np.int32).max)
else:
seed = self.random_seed
depths, tmat, dmat, X_num, X_cat = self._cpp_obj.fit_model(_get_num_dtype(X_num, None, column_weights),
_get_int_dtype(X_num),
X_num, X_cat, ncat, None, column_weights,
ctypes.c_size_t(nrows).value,
ctypes.c_size_t(self._ncols_numeric).value,
ctypes.c_size_t(self._ncols_categ).value,
ctypes.c_size_t(self.ndim).value,
ctypes.c_size_t(self.ntry).value,
self.coefs,
ctypes.c_bool(self.coef_by_prop).value,
ctypes.c_bool(self.sample_with_replacement).value,
ctypes.c_bool(self.weights_as_sample_prob).value,
ctypes.c_size_t(nrows).value,
ctypes.c_size_t(self.ntrees).value,
ctypes.c_size_t(max_depth).value,
ctypes.c_size_t(ncols_per_tree).value,
ctypes.c_bool(limit_depth).value,
ctypes.c_bool(self.penalize_range).value,
ctypes.c_bool(self.standardize_data).value,
ctypes.c_bool(output_distance is not None).value,
ctypes.c_bool(output_distance == "dist").value,
ctypes.c_bool(square_mat).value,
ctypes.c_bool(output_outlierness is not None).value,
ctypes.c_bool(output_outlierness == "score").value,
ctypes.c_bool(self.weigh_by_kurtosis).value,
ctypes.c_double(self.prob_pick_avg_gain).value,
ctypes.c_double(self.prob_split_avg_gain).value,
ctypes.c_double(self.prob_pick_pooled_gain).value,
ctypes.c_double(self.prob_split_pooled_gain).value,
ctypes.c_double(self.min_gain).value,
self.missing_action,
self.categ_split_type,
self.new_categ_action,
ctypes.c_bool(self.build_imputer).value,
ctypes.c_size_t(self.min_imp_obs).value,
self.depth_imp,
self.weigh_imp_rows,
ctypes.c_bool(output_imputed).value,
ctypes.c_bool(self.all_perm).value,
ctypes.c_uint64(seed).value,
ctypes.c_int(self.nthreads).value)
self.is_fitted_ = True
self._ntrees = self.ntrees
if (not output_distance) and (not output_imputed):
return depths
else:
outp = {"pred" : depths}
if output_distance:
if square_mat:
outp["dist"] = dmat
else:
outp["dist"] = tmat
if output_imputed:
outp["imputed"] = self._rearrange_imputed(X, X_num, X_cat)
return outp
def _process_data(self, X, sample_weights, column_weights):
### TODO: this needs a refactoring after introducing 'categ_cols'
if X.__class__.__name__ == "DataFrame":
if self.categ_cols is not None:
warnings.warn("'categ_cols' is ignored when passing a DataFrame as input.")
self.categ_cols = None
### https://stackoverflow.com/questions/25039626/how-do-i-find-numeric-columns-in-pandas
X_num = X.select_dtypes(include = [np.number, np.datetime64]).to_numpy()
if X_num.dtype not in [ctypes.c_double, ctypes.c_float]:
X_num = X_num.astype(ctypes.c_double)
if not _is_col_major(X_num):
X_num = np.asfortranarray(X_num)
X_cat = X.select_dtypes(include = [pd.CategoricalDtype, "object", "bool"])
if (X_num.shape[1] + X_cat.shape[1]) == 0:
raise ValueError("Input data has no columns of numeric or categorical type.")
elif (X_num.shape[1] + X_cat.shape[1]) < X.shape[1]:
cols_num = np.array(X.select_dtypes(include = [np.number, np.datetime64]).columns.values)
                cols_cat = np.array(X_cat.columns.values)
"""ROSS plotting module.
This module returns graphs for each type of analyses in rotor_assembly.py.
"""
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import scipy.linalg as la
from plotly.subplots import make_subplots
from scipy import interpolate
# set Plotly palette of colors
colors1 = px.colors.qualitative.Dark24
class ModalResults:
"""Class used to store results and provide plots for Modal Analysis.
    Two options for plotting are available: plot_mode3D (mode shape 3D view)
and plot_mode2D (mode shape 2D view). The user chooses between them using
the respective methods.
Parameters
----------
speed : float
Rotor speed.
evalues : array
Eigenvalues array.
evectors : array
Eigenvectors array.
wn : array
Undamped natural frequencies array.
wd : array
Damped natural frequencies array.
log_dec : array
        Logarithmic decrement for each mode.
damping_ratio : array
Damping ratio for each mode.
lti : StateSpaceContinuous
        Continuous state-space model with A, B, C and D matrices.
ndof : int
Number of degrees of freedom.
nodes : list
List of node numbers.
nodes_pos : list
List of node positions.
shaft_elements_length : list
List with the rotor shaft element lengths.
"""
def __init__(
self,
speed,
evalues,
evectors,
wn,
wd,
damping_ratio,
log_dec,
lti,
ndof,
nodes,
nodes_pos,
shaft_elements_length,
):
self.speed = speed
self.evalues = evalues
self.evectors = evectors
self.wn = wn
self.wd = wd
self.damping_ratio = damping_ratio
self.log_dec = log_dec
self.lti = lti
self.ndof = ndof
self.nodes = nodes
self.nodes_pos = nodes_pos
self.shaft_elements_length = shaft_elements_length
self.modes = self.evectors[: self.ndof]
kappa_modes = []
for mode in range(len(self.wn)):
kappa_color = []
kappa_mode = self.kappa_mode(mode)
for kappa in kappa_mode:
kappa_color.append("blue" if kappa > 0 else "red")
kappa_modes.append(kappa_color)
self.kappa_modes = kappa_modes
@staticmethod
def whirl(kappa_mode):
"""Evaluate the whirl of a mode.
Parameters
----------
kappa_mode : list
A list with the value of kappa for each node related
to the mode/natural frequency of interest.
Returns
-------
whirldir : str
A string indicating the direction of precession related to the
kappa_mode.
Example
-------
>>> kappa_mode = [-5.06e-13, -3.09e-13, -2.91e-13, 0.011, -4.03e-13, -2.72e-13, -2.72e-13]
>>> ModalResults.whirl(kappa_mode)
'Forward'
"""
if all(kappa >= -1e-3 for kappa in kappa_mode):
whirldir = "Forward"
elif all(kappa <= 1e-3 for kappa in kappa_mode):
whirldir = "Backward"
else:
whirldir = "Mixed"
return whirldir
@staticmethod
@np.vectorize
def whirl_to_cmap(whirl):
"""Map the whirl to a value.
Parameters
----------
whirl: string
A string indicating the whirl direction related to the kappa_mode
Returns
-------
An array with reference index for the whirl direction
Example
-------
>>> whirl = 'Backward'
>>> ModalResults.whirl_to_cmap(whirl)
array(1.)
"""
if whirl == "Forward":
return 0.0
elif whirl == "Backward":
return 1.0
elif whirl == "Mixed":
return 0.5
def H_kappa(self, node, w, return_T=False):
r"""Calculate the H matrix for a given node and natural frequency.
The matrix H contains information about the whirl direction,
the orbit minor and major axis and the orbit inclination.
The matrix is calculated by :math:`H = T.T^T` where the
matrix T is constructed using the eigenvector corresponding
to the natural frequency of interest:
.. math::
:nowrap:
\begin{eqnarray}
\begin{bmatrix}
u(t)\\
v(t)
\end{bmatrix}
= \mathfrak{R}\Bigg(
\begin{bmatrix}
r_u e^{j\eta_u}\\
r_v e^{j\eta_v}
\end{bmatrix}\Bigg)
e^{j\omega_i t}
=
\begin{bmatrix}
r_u cos(\eta_u + \omega_i t)\\
r_v cos(\eta_v + \omega_i t)
\end{bmatrix}
= {\bf T}
\begin{bmatrix}
cos(\omega_i t)\\
sin(\omega_i t)
\end{bmatrix}
\end{eqnarray}
Where :math:`r_u e^{j\eta_u}` and :math:`r_v e^{j\eta_v}` are the
elements of the *i*\th eigenvector, corresponding to the node and
natural frequency of interest (mode).
.. math::
{\bf T} =
\begin{bmatrix}
r_u cos(\eta_u) & -r_u sin(\eta_u)\\
r_v cos(\eta_v) & -r_v sin(\eta_v)
\end{bmatrix}
Parameters
----------
node: int
Node for which the matrix H will be calculated.
w: int
Index corresponding to the natural frequency
of interest.
return_T: bool, optional
If True, returns the H matrix and a dictionary with the
values for :math:`r_u, r_v, \eta_u, \eta_v`.
Default is False.
Returns
-------
H: array
Matrix H.
Tdic: dict
Dictionary with values for :math:`r_u, r_v, \eta_u, \eta_v`.
It will be returned only if return_T is True.
"""
# get vector of interest based on freqs
vector = self.evectors[4 * node : 4 * node + 2, w]
# get translation sdofs for specified node for each mode
u = vector[0]
v = vector[1]
ru = np.absolute(u)
rv = np.absolute(v)
nu = np.angle(u)
nv = np.angle(v)
# fmt: off
T = np.array([[ru * np.cos(nu), -ru *
|
np.sin(nu)
|
numpy.sin
|
import sys, getopt
import numpy as np
import datetime
import locale
import platform
import time
import getpass
from random import randrange
from matplotlib import pyplot as plt
from matplotlib import ticker
from matplotlib import pylab
from pylab import *
from jira import JIRA
# prompt for Jira username
username = input("Please enter your Jira username: ")
# prompt for Jira password
password = getpass.getpass(prompt="Please enter your Jira password: ")
# Note that you'll need to have numpy, matplotlib, pylab, and jira packages installed (all are available on pip)
if platform.system() == 'Windows':
locale.setlocale( locale.LC_MONETARY, 'fr-FR' )
else:
locale.setlocale( locale.LC_ALL, 'en_IE.UTF-8' )
def mybar(ax,x1,x2,y2):
Xbars = [[0., .3],[.7,.4]]
left,right = x1,x2
bottom,top = 0.0,y2
## ax.imshow(Xbars, interpolation='bicubic', cmap=cm.Blues,
## extent=(left, right, bottom, top), alpha=1)
return
# Format the date to the proper form of year, month, day
def format_date(x, pos=None):
# return pl.num2date(x).strftime('%Y-%m-%d')
return pylab.num2date(x).strftime('%b, %Y')
# Use a sorting method of the "data" to find the total cost at a specified confidence level "breakfraction"
def percentage(data,breakfraction):
breaknumber = int(breakfraction * len(data))
# data is a list of total costs for each iteration, sort from lowest cost to highest cost
data.sort() # sorts input from lowest to highest value
return data[breaknumber]
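# A minimal sanity-check sketch for percentage(); the helper below and its numbers are
# illustrative only and are not part of the original script.
def _example_percentage():
    """Return the 80% confidence cost for a made-up list of trial totals."""
    example_costs = [10.0, 20.0, 30.0, 40.0, 50.0]
    # int(0.8 * 5) == 4, so the value returned is the fifth sorted cost, 50.0
    return percentage(example_costs, 0.8)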
def xstr(s): # this is just to handle converting lists to strings when there might be some empty values
if s is None:
return ''
return str(s)
def montecarlorisk(num_trials,annual_escalation,subsystem,output_file):
## define output location; if variable output_file is true then output goes to test.txt in working directory
fhold = sys.stdout
if output_file:
f = open('./test.txt', 'w')
sys.stdout = f
#########################################################################################
###################### Some basic values ###############################
#########################################################################################
total_contingency = 230694.0 # total contingency in K EUR TBD !!!!
nyears = 7 ## number of years with construction activity
date_start = "2021-07-01"
date_end = "2028-07-31"
date_commissioning_start = "2028-08-01"
date_base_year = "2021"
date_year_start = "2021"
date_year_end = "2028"
annual_esc = 1.0 + annual_escalation # convert annual fractional escalation to factor
yer = ['2021','2022','2023','2024','2025','2026','2027','2028']
final_totals_distribution = []
#cost_lowest = np.zeros(1000)
#cost_expected = np.zeros(1000)
#cost_highest = np.zeros(1000)
subsystem = subsystem.upper()
if subsystem == 'ALL':
fundingstring = " "
projectname = "SKA"
elif subsystem == 'MID':
fundingstring = " AND component = 'MID' "
projectname = 'MID'
elif subsystem == 'LOW':
fundingstring = " AND component = 'LOW' "
projectname = 'LOW'
elif subsystem == 'OCS':
fundingstring = " AND component = 'OCS' "
projectname = 'OCS'
elif subsystem == 'PM':
fundingstring = " AND component = 'PM' "
projectname = 'PM'
##############################################################################
################### Simple escalation model
##############################################################################
sum = 0.0
escalate = {} # a dictionary of year -> escalation factor
escalate[date_base_year] = 1.0
for jj in range(nyears):
escalate[yer[jj+1]] = escalate[yer[jj]] * annual_esc
sum += escalate[yer[jj+1]]
escalate['dist_sum'] = sum/nyears
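## Worked example with a hypothetical rate: for annual_escalation = 0.03 the factors are
## escalate['2021'] = 1.0, escalate['2022'] = 1.03, escalate['2023'] = 1.03**2, and so on,
## while escalate['dist_sum'] is the mean of the 2022-2028 factors, applied further down
## to risks that are not tied to a single trigger date.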
server = "https://jira.skatelescope.org"
auth_inf = (username,password)
try:
jira = JIRA(server=server,basic_auth=auth_inf)
except:
print("ERROR: Jira authentication failed. Have you provided the correct username and password?")
return
# AND (cf[12916] is EMPTY OR cf[12916] ='False')
query = "project=RM AND issuetype='RM-Risk' AND status in ('Active Risk/Opportunity','Subordinated') " + fundingstring + "ORDER BY cf[12933]"
fields="components,summary,customfield_12926,customfield_12901,customfield_12905,customfield_12915,customfield_12933,customfield_12936,customfield_12938,description"
print(('\n\r Query to database \n\r\n\r'+ query +'\n\r'))
issues = jira.search_issues(query,maxResults=None,fields=fields)
nrisks = len(issues)
mean_prob_lookup = {'2%':0.02,
'5%':0.05,
'10%':0.1,
'25%':0.25,
'50%':0.5,
'80%':0.8}
rows=[]
for i in range(len(issues)):
rows.append({'riskid':int(''.join([ch for ch in issues[i].key if ch.isdigit()])),
'projectsystem':xstr(issues[i].fields.components[0].name),
'current_probability':xstr(issues[i].fields.customfield_12926), #map from 13200
'current_expense_expected':(float(issues[i].fields.customfield_12901) if issues[i].fields.customfield_12901 else 0.0), #map from 13404
'current_schedule_cost_expected':(float(issues[i].fields.customfield_12905) if issues[i].fields.customfield_12905 else 0.0), #map from 13606
'meanprobability':mean_prob_lookup[issues[i].fields.customfield_12926.value], #map from 13200
'total_cost':0.0,
'obligationmodel':xstr(issues[i].fields.customfield_12915), #map from 13107
'triggerdate':(datetime.datetime.strptime(issues[i].fields.customfield_12933,'%Y-%m-%d').date() if issues[i].fields.customfield_12933 else datetime.date(2000,1,1)), #map from 13108
'randomtrigger':(int(issues[i].fields.customfield_12938) if issues[i].fields.customfield_12938 else 0), #map from 13110
'risktitle':xstr(issues[i].fields.summary),
'riskdescription':xstr(issues[i].fields.description),
'randomperiod':xstr(issues[i].fields.customfield_12936) }) # map from 13111
# setup lists
nyears=[1 for i in range(nrisks)]
riskheader = [' ' for i in range(20000)]
riskid=[] # issue.key
projectsystem=[] # issue.fields.components
current_probability=[] # issue.fields.customfield_12926
current_expense_expected=[] # issue.fields.customfield_12901
current_schedule_cost_expected=[] # issue.fields.customfield_12905
meanprobability=[] # calculate from cf 12926
total_cost=[] # issue.fields.customfield_12905 + issue.customfield_12901
obligationmodel=[] # issue.fields.customfield_12915
triggerdate=[] # issue.fields.customfield_12933
randomtrigger=[] # issue.fields.customfield_12936 and issue.customfield_12938
risktitle=[] # issue.fields.summary
riskdescription = [] # issue.fields.description
randomperiod = []
## Rule 0 - Accept all risks, simple passthrough
## print "\n\r Rule 1 - Accept only risks that have total cost of more than €1M \n\r"
## print "\n\r Rule 2 - Accept only risks that have expected exposure of more that €200K \n\r"
## print "\n\r Rule 3 - Accept risks that pass Rule 1 OR Rule 2 \n\r"
## Store the database values into arrays
print('\n\r Summary of risks ordered by triggerdate \n\r\n\r')
for ii in range(nrisks):
lasttotalcost = (float(rows[ii]['current_expense_expected'])+float(rows[ii]['current_schedule_cost_expected']))
##############################################################################
################### Use simple model of escalation to convert to as-spent dollars
##############################################################################
if rows[ii]['obligationmodel'] == "trigger" :
yr = rows[ii]['triggerdate'].year
yr = max(int(date_year_start),int(yr))
yr = min(int(date_year_end),int(yr))
lasttotalcost = lasttotalcost * escalate[str(yr)]
else:
lasttotalcost = lasttotalcost * escalate['dist_sum']
##############################################################################
if lasttotalcost >= 0.00:
## print("\n\r Rule 0 - Accept all risks, simple passthrough \n\r")
## Rule 1 - Accept only risks that have total cost of more than €1M
## if lasttotalcost >= 1000.00:
## Rule 2 - Accept only risks that have expected exposure of more that €200K
## if float(rows[ii]['meanprobability'])*lasttotalcost >= 200.0:
## Rule 3 - Accept risks that pass Rule 1 OR Rule 2
## if float(rows[ii]['meanprobability'])*lasttotalcost >= 200.0 or lasttotalcost >= 1000.00:
riskid.append(rows[ii]['riskid'])
projectsystem.append(rows[ii]['projectsystem'])
current_probability.append(rows[ii]['current_probability'])
current_expense_expected.append(rows[ii]['current_expense_expected'])
current_schedule_cost_expected.append(rows[ii]['current_schedule_cost_expected'])
meanprobability.append(float(rows[ii]['meanprobability']))
obligationmodel.append(rows[ii]['obligationmodel'])
triggerdate.append(rows[ii]['triggerdate'])
randomtrigger.append(rows[ii]['randomtrigger'])
risktitle.append(rows[ii]['risktitle'])
riskdescription.append(rows[ii]['riskdescription'])
total_cost.append(lasttotalcost)
randomperiod.append(rows[ii]['randomperiod'])
## Print formatted output
print('{:>30} RM-{:4} {:>10} {:>22} {:>5} [{:>8.2f} {:>8.2f}] {:>8.2f} {:40} {:80}'.format(
rows[ii]['projectsystem'],
str(rows[ii]['riskid']),
str(rows[ii]['triggerdate']),
#rows[ii]['obligationmodel'][0:4],
rows[ii]['obligationmodel'],
#rows[ii]['randomtrigger'] % 1000,
rows[ii]['randomtrigger'],
lasttotalcost,
rows[ii]['meanprobability'],
float(rows[ii]['meanprobability'])*lasttotalcost,
str(rows[ii]['risktitle']),
str(rows[ii]['riskdescription']),
))
nrisks = len(riskid)
## Print risks ordered by riskid
print(('\n\r Summary of {:>3} risks ordered by riskid \n\r\n\r'.format(str(nrisks))))
hold_riskid,hold_projectsystem,hold_risktitle = (list(t) for t in zip(*sorted(zip(riskid,projectsystem,risktitle))))
for ii in range(nrisks):
print('{:>30} RM-{:3} {:40}'.format( hold_projectsystem[ii],str(hold_riskid[ii]),hold_risktitle[ii]))
## Print risk description ordered by totalcost
print(('\n\r Summary of {:>3} risks ordered by totalcost \n\r\n\r'.format(str(nrisks))))
hold_total_cost,hold_riskdescription,hold_projectsystem,hold_riskid,hold_meanprobability = (list(t) for t in zip(*sorted(zip(total_cost,riskdescription,projectsystem,riskid,meanprobability), reverse=True)))
for ii in range(nrisks):
print('{:>30} RM-{:3} €{:8,.7}K [{:<4}] {:<100}'.format( hold_projectsystem[ii],str(hold_riskid[ii]),hold_total_cost[ii],hold_meanprobability[ii],hold_riskdescription[ii]))
## Figure 4
## Interaction loop over risks. Also, plot fig 4 with the risk spend curve
max_hold = 0.0
fig4 = plt.figure(4)
ax1 = fig4.add_subplot(111)
###################################################################
############ Begin main Monte Carlo iteration loop ################
###################################################################
for ii in range(num_trials):
delta_this_iteration = []
triggerdate_this_iteration = []
projectsystem_this_iteration = []
riskid_this_iteration = []
###################################################################
############ Random loop over each risk ################
###################################################################
##
## Each risk has a specified date of possible occurrence. A risk can occur at a specified trigger date,
## at some random time, or more than once over a specified range of dates.
## Trigger case
for jj in range(nrisks):
if obligationmodel[jj] == "Trigger date":
choice=np.random.uniform(0.0,1.0,1)
if choice <= meanprobability[jj] :
addit = float(total_cost[jj])
else:
addit = float(0.0)
delta_this_iteration.append(addit)
triggerdate_this_iteration.append(triggerdate[jj])
projectsystem_this_iteration.append(projectsystem[jj])
riskid_this_iteration.append(int(riskid[jj]))
## Random case
elif obligationmodel[jj] == "Random occurrence(s)":
nrandom = randomtrigger[jj]
#print("random risk; nrandom = "+str(nrandom))
#periodcode = randomtrigger[jj] / 1000
#print("random risk periodcode = "+str(periodcode))
periodcode = 3
if randomperiod[jj] == 'Construction only':
periodcode = 1
elif randomperiod[jj] == 'Commissioning only':
periodcode = 2
elif randomperiod[jj] == 'Both Construction and Commissioning':
periodcode = 3
date1 = date_start
date2 = date_commissioning_start
if periodcode == 1: # random during construction only
date1 = date_start
date2 = date_commissioning_start
elif periodcode == 2: # random during commissioning only
date1 = date_commissioning_start
date2 = date_end
elif periodcode == 3: # random throughout project
date1 = date_start
date2 = date_end
for kk in range(nrandom):
stime = time.mktime(time.strptime(date1, '%Y-%m-%d'))
etime = time.mktime(time.strptime(date2, '%Y-%m-%d'))
ptime = stime + np.random.uniform(0.0, etime - stime)
randomdate = datetime.date.fromtimestamp(int(ptime))
#print(randomdate)
choice = np.random.uniform(0.0,1.0)
if choice <= meanprobability[jj] :
addit = float(total_cost[jj])/float(nrandom)
else:
addit = float(0.0)
delta_this_iteration.append(addit)
triggerdate_this_iteration.append(randomdate)
projectsystem_this_iteration.append(projectsystem[jj])
riskid_this_iteration.append(int(riskid[jj]))
## Distributed case
elif obligationmodel[jj] == "Distributed occurrence":
if ii == 0: # only on first pass through will triggerdate always have the proper value
#print ii,jj,triggerdate[jj],triggerdate[jj].year
ny = max(triggerdate[jj].year - 2021,1) # risk is distributed over this many years but must be at least 1
nyears[jj] = min(ny,8) # must store the correct values of nyears for each distributed risk
for kk in range(nyears[jj]):
year = 2022 + kk #kk starts at zero. Don't include short period in 2021
choice=np.random.uniform(0.0,1.0,1)
if choice <= meanprobability[jj] :
addit = float(total_cost[jj])/float(nyears[jj])
else:
addit = float(0.0)
delta_this_iteration.append(addit)
triggerdate_this_iteration.append(datetime.date(year,randrange(1,12),1)) # random month in year, always assign the first day of the month
projectsystem_this_iteration.append(projectsystem[jj])
riskid_this_iteration.append(int(riskid[jj]))
else:
sys.exit(" obligationmode not defined for risk "+str(projectsystem[jj]) + str(riskid[jj])+" " +str(jj))
###################################################################
############ End short random loop over risk ################
###################################################################
# Since random and distributed risks have been added the lists are no longer in date order.
# Need to resort the two arrays by effective trigger dates using: list1, list2 = (list(t) for t in zip(*sorted(zip(list1, list2)))) - YIKES
#print(riskid_this_iteration)
triggerdate_this_iteration, delta_this_iteration,projectsystem_this_iteration, riskid_this_iteration = (list(t) for t in zip(*sorted(zip(triggerdate_this_iteration,delta_this_iteration,projectsystem_this_iteration,riskid_this_iteration))))
#print(type(riskid_this_iteration),riskid_this_iteration)
#print(" ")
#print(delta_this_iteration)
# Compute the running sum
xx_this_iteration =
|
np.cumsum(delta_this_iteration)
|
numpy.cumsum
|
"""
Definition of tasks needed for calculating features
"""
import abc
from datetime import date, datetime, time, timedelta
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from eolearn.core import EOPatch, EOTask, FeatureType, MapFeatureTask
from ..utils.types import Feature
def join_valid_and_cloud_masks(valid_mask: np.ndarray, cloud_mask: np.ndarray) -> np.ndarray:
"""Used to zip together information about valid data and clouds into a combined validity mask"""
return valid_mask.astype(bool) & (cloud_mask == 0)
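# A minimal usage sketch; the helper below uses synthetic arrays for illustration and is
# not part of the original module.
def _example_join_masks() -> np.ndarray:
    """Combine a toy validity mask with a toy cloud mask."""
    valid = np.array([[True, True]])
    clouds = np.array([[0, 1]])
    # Only the first pixel is both valid and cloud-free -> array([[ True, False]])
    return join_valid_and_cloud_masks(valid, clouds)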
class ValidDataFractionPredicate:
"""
Predicate that defines whether a frame from an EOPatch's time series is valid or not. A frame is valid if the
valid data fraction is above the specified threshold.
"""
def __init__(self, validity_threshold: float):
self.validity_threshold = validity_threshold
def __call__(self, array: np.ndarray) -> np.ndarray:
coverage = np.sum(array.astype(np.uint8)) / np.prod(array.shape)
return coverage > self.validity_threshold
class MaxNDVI(MapFeatureTask):
def map_method(self, feature: np.ndarray) -> np.ndarray:
if feature.shape[0]:
return np.nanmax(feature, axis=0)
# A special case of arrays with time dimension of size 0
return np.full(feature.shape[1:], np.nan, dtype=feature.dtype)
class MosaickingTask(EOTask, metaclass=abc.ABCMeta):
"""Base class for mosaicking images given an interval of edge dates"""
def __init__(
self,
feature: Feature,
dates: Union[List[date], Tuple[date, date, int]],
valid_mask: Optional[Feature] = None,
ndvi_feature: Optional[Feature] = None,
):
self.feature_type, self.feature_name, self.new_feature_name = self.parse_renamed_feature(
feature, allowed_feature_types={FeatureType.DATA}
)
self.valid_mask_type, self.valid_mask_name = None, None
if valid_mask is not None:
self.valid_mask_type, self.valid_mask_name = self.parse_feature(
valid_mask, allowed_feature_types={FeatureType.MASK}
)
self.ndvi_feature_type, self.ndvi_feature_name = None, None
if ndvi_feature is not None:
self.ndvi_feature_type, self.ndvi_feature_name = self.parse_feature(
ndvi_feature, allowed_feature_types={FeatureType.DATA}
)
self.dates = self._get_dates(dates)
def _get_dates(self, dates: Union[List[date], Tuple[date, date, int]]) -> np.ndarray:
"""Set dates either from list of dates or a tuple (start_date, end_date, n_mosaics)"""
if all(isinstance(d, (date, datetime)) for d in dates):
return np.array(dates)
if len(dates) == 3 and isinstance(dates[-1], int):
return self._get_date_edges(*dates)
raise ValueError(
"dates parameter can be either a list of date(time)s or a tuple "
"(start_date, end_date, n_mosaics) for equidistant intervals between start and end date."
)
@staticmethod
def _get_date_edges(start_date: date, end_date: date, parts: int) -> np.ndarray:
"""Help function to get dates of year split into equal parts
:param start_date: first date of time interval
:param end_date: last date of time interval
:param parts: Number of parts to split the time interval into
:return: numpy array of dates that split the time interval into equal parts
"""
start = datetime.combine(start_date, time.min)
end = datetime.combine(end_date, time.min) + timedelta(days=1)
diff = (end - start) / parts
edges = [start + diff * i for i in range(parts)]
edges.append(end)
return np.array(edges)
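# Worked example with arbitrary dates: _get_date_edges(date(2021, 1, 1), date(2021, 12, 31), 2)
# spans 2021-01-01 00:00 to 2022-01-01 00:00 (end date plus one day) and returns three edges:
# the start, the midpoint 2021-07-02 12:00, and the end.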
def _find_time_indices(self, timestamps: Sequence[date], index: int) -> Tuple[np.ndarray, ...]:
"""Compute indices of images to use for mosaicking"""
if index == 1:
array = np.where((np.array(timestamps) <= self.dates[index]))
elif index == len(self.dates) - 1:
array = np.where((np.array(timestamps) > self.dates[index - 1]))
else:
array = np.where(
(np.array(timestamps) > self.dates[index - 1]) & (np.array(timestamps) <= self.dates[index])
)
return array
def compute_mosaic_dates(self) -> np.ndarray:
"""Compute dates of corresponding mosaics"""
# calculate centers of date edges
delta = self.dates[1:] - self.dates[:-1]
return self.dates[:-1] + delta / 2
@abc.abstractmethod
def _compute_single_mosaic(self, eopatch: EOPatch, idate: int) -> np.ndarray:
"""Compute single mosaic given index of edge date"""
def compute_mosaic(self, eopatch: EOPatch) -> np.ndarray:
"""Computes mosaic"""
return np.array([self._compute_single_mosaic(eopatch, idate) for idate in range(1, len(self.dates))])
def execute(self, eopatch: EOPatch) -> EOPatch:
"""Compute mosaic for given dates"""
eopatch.timestamp = [ts.replace(tzinfo=None) for ts in eopatch.timestamp]
eopatch[self.feature_type][self.new_feature_name] = self.compute_mosaic(eopatch)
eopatch.timestamp = list(self.compute_mosaic_dates())
return eopatch
class MaxNDVIMosaickingTask(MosaickingTask):
"""
Task to create mosaics of data based on the max NDVI value between provided dates
"""
def __init__(
self,
feature: Feature,
dates: Union[List[date], Tuple[date, date, int]],
ndvi_feature: Feature,
valid_mask: Optional[Feature] = None,
):
super().__init__(feature, dates, ndvi_feature=ndvi_feature, valid_mask=valid_mask)
def _compute_single_mosaic(self, eopatch: EOPatch, idate: int) -> np.ndarray:
"""Compute single mosaic using values of the max NDVI"""
array = self._find_time_indices(eopatch.timestamp, idate)
feat_values = eopatch[self.feature_type][self.feature_name][array].astype(np.float32)
ndvi_values = eopatch[self.ndvi_feature_type][self.ndvi_feature_name][array]
valid_mask = (
eopatch[self.valid_mask_type][self.valid_mask_name][array]
if self.valid_mask_type is not None
else np.ones(feat_values.shape, dtype=bool)
).astype(bool)
ndvi_values[~valid_mask] = np.nan
feat_values[~np.broadcast_to(valid_mask, feat_values.shape)] = np.nan
mask_nan_slices = np.all(np.isnan(ndvi_values), axis=0, keepdims=True)
ndvi_values[np.broadcast_to(mask_nan_slices, ndvi_values.shape)] = -999
feat_values[np.broadcast_to(mask_nan_slices, feat_values.shape)] = -999
timeframes, height, width, depth = feat_values.shape
if timeframes == 0:
mosaic = np.full((height, width, depth), np.nan)
else:
if timeframes == 1:
mosaic = feat_values[0]
else:
indices = np.nanargmax(ndvi_values, axis=0).squeeze(axis=-1)
ixgrid: Tuple[np.ndarray, ...] = np.ix_(np.arange(timeframes), np.arange(height), np.arange(width))
mosaic = feat_values[indices, ixgrid[1], ixgrid[2], :].squeeze(axis=0)
mosaic[np.broadcast_to(mask_nan_slices[0], mosaic.shape)] = np.nan
return mosaic
class MedianMosaickingTask(MosaickingTask):
"""
Task to create mosaics of data based on the median value between provided dates
"""
def __init__(
self,
feature: Feature,
dates: Union[List[date], Tuple[date, date, int]],
valid_mask: Optional[Feature] = None,
):
super().__init__(feature, dates, valid_mask=valid_mask)
def _compute_single_mosaic(self, eopatch: EOPatch, idate: int) -> np.ndarray:
"""Compute single mosaic using the median of values"""
array = self._find_time_indices(eopatch.timestamp, idate)
feat_values = eopatch[self.feature_type][self.feature_name][array].astype(np.float32)
valid_mask = (
eopatch[self.valid_mask_type][self.valid_mask_name][array]
if self.valid_mask_type is not None
else np.ones(feat_values.shape, dtype=bool)
)
feat_values[~np.broadcast_to(valid_mask, feat_values.shape)] = np.nan
mask_nan_slices = np.all(np.isnan(feat_values), axis=0, keepdims=True)
feat_values[np.broadcast_to(mask_nan_slices, feat_values.shape)] = -999
timeframes, height, width, depth = feat_values.shape
if timeframes == 0:
mosaic =
|
np.full((height, width, depth), np.nan)
|
numpy.full
|
#!/usr/bin/python
# Wflow is Free software, see below:
#
# Copyright (c) <NAME>/Deltares 2005-2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Run the wflow_sbm hydrological model.
usage
::
wflow_sbm [-h][-v level][-F runinfofile][-L logfile][-C casename][-R runId]
[-c configfile][-T last_step][-S first_step][-s seconds][-W][-E][-N][-U discharge]
[-P parameter multiplication][-X][-f][-I][-i tbl_dir][-x subcatchId][-u updatecols]
[-p inputparameter multiplication][-l loglevel]
-X: save state at the end of the run over the initial conditions at the start
-f: Force overwrite of existing results
-T: Set end time of the run: yyyy-mm-dd hh:mm:ss
-S: Set start time of the run: yyyy-mm-dd hh:mm:ss
-s: Set the model timesteps in seconds
-I: re-initialize the initial model conditions with default
-i: Set input table directory (default is intbl)
-x: Apply multipliers (-P/-p ) for subcatchment only (e.g. -x 1)
-C: set the name of the case (directory) to run
-R: set the name runId within the current case
-L: set the logfile
-E: Switch on reinfiltration of overland flow
-c: name of the wflow configuration file (default: Casename/wflow_sbm.ini).
-h: print usage information
-W: If set, this flag indicates that an ldd is created for the water level
for each timestep. If not the water is assumed to flow according to the
DEM. Wflow will run a lot slower with this option. Most of the time
(shallow soil, steep topography) you do not need this option. Also, if you
need it you might actually need another model.
-U: The argument to this option should be a .tss file with measured discharge in
[m^3/s] which the program will use to update the internal state to match
the measured flow. The number of columns in this file should match the
number of gauges in the wflow_gauges.map file.
-u: list of gauges/columns to use in update. Format:
-u [1 , 4 ,13]
The above example uses columns 1, 4 and 13
-P: set parameter change string (e.g: -P "self.FC = self.FC * 1.6") for non-dynamic variables
-p: set parameter change string (e.g: -P "self.Precipitation = self.Precipitation * 1.11") for
dynamic variables
-l: loglevel (must be one of DEBUG, WARNING, ERROR)
"""
import os.path
import numpy as np
import pcraster.framework
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
from wflow.wflow_funcs import *
import pcraster as pcr
import pdb
import math
from numba import jit
wflow = "wflow_sbm: "
updateCols = []
def usage(*args):
"""Print the given messages and the module usage text, then exit."""
sys.stdout = sys.stderr
for msg in args:
print(msg)
print(__doc__)
sys.exit(0)
def estimate_iterations_kin_wave(Q, Beta, alpha, timestepsecs, dx, mv):
celerity = pcr.ifthen(Q > 0.0, 1.0 / (alpha * Beta * Q**(Beta-1)))
courant = (timestepsecs / dx) * celerity
np_courant = pcr.pcr2numpy(courant, mv)
np_courant[np_courant==mv] = np.nan
try:
it_kin = int(np.ceil(1.25*(np.nanpercentile(np_courant,95))))
except:
it_kin = 1
return it_kin
@jit(nopython=True)
def _sCurve(X, a=0.0, b=1.0, c=1.0):
"""
sCurve function:
Input:
- X input map
- c determines the steepness or "stepwiseness" of the curve.
The higher c the sharper the function. A negative c reverses the function.
- b determines the amplitude of the curve (the upper asymptote is 1/b)
- a determines the centre level (default = 0)
Output:
- result
"""
s = 1.0 / (b + np.exp(-c * (X - a)))
return s
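# A quick numerical check of the S-curve; the helper below is illustrative only and is not
# part of the original model code.
def _example_scurve():
    """With the defaults a=0, b=1, c=1 the curve is the logistic function."""
    # 1.0 / (1.0 + np.exp(0.0)) == 0.5; larger X pushes the result towards 1/b
    return _sCurve(0.0)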
@jit(nopython=True)
def actEvap_unsat_SBM(
RootingDepth,
UStoreDepth,
UStoreLayerThickness,
sumLayer,
RestPotEvap,
sumActEvapUStore,
c,
L,
thetaS,
thetaR,
ust=0,
):
"""
Actual evaporation function:
- first try to get demand from the saturated zone, using the rootingdepth as a limiting factor
- secondly try to get the remaining water from the unsaturated store
- it uses an S-Curve to make sure roots get wet/dry gradually (basically
representing a root-depth distribution)
if ust is True, all unsaturated storage is deemed to be available to the roots
Input:
- RootingDepth, UStoreDepth, FirstZoneDepth, PotTrans, smoothpar
Output:
- ActEvap, FirstZoneDepth, UStoreDepth ActEvapUStore
"""
# AvailCap is fraction of unsat zone containing roots
if ust >= 1:
AvailCap = UStoreDepth * 0.99
else:
if L > 0:
AvailCap = min(1.0, max(0.0, (RootingDepth - sumLayer) / L))
else:
AvailCap = 0.0
MaxExtr = AvailCap * UStoreDepth
# Calculate the reduction of RestPotEvap due to differences in rooting density in the soil column
# The used model is based on Vrugt et al. (2001) and uses as input parameters for z* and Pz the
# values of Hoffman and van Genuchten (z* = 0.20 and Pz = 1.00)
# Next step is to make use of the Feddes curve in order to decrease ActEvapUstore when soil moisture values
# occur above or below ideal plant growing conditions (see also Feddes et al., 1978). h1-h4 values are
# actually negative, but all values are made positive for simplicity.
hb = 1 # cm (pF 1 for atmospheric pressure)
h1 = 1 # cm
h2 = 100 # cm (pF 2 for field capacity)
h3 = 400 # cm (pF 3, critical pF value)
h4 = 15849 # cm (pF 4.2, wilting point)
# According to Brooks-Corey
par_lambda = 2 / (c - 3)
if L > 0.0:
vwc = UStoreDepth / L
else:
vwc = 0.0
vwc = max(vwc, 0.0000001)
head = hb / (
((vwc) / (thetaS - thetaR)) ** (1 / par_lambda)
) # Note that in the original formula, thetaR is subtracted from vwc, but thetaR is not part of the numerical vwc calculation
head = max(head,hb)
# Transform h to a reduction coefficient value according to Feddes et al. (1978).
if(head <= h1):
alpha = 0
elif(head >= h4):
alpha = 0
elif((head < h2) & (head > h1)):
alpha = (head - h1) / (h2 - h1)
elif((head > h3) & (head < h4)):
alpha = 1 - (head - h3) / (h4 - h3)
else:
alpha = 1
ActEvapUStore = (min(MaxExtr, RestPotEvap, UStoreDepth)) * alpha
UStoreDepth = UStoreDepth - ActEvapUStore
RestPotEvap = RestPotEvap - ActEvapUStore
sumActEvapUStore = ActEvapUStore + sumActEvapUStore
return UStoreDepth, sumActEvapUStore, RestPotEvap
@jit(nopython=True)
def infiltration(AvailableForInfiltration, PathFrac, cf_soil, TSoil,InfiltCapSoil,InfiltCapPath, UStoreCapacity, modelSnow, soilInfReduction):
SoilInf = AvailableForInfiltration * (1 - PathFrac)
PathInf = AvailableForInfiltration * PathFrac
if modelSnow & soilInfReduction:
bb = 1.0 / (1.0 - cf_soil)
soilInfRedu = _sCurve(TSoil, a=0.0, b=bb, c=8.0)
else:
soilInfRedu = 1.0
MaxInfiltSoil = min(InfiltCapSoil * soilInfRedu, SoilInf)
MaxInfiltPath = min(InfiltCapPath * soilInfRedu, PathInf)
InfiltSoilPath = min(MaxInfiltPath + MaxInfiltSoil, max(0.0, UStoreCapacity))
return InfiltSoilPath
@jit(nopython=True)
def unsatzone_flow(UStoreLayerDepth, InfiltSoilPath, L, z, KsatVerFrac, c, KsatVer, f, thetaS, thetaR, SoilWaterCapacity, SWDold, shape_layer, TransferMethod):
m = 0
UStoreLayerDepth[m] = UStoreLayerDepth[m] + InfiltSoilPath
if L[m] > 0.0:
#sbm option for vertical transfer (only for 1 layer)
if (TransferMethod == 1 and shape_layer == 1):
Sd = SoilWaterCapacity - SWDold
if Sd <= 0.00001:
st = 0.0
else:
st = KsatVerFrac[m] * KsatVer * (min(UStoreLayerDepth[m],L[m]*(thetaS-thetaR))/Sd)
else:
st = KsatVerFrac[m] * KsatVer * np.exp(-f * z[m]) * min((UStoreLayerDepth[m]/(L[m] * (thetaS-thetaR)))**c[m],1.0)
ast = min(st,UStoreLayerDepth[m])
UStoreLayerDepth[m] = UStoreLayerDepth[m] - ast
else:
ast = 0.0
for m in range(1,len(L)):
UStoreLayerDepth[m] = UStoreLayerDepth[m] + ast
if L[m] > 0.0:
st = KsatVerFrac[m] * KsatVer * np.exp(-f* z[m]) * min((UStoreLayerDepth[m]/(L[m] * (thetaS-thetaR)))**c[m],1.0)
ast = min(st,UStoreLayerDepth[m])
else:
ast = 0.0
UStoreLayerDepth[m] = UStoreLayerDepth[m] - ast
return ast, UStoreLayerDepth
@jit(nopython=True)
def sbm_cell(nodes, nodes_up, ldd, layer, static, dyn, modelSnow, soilInfReduction, timestepsecs, basetimestep, deltaT, nrpaddyirri, shape, TransferMethod, it_kinL=1, ust=0):
shape_layer = layer['UStoreLayerThickness'].shape
# flat new state
ssf_new = np.zeros(dyn['ssf'].size, dtype=dyn['ssf'].dtype)
qo_new = np.zeros(dyn['LandRunoff'].size, dtype=dyn['LandRunoff'].dtype)
qo_new = np.concatenate((qo_new, np.array([0], dtype=dyn['LandRunoff'].dtype)))
# append zero to end to deal with nodata (-1) in indices
ssf_new = np.concatenate((ssf_new, np.array([0], dtype=dyn['ssf'].dtype)))
ldd_ = np.concatenate((ldd, np.array([0], dtype=ldd.dtype)))
slope_ = np.concatenate((static['slope'], np.array([0], dtype=static['slope'].dtype)))
SWDold = np.zeros(dyn['ssf'].size, dtype=dyn['ssf'].dtype)
sumUSold = np.zeros(dyn['ssf'].size, dtype=dyn['ssf'].dtype)
for i in range(len(nodes)):
for j in range(len(nodes[i])):
idx = nodes[i][j]
nbs = nodes_up[i][j]
sumlayer = np.unique(layer['UStoreLayerThickness'][:,idx].cumsum())
sumlayer_0 = np.concatenate((np.array([0.0]), sumlayer))
SWDold[idx] = dyn['SatWaterDepth'][idx]
sumUSold[idx] = layer['UStoreLayerDepth'][:,idx].sum()
n = np.where(dyn['zi'][idx] > sumlayer_0)[0]
if len(n) > 1:
L = np.concatenate((layer['UStoreLayerThickness'][n[0:-1],idx], np.array([dyn['zi'][idx] - sumlayer_0[n[-1]]]))).astype(np.float64)
else:
L = np.array([dyn['zi'][idx]]).astype(np.float64)
z = L.cumsum()
dyn['ActEvapUStore'][idx] = 0.0
if static['River'][idx]:
ind = np.where(ldd_[nbs] != ldd_[idx])
chanperc = np.zeros(ldd_[nbs].size)
chanperc[ind] = slope_[nbs][ind]/(slope_[idx]+slope_[nbs][ind])
ssf_in = np.sum((1-chanperc)*ssf_new[nbs])
dyn['ssf_toriver'][idx] = np.sum((chanperc)*ssf_new[nbs])/(1000*1000*1000)/timestepsecs
else:
ssf_in = np.sum(ssf_new[nbs])
dyn['CellInFlow'][idx] = ssf_in
UStoreCapacity = static['SoilWaterCapacity'][idx] - dyn['SatWaterDepth'][idx] - layer['UStoreLayerDepth'][n,idx].sum()
InfiltSoilPath = infiltration(dyn['AvailableForInfiltration'][idx], static['PathFrac'][idx], static['cf_soil'][idx],
dyn['TSoil'][idx],static['InfiltCapSoil'][idx],static['InfiltCapPath'][idx],UStoreCapacity, modelSnow, soilInfReduction)
dyn['InfiltSoilPath'][idx] = InfiltSoilPath
# unsat fluxes first
ast, layer['UStoreLayerDepth'][:,idx] = unsatzone_flow(layer['UStoreLayerDepth'][:,idx], InfiltSoilPath, L, z, layer['KsatVerFrac'][:,idx], layer['c'][:,idx], static['KsatVer'][idx], static['f'][idx],
static['thetaS'][idx], static['thetaR'][idx], static['SoilWaterCapacity'][idx], SWDold[idx], shape_layer[0], TransferMethod)
dyn['Transfer'][idx] = ast
# then evaporation from layers
for k in range(len(L)):
if k==0:
SaturationDeficit = static['SoilWaterCapacity'][idx] - dyn['SatWaterDepth'][idx]
if shape_layer[0] == 1:
soilevapunsat = dyn['restEvap'][idx] * min(1.0, SaturationDeficit / static['SoilWaterCapacity'][idx])
else:
if len(L) == 1:
if dyn['zi'][idx] > 0:
soilevapunsat = dyn['restEvap'][idx] * min(1.0, layer['UStoreLayerDepth'][k,idx]/dyn['zi'][idx])
else:
soilevapunsat = 0.0
else:
soilevapunsat = dyn['restEvap'][idx] * min(1.0, layer['UStoreLayerDepth'][k,idx]/(layer['UStoreLayerThickness'][k,idx]*(static['thetaS'][idx]-static['thetaR'][idx])))
soilevapunsat = min(soilevapunsat, layer['UStoreLayerDepth'][k,idx])
dyn['restEvap'][idx] = dyn['restEvap'][idx] - soilevapunsat
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] - soilevapunsat
if shape_layer[0] == 1:
soilevapsat = 0.0
else:
if len(L) == 1:
soilevapsat = dyn['restEvap'][idx] * min(1.0, (layer['UStoreLayerThickness'][k,idx] - dyn['zi'][idx])/ layer['UStoreLayerThickness'][k,idx])
soilevapsat = min(soilevapsat, (layer['UStoreLayerThickness'][k,idx] - dyn['zi'][idx]) * (static['thetaS'][idx] - static['thetaR'][idx]))
else:
soilevapsat = 0.0
dyn['soilevap'][idx] = soilevapunsat + soilevapsat
dyn['SatWaterDepth'][idx] = dyn['SatWaterDepth'][idx] - soilevapsat
# evaporation available for transpiration
PotTrans = dyn['PotTransSoil'][idx] - dyn['soilevap'][idx] - dyn['ActEvapOpenWaterLand'][idx]
# evaporation from saturated store
wetroots = _sCurve(dyn['zi'][idx], a=static['ActRootingDepth'][idx], c=static['rootdistpar'][idx])
dyn['ActEvapSat'][idx] = min(PotTrans * wetroots, dyn['SatWaterDepth'][idx])
dyn['SatWaterDepth'][idx] = dyn['SatWaterDepth'][idx] - dyn['ActEvapSat'][idx]
RestPotEvap = PotTrans - dyn['ActEvapSat'][idx]
# actual evaporation from UStore
layer['UStoreLayerDepth'][k,idx], dyn['ActEvapUStore'][idx], RestPotEvap = actEvap_unsat_SBM(static['ActRootingDepth'][idx], layer['UStoreLayerDepth'][k,idx], layer['UStoreLayerThickness'][k,idx],
sumlayer[k], RestPotEvap, dyn['ActEvapUStore'][idx], layer['c'][k,idx], L[k], static['thetaS'][idx], static['thetaR'][idx], ust)
else:
# actual evaporation from UStore
layer['UStoreLayerDepth'][k,idx], dyn['ActEvapUStore'][idx], RestPotEvap = actEvap_unsat_SBM(static['ActRootingDepth'][idx], layer['UStoreLayerDepth'][k,idx], layer['UStoreLayerThickness'][k,idx],
sumlayer[k], RestPotEvap, dyn['ActEvapUStore'][idx], layer['c'][k,idx], L[k], static['thetaS'][idx], static['thetaR'][idx], ust)
#check soil moisture balance per layer
du = 0.0
for k in range(L.size-1,-1,-1):
du = max(0,layer['UStoreLayerDepth'][k,idx] - L[k]*(static['thetaS'][idx]-static['thetaR'][idx]))
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] - du
if k > 0:
layer['UStoreLayerDepth'][k-1,idx] = layer['UStoreLayerDepth'][k-1,idx] + du
Ksat = layer['KsatVerFrac'][len(L)-1,idx] * static['KsatVer'][idx] * np.exp(-static['f'][idx] * dyn['zi'][idx])
UStoreCapacity = static['SoilWaterCapacity'][idx] - dyn['SatWaterDepth'][idx] - layer['UStoreLayerDepth'][n,idx].sum()
MaxCapFlux = max(0.0, min(Ksat, dyn['ActEvapUStore'][idx], UStoreCapacity, dyn['SatWaterDepth'][idx]))
if dyn['zi'][idx] > static['ActRootingDepth'][idx]:
CapFluxScale = static['CapScale'][idx] / (static['CapScale'][idx] + dyn['zi'][idx] - static['ActRootingDepth'][idx]) * timestepsecs / basetimestep
else:
CapFluxScale = 0.0
CapFlux = MaxCapFlux * CapFluxScale
netCapflux = CapFlux
actCapFlux = 0.0
for k in range(L.size-1,-1,-1):
toadd = min(netCapflux, max(L[k]*(static['thetaS'][idx]-static['thetaR'][idx]) - layer['UStoreLayerDepth'][k,idx], 0.0))
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] + toadd
netCapflux = netCapflux - toadd
actCapFlux = actCapFlux + toadd
dyn['CapFlux'][idx] = actCapFlux
DeepKsat = static['KsatVer'][idx] * np.exp(-static['f'][idx] * static['SoilThickness'][idx])
DeepTransfer = min(dyn['SatWaterDepth'][idx], DeepKsat)
dyn['ActLeakage'][idx] = max(0.0, min(static['MaxLeakage'][idx], DeepTransfer))
r = (ast - actCapFlux - dyn['ActLeakage'][idx] - dyn['ActEvapSat'][idx] - soilevapsat) * static['DW'][idx]*1000
ssf_new[idx], dyn['zi'][idx], ExfiltSatWater = kinematic_wave_ssf(ssf_in, dyn['ssf'][idx], dyn['zi'][idx], r, static['KsatHorFrac'][idx],
static['KsatVer'][idx], static['slope'][idx], static['neff'][idx], static['f'][idx],
static['SoilThickness'][idx], deltaT, static['DL'][idx]*1000, static['DW'][idx]*1000, static['ssfmax'][idx])
dyn['zi'][idx] = min(dyn['zi'][idx], static['SoilThickness'][idx])
dyn['SatWaterDepth'][idx] = (static['SoilThickness'][idx] - dyn['zi'][idx]) * (static['thetaS'][idx] - static['thetaR'][idx])
n_new = np.where(dyn['zi'][idx] > sumlayer_0)[0]
if len(n_new) > 1:
L_new = np.concatenate((layer['UStoreLayerThickness'][n_new[0:-1],idx], np.array([dyn['zi'][idx] - sumlayer_0[n_new[-1]]]))).astype(np.float64)
else:
L_new = np.array([dyn['zi'][idx]]).astype(np.float64)
ExfiltFromUstore = 0.0
for k in range(L.size-1,-1,-1):
if (np.where(n_new == k))[0].size > 0:
ExfiltFromUstore = max(0,layer['UStoreLayerDepth'][k,idx] - L_new[k]*(static['thetaS'][idx]-static['thetaR'][idx]))
else:
ExfiltFromUstore = layer['UStoreLayerDepth'][k,idx]
layer['UStoreLayerDepth'][k,idx] = layer['UStoreLayerDepth'][k,idx] - ExfiltFromUstore
if k > 0:
layer['UStoreLayerDepth'][k-1,idx] = layer['UStoreLayerDepth'][k-1,idx] + ExfiltFromUstore
dyn['ExfiltWater'][idx] = ExfiltSatWater + ExfiltFromUstore
dyn['ExcessWater'][idx] = dyn['AvailableForInfiltration'][idx] - InfiltSoilPath + du
dyn['ActInfilt'][idx] = InfiltSoilPath - du
ponding_add = 0
if nrpaddyirri > 0:
if static['h_p'][idx] > 0:
ponding_add = min(dyn['ExfiltWater'][idx] + dyn['ExcessWater'][idx], static['h_p'][idx] - dyn['PondingDepth'][idx])
dyn['PondingDepth'][idx] = dyn['PondingDepth'][idx] + ponding_add
dyn['InwaterO'][idx] = max(dyn['ExfiltWater'][idx] + dyn['ExcessWater'][idx] + dyn['RunoffLandCells'][idx] - dyn['ActEvapOpenWaterLand'][idx] - ponding_add, 0.0) * (static['xl'][idx] * static['yl'][idx]) * 0.001 / timestepsecs
dyn['sumUStoreLayerDepth'][idx] = layer['UStoreLayerDepth'][:,idx].sum()
# volumetric water contents per soil layer and root zone
for k in range(layer['UStoreLayerThickness'][:,idx].size):
if (np.where(n_new == k))[0].size > 0:
if layer['UStoreLayerThickness'][k,idx] > 0:
layer['vwc'][k,idx] = (layer['UStoreLayerDepth'][k,idx] + (layer['UStoreLayerThickness'][k,idx] - L_new[k]) * (static['thetaS'][idx] - static['thetaR'][idx])) / layer['UStoreLayerThickness'][k,idx] + static['thetaR'][idx]
else:
layer['vwc'][k,idx] = static['thetaS'][idx]
layer['vwc_perc'][k,idx] = (layer['vwc'][k,idx]/static['thetaS'][idx]) * 100.0
rootStore_unsat = 0
for k in range(L_new.size):
if L_new[k] > 0:
rootStore_unsat = rootStore_unsat + (max(0.0, static['ActRootingDepth'][idx] - sumlayer_0[k])/L_new[k]) * layer['UStoreLayerDepth'][k,idx]
dyn['RootStore_unsat'][idx] = rootStore_unsat
acc_flow = np.zeros(dyn['LandRunoff'].size, dtype=dyn['LandRunoff'].dtype)
acc_flow = np.concatenate((acc_flow, np.array([0], dtype=dyn['LandRunoff'].dtype)))
qo_toriver_acc = np.copy(acc_flow)
q = dyn['InwaterO'] / static['DL']
for v in range(0,it_kinL):
qo_new = np.zeros(dyn['LandRunoff'].size, dtype=dyn['LandRunoff'].dtype)
qo_new = np.concatenate((qo_new, np.array([0], dtype=dyn['LandRunoff'].dtype)))
for i in range(len(nodes)):
for j in range(len(nodes[i])):
idx = nodes[i][j]
nbs = nodes_up[i][j]
if static['River'][idx]:
ind = np.where(ldd_[nbs] != ldd_[idx])
chanperc = np.zeros(ldd_[nbs].size)
chanperc[ind] = slope_[nbs][ind]/(slope_[idx]+slope_[nbs][ind])
if static['SW'][idx] > 0.0:
qo_in = np.sum((1-chanperc)*qo_new[nbs])
qo_toriver_vol = np.sum(chanperc*qo_new[nbs]) * (timestepsecs/it_kinL)
else:
qo_in = 0.0
qo_toriver_vol = np.sum(qo_new[nbs]) * (timestepsecs/it_kinL)
else:
qo_in = np.sum(qo_new[nbs])
qo_toriver_vol = 0.0
qo_new[idx] = kinematic_wave(qo_in, dyn['LandRunoff'][idx], q[idx], dyn['AlphaL'][idx], static['Beta'][idx], timestepsecs/it_kinL, static['DL'][idx])
acc_flow[idx] = acc_flow[idx] + qo_new[idx] * (timestepsecs/it_kinL)
dyn['Qo_in'][idx] = dyn['Qo_in'][idx] + qo_in * (timestepsecs/it_kinL)
qo_toriver_acc[idx] = qo_toriver_acc[idx] + qo_toriver_vol
if static['SW'][idx] > 0:
WaterLevelL = (dyn['AlphaL'][idx] * np.power(qo_new[idx], static['Beta'][idx])) / static['SW'][idx]
Pl = static['SW'][idx] + (2.0 * WaterLevelL)
dyn['AlphaL'][idx] = static['AlpTermR'][idx] * np.power(Pl, static['AlpPow'][idx])
dyn['LandRunoff'][idx]= qo_new[idx]
qo_new = acc_flow/timestepsecs
dyn['qo_toriver'][:] = qo_toriver_acc[:-1]/timestepsecs
dyn['Qo_in'][:] = dyn['Qo_in'][:] / timestepsecs
dyn['SoilWatbal'][:] = (dyn['ActInfilt'][:] - ((dyn['SatWaterDepth'][:] + dyn['sumUStoreLayerDepth'][:]) - (sumUSold[:] + SWDold[:])) +
(dyn['CellInFlow'][:]-ssf_new[:-1])/(static['DW'][:]*static['DL'][:]*1000*1000) - dyn['ExfiltWater'][:] - dyn['soilevap'][:] - dyn['ActEvapUStore'][:] -
dyn['ActEvapSat'][:] - dyn['ActLeakage'][:])
return ssf_new[:-1], qo_new[:-1], dyn, layer
def SnowPackHBV(Snow, SnowWater, Precipitation, Temperature, TTI, TT, TTM, Cfmax, WHC):
"""
HBV Type snowpack modelling using a Temperature degree factor. All correction
factors (RFCF and SFCF) are set to 1. The refreezing efficiency factor is set to 0.05.
:param Snow:
:param SnowWater:
:param Precipitation:
:param Temperature:
:param TTI:
:param TT:
:param TTM:
:param Cfmax:
:param WHC:
:return: Snow,SnowMelt,Precipitation
"""
RFCF = 1.0 # correction factor for rainfall
CFR = 0.05000 # refreezing efficiency constant for refreezing of free water in the snowpack
SFCF = 1.0 # correction factor for snowfall
RainFrac = pcr.ifthenelse(
1.0 * TTI == 0.0,
pcr.ifthenelse(Temperature <= TT, pcr.scalar(0.0), pcr.scalar(1.0)),
pcr.min((Temperature - (TT - TTI / 2)) / TTI, pcr.scalar(1.0)),
)
RainFrac = pcr.max(
RainFrac, pcr.scalar(0.0)
) # fraction of precipitation which falls as rain
SnowFrac = 1 - RainFrac # fraction of precipitation which falls as snow
Precipitation = (
SFCF * SnowFrac * Precipitation + RFCF * RainFrac * Precipitation
) # different correction for rainfall and snowfall
SnowFall = SnowFrac * Precipitation # snowfall depth
RainFall = RainFrac * Precipitation # rainfall depth
PotSnowMelt = pcr.ifthenelse(
Temperature > TTM, Cfmax * (Temperature - TTM), pcr.scalar(0.0)
) # Potential snow melt, based on temperature
PotRefreezing = pcr.ifthenelse(
Temperature < TTM, Cfmax * CFR * (TTM - Temperature), 0.0
) # Potential refreezing, based on temperature
Refreezing = pcr.ifthenelse(
Temperature < TTM, pcr.min(PotRefreezing, SnowWater), 0.0
) # actual refreezing
# No landuse correction here
SnowMelt = pcr.min(PotSnowMelt, Snow) # actual snow melt
Snow = Snow + SnowFall + Refreezing - SnowMelt # dry snow content
SnowWater = SnowWater - Refreezing # free water content in snow
MaxSnowWater = Snow * WHC # Max water in the snow
SnowWater = (
SnowWater + SnowMelt + RainFall
) # Add all water and potentially supersaturate the snowpack
RainFall = pcr.max(SnowWater - MaxSnowWater, 0.0) # rain + surplus snow water
SnowWater = SnowWater - RainFall
return Snow, SnowWater, SnowMelt, RainFall, SnowFall
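# Degree-day illustration with scalar numbers (the function itself operates on PCRaster maps):
# with Cfmax = 3.75 mm/(oC day), TTM = 0 oC and an air temperature of 2 oC, the potential
# melt for that day is Cfmax * (Temperature - TTM) = 7.5 mm, limited by the available Snow.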
class WflowModel(pcraster.framework.DynamicModel):
"""
.. versionchanged:: 0.91
- Calculation of GWScale moved to resume() to allow fitting.
.. versionadded:: 0.91
- added S-curve for freezing soil infiltration reduction calculations
.. todo::
- add slope based quick-runoff -> less percolation on hillslopes...
"""
def __init__(self, cloneMap, Dir, RunDir, configfile):
pcraster.framework.DynamicModel.__init__(self)
self.UStoreLayerDepth = []
self.caseName = os.path.abspath(Dir)
self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
pcr.setclone(self.clonemappath)
self.runId = RunDir
self.Dir = os.path.abspath(Dir)
self.configfile = configfile
self.SaveDir = os.path.join(self.Dir, self.runId)
def irrigationdemand(self, pottrans, acttrans, irareas):
"""
Determine irrigation water demand from the difference between potential
transpiration and actual transpiration.
:param pottrans: potential transpiration (epot minus interception and soil/open water evaporation)
:param acttrans: actual transpiration
:param ir_areas: maps of irrigation areas
:return: demand
"""
Et_diff = pcr.areaaverage(pottrans - acttrans, pcr.nominal(irareas))
# Now determine demand in m^3/s for each area
sqmarea = pcr.areatotal(self.reallength * self.reallength, pcr.nominal(irareas))
m3sec = Et_diff * sqmarea / 1000.0 / self.timestepsecs
return Et_diff, m3sec
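# Unit-conversion sketch with made-up numbers: an ET deficit of 2 mm over an irrigated
# area of 1 km^2 (1e6 m^2) and a daily timestep gives
# m3sec = 2 * 1e6 / 1000.0 / 86400 ~= 0.023 m^3/s of irrigation demand.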
def updateRunOff(self):
"""
Updates the kinematic wave reservoir. Should be run after updates to Q
"""
self.WaterLevelR = (self.AlphaR * pow(self.RiverRunoff, self.Beta)) / self.Bw
# wetted perimeter (m)
Pr = self.Bw + (2 * self.WaterLevelR)
# Alpha
self.AlphaR = self.AlpTermR * pow(Pr, self.AlpPow)
self.OldKinWaveVolumeR = self.KinWaveVolumeR
self.KinWaveVolumeR = self.WaterLevelR * self.Bw * self.DCL
self.dyn['AlphaR'] = pcr.pcr2numpy(self.AlphaR,self.mv).ravel()
self.WaterLevelL = pcr.ifthenelse( self.SW > 0, (self.AlphaL * pow(self.LandRunoff, self.Beta)) / self.SW, 0.0)
Pl = self.SW + (2 * self.WaterLevelL)
# Alpha
self.AlphaL = self.AlpTermL * pow(Pl, self.AlpPow)
self.OldKinWaveVolumeL = self.KinWaveVolumeL
self.KinWaveVolumeL = self.WaterLevelL * self.SW * self.DL
self.dyn['AlphaL'] = pcr.pcr2numpy(self.AlphaL,self.mv).ravel()
def stateVariables(self):
"""
returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
This function is specific for each model and **must** be present.
:var self.RiverRunoff: Surface runoff in the kin-wave reservoir [m^3/s]
:var self.LandRunoff: Surface runoff in the kin-wave reservoir [m^3/s]
:var self.SurfaceRunoffDyn: Surface runoff in the dyn-wave reservoir [m^3/s]
:var self.WaterLevelR: Water level in the river kin-wave reservoir [m]
:var self.WaterLevelL: Water level in the land kin-wave reservoir [m]
:var self.WaterLevelDyn: Water level in the dyn-wave reservoir [m]
:var self.Snow: Snow pack [mm]
:var self.SnowWater: Snow pack water [mm]
:var self.TSoil: Top soil temperature [oC]
:var self.UStoreDepth: Water in the Unsaturated Store [mm]
:var self.SatWaterDepth: Water in the saturated store [mm]
:var self.CanopyStorage: Amount of water on the Canopy [mm]
:var self.ReservoirVolume: Volume of each reservoir [m^3]
:var self.GlacierStore: Thickness of the Glacier in a gridcell [mm]
"""
states = [
"RiverRunoff",
"WaterLevelR",
"LandRunoff",
"WaterLevelL",
"SatWaterDepth",
"Snow",
"TSoil",
"UStoreLayerDepth",
"SnowWater",
"CanopyStorage",
"SubsurfaceFlow",
]
if hasattr(self, "GlacierFrac"):
states.append("GlacierStore")
if hasattr(self, "ReserVoirSimpleLocs"):
states.append("ReservoirVolume")
if hasattr(self, "ReserVoirComplexLocs"):
states.append("ReservoirWaterLevel")
if hasattr(self, "nrpaddyirri"):
if self.nrpaddyirri > 0:
states.append("PondingDepth")
return states
def supplyCurrentTime(self):
"""
gets the current time in seconds after the start of the run
"""
return self.currentTimeStep() * self.timestepsecs
def suspend(self):
self.logger.info("Saving initial conditions...")
self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
if self.OverWriteInit:
self.logger.info("Saving initial conditions over start conditions...")
self.wf_suspend(self.SaveDir + "/instate/")
def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
If you use this make sure to call wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
modelparameters = []
# Static model parameters e.g.
# modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# 3: Input time series ###################################################
self.P_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
) # timeseries for rainfall
self.PET_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
) # timeseries for potential evapotranspiration
self.TEMP_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
) # timeseries for temperature
self.Inflow_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
) # timeseries for in/outflow locations (abstractions)
# Meteo and other forcing
modelparameters.append(
self.ParamType(
name="Precipitation",
stack=self.P_mapstack,
type="timeseries",
default=0.0,
verbose=True,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="PotenEvap",
stack=self.PET_mapstack,
type="timeseries",
default=0.0,
verbose=True,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="Temperature",
stack=self.TEMP_mapstack,
type="timeseries",
default=10.0,
verbose=True,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="Inflow",
stack=self.Inflow_mapstack,
type="timeseries",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="IrrigationAreas",
stack="staticmaps/wflow_irrigationareas.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="IrrigationSurfaceIntakes",
stack="staticmaps/wflow_irrisurfaceintake.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="IrrigationPaddyAreas",
stack="staticmaps/wflow_irrigationpaddyareas.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="IrrigationSurfaceReturn",
stack="staticmaps/wflow_irrisurfacereturns.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="h_max",
stack="staticmaps/wflow_hmax.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="h_min",
stack="staticmaps/wflow_hmin.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="h_p",
stack="staticmaps/wflow_hp.map",
type="staticmap",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
return modelparameters
def initial(self):
"""
Initial part of the model, executed only once. Reads all static data from disk
*Soil*
:var M.tbl: M parameter in the SBM model. Governs the decay of Ksat with depth [-]
:var thetaR.tbl: Residual water content [mm/mm]
:var thetaS.tbl: Saturated water content (porosity) [mm/mm]
:var KsatVer.tbl: Saturated conductivity [mm/d]
:var PathFrac.tbl: Fraction of compacted area per grid cell [-]
:var InfiltCapSoil.tbl: Soil infiltration capacity [mm/d]
:var InfiltCapPath.tbl: Infiltration capacity of the compacted areas [mm/d]
:var SoilMinThickness.tbl: Minimum thickness of the soil [mm]
:var SoilThickness.tbl: Maximum thickness of the soil [mm]
:var RootingDepth.tbl: Depth of the roots [mm]
:var MaxLeakage.tbl: Maximum leakage out of the soil profile [mm/d]
:var CapScale.tbl: Scaling factor in the Capillary rise calculations (100) [mm/d]
:var RunoffGeneratingGWPerc: Fraction of the soil depth that contributes to subcell runoff (0.1) [-]
:var rootdistpar.tbl: Determine how roots are linked to water table. The number
should be negative. A more negative number means that all roots are wet if the water
table is above the lowest part of the roots.
A less negative number smooths this. [mm] (default = -80000)
*Canopy*
:var CanopyGapFraction.tbl: Fraction of precipitation that does not hit the canopy directly [-]
:var MaxCanopyStorage.tbl: Canopy interception storage capacity [mm]
:var EoverR.tbl: Ratio of average wet canopy evaporation rate over rainfall rate [-]
*Surface water*
:var N.tbl: Manning's N parameter
:var N_river.tbl: Manning's N parameter for cells marked as river
*Snow and frozen soil modelling parameters*
:var cf_soil.tbl: Soil infiltration reduction factor when soil is frozen [-] (< 1.0)
:var TTI.tbl: defines interval in which precipitation falls as rainfall and snowfall (1.000) [oC]
:var TT.tbl: critical temperature for snowmelt and refreezing (-1.41934) [oC]
:var Cfmax.tbl: melt constant in the temperature-index snow melt equation (3.75653) [mm/(oC day)]
:var WHC.tbl: fraction of Snowvolume that can store water (0.1) [-]
:var w_soil.tbl: Soil temperature smooth factor. Given for daily timesteps. (0.1125) [-] Wigmosta, <NAME>., <NAME>, <NAME>, and <NAME> (2009).
"""
global statistics
global multpars
global updateCols
self.thestep = pcr.scalar(0)
self.basetimestep = 86400
self.SSSF = False
pcr.setglobaloption("unittrue")
self.mv = -999
self.count = 0
self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
# Set and get defaults from ConfigFile here ###################################
self.Tslice = int(configget(self.config, "model", "Tslice", "1"))
self.reinit = int(configget(self.config, "run", "reinit", "0"))
self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
self.updating = int(configget(self.config, "model", "updating", "0"))
self.updateFile = configget(self.config, "model", "updateFile", "no_set")
self.TransferMethod = int(
configget(self.config, "model", "transfermethod", "0")
)
self.maxitsupply = int(configget(self.config, "model", "maxitsupply", "5"))
self.UST = int(configget(self.config, "model", "Whole_UST_Avail", "0"))
self.NRiverMethod = int(configget(self.config, "model", "nrivermethod", "1"))
self.kinwaveIters = int(configget(self.config, "model", "kinwaveIters", "0"))
if self.kinwaveIters == 1:
self.logger.info(
"Using sub timestep for kinematic wave (iterate)"
)
if self.TransferMethod == 1:
self.logger.info(
"Applying the original topog_sbm vertical transfer formulation"
)
self.sCatch = int(configget(self.config, "model", "sCatch", "0"))
self.intbl = configget(self.config, "model", "intbl", "intbl")
self.modelSnow = int(configget(self.config, "model", "ModelSnow", "1"))
self.soilInfReduction = int(configget(self.config, "model", "soilInfRedu", "1"))
sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
alf = float(configget(self.config, "model", "Alpha", "60"))
# TODO: make this into a list for all gauges or a map
Qmax = float(configget(self.config, "model", "AnnualDischarge", "300"))
self.UpdMaxDist = float(configget(self.config, "model", "UpdMaxDist", "100"))
self.MaxUpdMult = float(configget(self.config, "model", "MaxUpdMult", "1.3"))
self.MinUpdMult = float(configget(self.config, "model", "MinUpdMult", "0.7"))
self.UpFrac = float(configget(self.config, "model", "UpFrac", "0.8"))
# self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
self.waterdem = int(configget(self.config, "model", "waterdem", "0"))
WIMaxScale = float(configget(self.config, "model", "WIMaxScale", "0.8"))
self.MassWasting = int(configget(self.config, "model", "MassWasting", "0"))
self.nrLayers = int(configget(self.config, "model", "nrLayers", "1"))
# static maps to use (normally default)
wflow_subcatch = configget(
self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
)
wflow_dem = configget(
self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
)
wflow_ldd = configget(
self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map"
)
wflow_river = configget(
self.config, "model", "wflow_river", "staticmaps/wflow_river.map"
)
wflow_riverlength = configget(
self.config,
"model",
"wflow_riverlength",
"staticmaps/wflow_riverlength.map",
)
wflow_riverlength_fact = configget(
self.config,
"model",
"wflow_riverlength_fact",
"staticmaps/wflow_riverlength_fact.map",
)
wflow_landuse = configget(
self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
)
wflow_soil = configget(
self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
)
wflow_gauges = configget(
self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
)
wflow_inflow = configget(
self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map"
)
wflow_riverwidth = configget(
self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map"
)
wflow_streamorder = configget(
self.config,
"model",
"wflow_streamorder",
"staticmaps/wflow_streamorder.map",
)
# 2: Input base maps ########################################################
subcatch = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
) # Determines the area of calculations (all cells > 0)
subcatch = pcr.ifthen(subcatch > 0, subcatch)
self.Altitude = self.wf_readmap(
os.path.join(self.Dir, wflow_dem), 0.0, fail=True
) # * pcr.scalar(pcr.defined(subcatch)) # DEM
self.TopoLdd = pcr.ldd(
self.wf_readmap(os.path.join(self.Dir, wflow_ldd), 0.0, fail=True)
) # Local drainage direction (ldd) map
self.TopoId = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
) # subcatchment area map
self.River = pcr.cover(
pcr.boolean(
self.wf_readmap(os.path.join(self.Dir, wflow_river), 0.0, fail=True)
),
0,
)
self.RiverLength = pcr.cover(
self.wf_readmap(os.path.join(self.Dir, wflow_riverlength), 0.0), 0.0
)
# Factor to multiply riverlength with (defaults to 1.0)
self.RiverLengthFac = self.wf_readmap(
os.path.join(self.Dir, wflow_riverlength_fact), 1.0
)
# read landuse and soilmap and make sure there are no missing points related to the
# subcatchment map. Currently sets the lu and soil type to 1
self.LandUse = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_landuse), 0.0, fail=True)
)
self.LandUse = pcr.cover(self.LandUse, pcr.ordinal(subcatch > 0))
self.Soil = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_soil), 0.0, fail=True)
)
self.Soil = pcr.cover(self.Soil, pcr.ordinal(subcatch > 0))
self.OutputLoc = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_gauges), 0.0, fail=True)
) # location of output gauge(s)
self.InflowLoc = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_inflow), 0.0)
) # location of abstractions/inflows.
self.RiverWidth = self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth), 0.0)
# Experimental
self.RunoffGenSigmaFunction = int(
configget(self.config, "model", "RunoffGenSigmaFunction", "0")
)
self.SubCatchFlowOnly = int(
configget(self.config, "model", "SubCatchFlowOnly", "0")
)
self.OutputId = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
) # location of subcatchment
# Temperature correction per cell to add
self.TempCor = self.wf_readmap(
self.Dir
+ "/"
+ configget(
self.config,
"model",
"TemperatureCorrectionMap",
"staticmaps/wflow_tempcor.map",
),
0.0,
)
self.ZeroMap = 0.0 * pcr.scalar(subcatch) # map with only zero's
# Set static initial values here #########################################
self.pi = 3.1416
self.e = 2.7183
self.SScale = 100.0
self.Latitude = pcr.ycoordinate(pcr.boolean(self.Altitude))
self.Longitude = pcr.xcoordinate(pcr.boolean(self.Altitude))
# Read parameters NEW Method
self.logger.info("Linking parameters to landuse, catchment and soil...")
self.wf_updateparameters()
self.RunoffGeneratingGWPerc = self.readtblDefault(
self.Dir + "/" + self.intbl + "/RunoffGeneratingGWPerc.tbl",
self.LandUse,
subcatch,
self.Soil,
0.1,
)
if hasattr(self, "LAI"):
# Sl must also be defined
if not hasattr(self, "Sl"):
logging.error(
"Sl (specific leaf storage) not defined! Needed because LAI is defined."
)
logging.error("Please add it to the modelparameters section. e.g.:")
logging.error(
"Sl=inmaps/clim/LCtoSpecificLeafStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map"
)
if not hasattr(self, "Kext"):
logging.error(
"Kext (canopy extinction coefficient) not defined! Needed because LAI is defined."
)
logging.error("Please add it to the modelparameters section. e.g.:")
logging.error(
"Kext=inmaps/clim/LCtoExtinctionCoefficient.tbl,tbl,0.5,1,inmaps/clim/LC.map"
)
if not hasattr(self, "Swood"):
logging.error(
"Swood (canopy storage of wood: branches, trunks) not defined! Needed because LAI is defined."
)
logging.error("Please add it to the modelparameters section. e.g.:")
logging.error(
"Swood=inmaps/clim/LCtoBranchTrunkStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map"
)
self.Cmax = self.Sl * self.LAI + self.Swood
self.CanopyGapFraction = pcr.exp(-self.Kext * self.LAI)
self.np_CanopyGapFraction = pcr.pcr2numpy(self.CanopyGapFraction,self.mv)
# TODO: Add MAXLAI and CWf lookup
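# Illustrative check of the two relations above (hypothetical numbers, not
# read from any table): with Sl = 0.05 mm per unit LAI, LAI = 4 and
# Swood = 0.5 mm, Cmax = 0.05 * 4 + 0.5 = 0.7 mm of canopy storage, and with
# Kext = 0.6 the gap fraction is exp(-0.6 * 4) ~= 0.091, i.e. about 9% of
# rainfall reaches the ground without hitting the canopy.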
else:
self.Cmax = self.readtblDefault(
self.Dir + "/" + self.intbl + "/MaxCanopyStorage.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
)
self.CanopyGapFraction = self.readtblDefault(
self.Dir + "/" + self.intbl + "/CanopyGapFraction.tbl",
self.LandUse,
subcatch,
self.Soil,
0.1,
)
self.EoverR = self.readtblDefault(
self.Dir + "/" + self.intbl + "/EoverR.tbl",
self.LandUse,
subcatch,
self.Soil,
0.1,
)
if not hasattr(self, "DemandReturnFlowFraction"):
self.DemandReturnFlowFraction = self.ZeroMap
self.RootingDepth = self.readtblDefault(
self.Dir + "/" + self.intbl + "/RootingDepth.tbl",
self.LandUse,
subcatch,
self.Soil,
750.0,
) # rooting depth
#: rootdistpar determines how roots are linked to the water table.
self.rootdistpar = self.readtblDefault(
self.Dir + "/" + self.intbl + "/rootdistpar.tbl",
self.LandUse,
subcatch,
self.Soil,
-8000,
) # rootdistpar
# Soil parameters
# infiltration capacity of the soil [mm/day]
self.InfiltCapSoil = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/InfiltCapSoil.tbl",
self.LandUse,
subcatch,
self.Soil,
100.0,
)
* self.timestepsecs
/ self.basetimestep
)
self.CapScale = self.readtblDefault(
self.Dir + "/" + self.intbl + "/CapScale.tbl",
self.LandUse,
subcatch,
self.Soil,
100.0,
) #
# infiltration capacity of the compacted areas (paths) [mm/day]
self.InfiltCapPath = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/InfiltCapPath.tbl",
self.LandUse,
subcatch,
self.Soil,
10.0,
)
* self.timestepsecs
/ self.basetimestep
)
self.MaxLeakage = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/MaxLeakage.tbl",
self.LandUse,
subcatch,
self.Soil,
0.0,
)
* self.timestepsecs
/ self.basetimestep
)
self.MaxPercolation = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/MaxPercolation.tbl",
self.LandUse,
subcatch,
self.Soil,
0.0,
)
* self.timestepsecs
/ self.basetimestep
)
# Fraction of the cell area with compacted soil (paths etc.)
self.PathFrac = self.readtblDefault(
self.Dir + "/" + self.intbl + "/PathFrac.tbl",
self.LandUse,
subcatch,
self.Soil,
0.01,
)
# thickness of the soil
self.SoilThickness = self.readtblDefault(
self.Dir + "/" + self.intbl + "/SoilThickness.tbl",
self.LandUse,
subcatch,
self.Soil,
2000.0,
)
self.thetaR = self.readtblDefault(
self.Dir + "/" + self.intbl + "/thetaR.tbl",
self.LandUse,
subcatch,
self.Soil,
0.01,
)
self.thetaS = self.readtblDefault(
self.Dir + "/" + self.intbl + "/thetaS.tbl",
self.LandUse,
subcatch,
self.Soil,
0.6,
)
# minimum thickness of the soil
self.SoilMinThickness = self.readtblDefault(
self.Dir + "/" + self.intbl + "/SoilMinThickness.tbl",
self.LandUse,
subcatch,
self.Soil,
500.0,
)
# KsatVer = $2\inmaps\KsatVer.map
self.KsatVer = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/KsatVer.tbl",
self.LandUse,
subcatch,
self.Soil,
3000.0,
)
* self.timestepsecs
/ self.basetimestep
)
self.MporeFrac = self.readtblDefault(
self.Dir + "/" + self.intbl + "/MporeFrac.tbl",
self.LandUse,
subcatch,
self.Soil,
0.0,
)
self.KsatHorFrac = self.readtblDefault(
self.Dir + "/" + self.intbl + "/KsatHorFrac.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
)
# Check if we have irrigation areas
tt = pcr.pcr2numpy(self.IrrigationAreas, 0.0)
self.nrirri = tt.max()
# Check if we have paddy irrigation areas
tt = pcr.pcr2numpy(self.IrrigationPaddyAreas, 0.0)
self.nrpaddyirri = tt.max()
self.Beta = pcr.scalar(0.6) # For sheetflow
self.M = self.readtblDefault(
self.Dir + "/" + self.intbl + "/M.tbl",
self.LandUse,
subcatch,
self.Soil,
300.0,
) # Decay parameter in Topog_sbm
self.N = self.readtblDefault(
self.Dir + "/" + self.intbl + "/N.tbl",
self.LandUse,
subcatch,
self.Soil,
0.072,
) # Manning overland flow
if self.NRiverMethod == 1:
self.NRiver = self.readtblDefault(
self.Dir + "/" + self.intbl + "/N_River.tbl",
self.LandUse,
subcatch,
self.Soil,
0.036,
) # Manning river
if self.NRiverMethod == 2:
self.NRiver = self.readtblFlexDefault(
self.Dir + "/" + self.intbl + "/N_River.tbl", 0.036, wflow_streamorder
)
self.WaterFrac = self.readtblDefault(
self.Dir + "/" + self.intbl + "/WaterFrac.tbl",
self.LandUse,
subcatch,
self.Soil,
0.0,
) # Fraction Open water
self.et_RefToPot = self.readtblDefault(
self.Dir + "/" + self.intbl + "/et_reftopot.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
) # Factor to convert reference ET to potential ET
if self.modelSnow:
# HBV Snow parameters
# critical temperature for snowmelt and refreezing: TTI= 1.000
self.TTI = self.readtblDefault(
self.Dir + "/" + self.intbl + "/TTI.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
)
# TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
self.TT = self.readtblDefault(
self.Dir + "/" + self.intbl + "/TT.tbl",
self.LandUse,
subcatch,
self.Soil,
-1.41934,
)
self.TTM = self.readtblDefault(
self.Dir + "/" + self.intbl + "/TTM.tbl",
self.LandUse,
subcatch,
self.Soil,
-1.41934,
)
# Cfmax = 3.75653 # meltconstant in temperature-index
self.Cfmax = self.readtblDefault(
self.Dir + "/" + self.intbl + "/Cfmax.tbl",
self.LandUse,
subcatch,
self.Soil,
3.75653,
)
# WHC= 0.10000 # fraction of Snowvolume that can store water
self.WHC = self.readtblDefault(
self.Dir + "/" + self.intbl + "/WHC.tbl",
self.LandUse,
subcatch,
self.Soil,
0.1,
)
# Wigmosta, <NAME>., <NAME>, <NAME>, and <NAME> (2009).
self.w_soil = (
self.readtblDefault(
self.Dir + "/" + self.intbl + "/w_soil.tbl",
self.LandUse,
subcatch,
self.Soil,
0.9 * 3.0 / 24.0,
)
* self.timestepsecs
/ self.basetimestep
)
if self.soilInfReduction:
self.cf_soil = pcr.min(
0.99,
self.readtblDefault(
self.Dir + "/" + self.intbl + "/cf_soil.tbl",
self.LandUse,
subcatch,
self.Soil,
0.038,
),
) # Ksat reduction factor for frozen soil
# We are modelling glaciers
# Determine real slope and cell length
self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
self.ZeroMap, sizeinmetres
)
self.Slope = pcr.slope(self.Altitude)
# self.Slope=pcr.ifthen(pcr.boolean(self.TopoId),pcr.max(0.001,self.Slope*celllength()/self.reallength))
self.Slope = pcr.max(0.00001, self.Slope * pcr.celllength() / self.reallength)
Terrain_angle = pcr.scalar(pcr.atan(self.Slope))
#self.N = pcr.ifthenelse(self.River, self.NRiver, self.N)
if hasattr(self, "ReserVoirSimpleLocs") or hasattr(
self, "ReserVoirComplexLocs"
):
self.ReserVoirLocs = self.ZeroMap
self.filter_P_PET = self.ZeroMap + 1.0
if hasattr(self, "ReserVoirSimpleLocs"):
# Check if we have simple and/or complex reservoirs
self.ReserVoirSimpleLocs = pcr.nominal(self.ReserVoirSimpleLocs)
self.ReservoirSimpleAreas = pcr.nominal(self.ReservoirSimpleAreas)
tt_simple = pcr.pcr2numpy(self.ReserVoirSimpleLocs, 0.0)
self.nrresSimple = tt_simple.max()
self.ReserVoirLocs = self.ReserVoirLocs + pcr.cover(
pcr.scalar(self.ReserVoirSimpleLocs)
)
areamap = self.reallength * self.reallength
res_area = pcr.areatotal(pcr.spatial(areamap), self.ReservoirSimpleAreas)
resarea_pnt = pcr.ifthen(pcr.boolean(self.ReserVoirSimpleLocs), res_area)
self.ResSimpleArea = pcr.ifthenelse(
pcr.cover(self.ResSimpleArea, pcr.scalar(0.0)) > 0,
self.ResSimpleArea,
pcr.cover(resarea_pnt, pcr.scalar(0.0)),
)
self.filter_P_PET = pcr.ifthenelse(
pcr.boolean(pcr.cover(res_area, pcr.scalar(0.0))),
res_area * 0.0,
self.filter_P_PET,
)
else:
self.nrresSimple = 0
if hasattr(self, "ReserVoirComplexLocs"):
self.ReservoirComplexAreas = pcr.nominal(self.ReservoirComplexAreas)
self.ReserVoirComplexLocs = pcr.nominal(self.ReserVoirComplexLocs)
tt_complex = pcr.pcr2numpy(self.ReserVoirComplexLocs, 0.0)
self.nrresComplex = tt_complex.max()
self.ReserVoirLocs = self.ReserVoirLocs + pcr.cover(
pcr.scalar(self.ReserVoirComplexLocs)
)
res_area = pcr.cover(pcr.scalar(self.ReservoirComplexAreas), 0.0)
self.filter_P_PET = pcr.ifthenelse(
res_area > 0, res_area * 0.0, self.filter_P_PET
)
# read files
self.sh = {}
res_ids = pcr.ifthen(self.ResStorFunc == 2, self.ReserVoirComplexLocs)
np_res_ids = pcr.pcr2numpy(res_ids, 0)
np_res_ids_u = np.unique(np_res_ids[np.nonzero(np_res_ids)])
if np.size(np_res_ids_u) > 0:
for item in np.nditer(np_res_ids_u):
self.sh[int(item)] = np.loadtxt(
self.Dir
+ "/"
+ self.intbl
+ "/Reservoir_SH_"
+ str(item)
+ ".tbl"
)
self.hq = {}
res_ids = pcr.ifthen(self.ResOutflowFunc == 1, self.ReserVoirComplexLocs)
np_res_ids = pcr.pcr2numpy(res_ids, 0)
np_res_ids_u = np.unique(np_res_ids[np.nonzero(np_res_ids)])
if np.size(np_res_ids_u) > 0:
for item in np.nditer(np_res_ids_u):
self.hq[int(item)] = np.loadtxt(
self.Dir
+ "/"
+ self.intbl
+ "/Reservoir_HQ_"
+ str(item)
+ ".tbl",
skiprows=3,
)
else:
self.nrresComplex = 0
if (self.nrresSimple + self.nrresComplex) > 0:
self.ReserVoirLocs = pcr.ordinal(self.ReserVoirLocs)
self.logger.info(
"A total of "
+ str(self.nrresSimple)
+ " simple reservoirs and "
+ str(self.nrresComplex)
+ " complex reservoirs found."
)
self.ReserVoirDownstreamLocs = pcr.downstream(
self.TopoLdd, self.ReserVoirLocs
)
self.TopoLddOrg = self.TopoLdd
self.TopoLdd = pcr.lddrepair(
pcr.cover(
pcr.ifthen(pcr.boolean(self.ReserVoirLocs), pcr.ldd(5)),
self.TopoLdd,
)
)
tt_filter = pcr.pcr2numpy(self.filter_P_PET, 1.0)
self.filterResArea = tt_filter.min()
# Determine river width from DEM, upstream area and yearly average discharge
# Scale yearly average Q at outlet with upstream area to get Q over the whole catchment
# Alf ranges from 5 to > 60: 5 for hard rock, large values for sediments
# "<NAME> et al 2005 Controls on the channel width of rivers:
# Implications for modeling fluvial incision of bedrock"
if (self.nrresSimple + self.nrresComplex) > 0:
upstr = pcr.catchmenttotal(1, self.TopoLddOrg)
else:
upstr = pcr.catchmenttotal(1, self.TopoLdd)
Qscale = upstr / pcr.mapmaximum(upstr) * Qmax
W = (
(alf * (alf + 2.0) ** (0.6666666667)) ** (0.375)
* Qscale ** (0.375)
* (pcr.max(0.0001, pcr.windowaverage(self.Slope, pcr.celllength() * 4.0)))
** (-0.1875)
* self.NRiver ** (0.375)
)
# note: the formula above uses NRiver (Manning's n for river cells)
# Use the supplied riverwidth if available, else use the calculated W
self.RiverWidth = pcr.ifthenelse(self.RiverWidth <= 0.0, W, self.RiverWidth)
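# Illustrative magnitude check of the width relation above (numbers are made
# up, not taken from any catchment): with alf = 60, an annual discharge
# Qscale of 300 m3/s, a window-averaged slope of 0.01 and NRiver = 0.036,
# (60 * 62**(2/3))**0.375 * 300**0.375 * 0.01**(-0.1875) * 0.036**0.375
# evaluates to roughly 75 m, a plausible width for a river of that size.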
# soil thickness based on topographical index (see Environmental modelling: finding simplicity in complexity)
# 1: calculate wetness index
# 2: Scale the capacity (now actually a max capacity) based on the index, also apply a minimum capacity
WI = pcr.ln(
pcr.accuflux(self.TopoLdd, 1) / self.Slope
) # Topographical wetness. Scale WI by zone/subcatchment assuming these are also geological units
WIMax = pcr.areamaximum(WI, self.TopoId) * WIMaxScale
self.SoilThickness = pcr.max(
pcr.min(self.SoilThickness, (WI / WIMax) * self.SoilThickness),
self.SoilMinThickness,
)
self.SoilWaterCapacity = self.SoilThickness * (self.thetaS - self.thetaR)
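# Worked example of the scaling above (illustrative values only): with
# SoilThickness = 2000 mm, SoilMinThickness = 500 mm, a local wetness index
# WI = 6 and WIMax = 8, the scaled thickness is max(min(2000, 6/8 * 2000), 500)
# = 1500 mm, giving SoilWaterCapacity = 1500 * (0.6 - 0.01) = 885 mm with the
# default thetaS and thetaR.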
# determine number of layers based on total soil thickness
UStoreLayerThickness = configget(
self.config, "model", "UStoreLayerThickness", "0"
)
if UStoreLayerThickness != "0":
self.nrLayers = len(UStoreLayerThickness.split(","))
self.maxLayers = self.nrLayers + 1
else:
UStoreLayerThickness = self.SoilThickness
self.nrLayers = 1
self.maxLayers = self.nrLayers
self.KsatVerFrac = []
self.c = []
for n in range(self.maxLayers):
self.KsatVerFrac.append(
self.readtblLayersDefault(
self.Dir + "/" + self.intbl + "/KsatVerFrac.tbl",
self.LandUse,
subcatch,
self.Soil,
n,
1.0,
)
)
self.c.append(
self.readtblLayersDefault(
self.Dir + "/" + self.intbl + "/c.tbl",
self.LandUse,
subcatch,
self.Soil,
n,
10.0,
)
)
# limit roots to top 99% of first zone
self.RootingDepth = pcr.min(self.SoilThickness * 0.99, self.RootingDepth)
# subgrid runoff generation, determine CC (sharpness of S-Curve) for upper
# and lower part and take average
self.DemMax = pcr.readmap(self.Dir + "/staticmaps/wflow_demmax")
self.DrainageBase = pcr.readmap(self.Dir + "/staticmaps/wflow_demmin")
self.CClow = pcr.min(
100.0,
-pcr.ln(1.0 / 0.1 - 1) / pcr.min(-0.1, self.DrainageBase - self.Altitude),
)
self.CCup = pcr.min(
100.0, -pcr.ln(1.0 / 0.1 - 1) / pcr.min(-0.1, self.Altitude - self.DemMax)
)
self.CC = (self.CClow + self.CCup) * 0.5
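# Worked example for the S-curve sharpness (hypothetical relief values): with
# ln(1/0.1 - 1) = ln(9) ~= 2.197, a cell lying 50 m above the drainage base
# gives CClow = 2.197 / 50 ~= 0.044, a cell lying 100 m below DemMax gives
# CCup = 2.197 / 100 ~= 0.022, so CC ~= 0.033 for that cell.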
# Which columns/gauges to use/ignore in updating
self.UpdateMap = self.ZeroMap
if self.updating:
_tmp = pcr.pcr2numpy(self.OutputLoc, 0.0)
gaugear = _tmp
touse = np.zeros(gaugear.shape, dtype="int")
for thecol in updateCols:
idx = (gaugear == thecol).nonzero()
touse[idx] = thecol
self.UpdateMap = pcr.numpy2pcr(pcr.Nominal, touse, 0.0)
# Calculate distance to updating points (upstream) and use it to scale the correction
# ldddist returns zero for cells at the gauges, so add 1.0 to the result
self.DistToUpdPt = pcr.cover(
pcr.min(
pcr.ldddist(
self.TopoLdd, pcr.boolean(pcr.cover(self.UpdateMap, 0)), 1
)
* self.reallength
/ pcr.celllength(),
self.UpdMaxDist,
),
self.UpdMaxDist,
)
# Initialize model variables
self.logger.info("Initializing model variables...")
self.TopoLdd = pcr.lddmask(self.TopoLdd, pcr.boolean(self.TopoId))
catchmentcells = pcr.maptotal(pcr.scalar(self.TopoId))
# Limit lateral flow per subcatchment (make pits at all subcatch boundaries)
# This is very handy for Ribasim etc...
if self.SubCatchFlowOnly > 0:
self.logger.info("Creating subcatchment-only drainage network (ldd)")
ds = pcr.downstream(self.TopoLdd, self.TopoId)
usid = pcr.ifthenelse(ds != self.TopoId, self.TopoId, 0)
self.TopoLdd = pcr.lddrepair(
pcr.ifthenelse(pcr.boolean(usid), pcr.ldd(5), self.TopoLdd)
)
# Used to separate output per LandUse/management classes
OutZones = self.LandUse
self.QMMConv = self.timestepsecs / (
self.reallength * self.reallength * 0.001
) # m3/s --> actual mm of water over the cell
# self.QMMConvUp = 1000.0 * self.timestepsecs / ( pcr.catchmenttotal(pcr.cover(1.0), self.TopoLdd) * self.reallength * self.reallength) #m3/s --> mm over upstreams
temp = (
pcr.catchmenttotal(pcr.cover(1.0), self.TopoLdd)
* self.reallength
* 0.001
* 0.001
* self.reallength
)
self.QMMConvUp = pcr.cover(self.timestepsecs * 0.001) / temp
self.ToCubic = (
self.reallength * self.reallength * 0.001
) / self.timestepsecs # m3/s
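# Worked unit check (illustrative cell size only): with timestepsecs = 86400 s
# and a 1000 m x 1000 m cell, QMMConv = 86400 / (1000 * 1000 * 0.001) = 86.4,
# so a constant 1 m3/s over one daily timestep equals 86.4 mm of water on the
# cell, and ToCubic = (1000 * 1000 * 0.001) / 86400 ~= 0.0116 converts mm per
# timestep back to m3/s.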
self.KinWaveVolumeR = self.ZeroMap
self.OldKinWaveVolumeR = self.ZeroMap
self.KinWaveVolumeL = self.ZeroMap
self.OldKinWaveVolumeL = self.ZeroMap
self.sumprecip = self.ZeroMap # accumulated rainfall for water balance
self.sumevap = self.ZeroMap # accumulated evaporation for water balance
self.sumrunoff = self.ZeroMap # accumulated runoff for water balance
self.sumint = self.ZeroMap # accumulated interception for water balance
self.sumleakage = self.ZeroMap
self.sumoutflow = self.ZeroMap
self.sumsnowmelt = self.ZeroMap
self.CumRad = self.ZeroMap
self.SnowMelt = self.ZeroMap
self.CumPrec = self.ZeroMap
self.CumInwaterMM = self.ZeroMap
self.CumInfiltExcess = self.ZeroMap
self.CumExfiltWater = self.ZeroMap
self.CumSurfaceWater = self.ZeroMap
self.watbal = self.ZeroMap
self.CumEvap = self.ZeroMap
self.CumPotenEvap = self.ZeroMap
self.CumPotenTrans = self.ZeroMap
self.CumInt = self.ZeroMap
self.CumRad = self.ZeroMap
self.CumLeakage = self.ZeroMap
self.CumPrecPol = self.ZeroMap
self.SatWaterFlux = self.ZeroMap
self.SumCellWatBal = self.ZeroMap
self.PathInfiltExceeded = self.ZeroMap
self.SoilInfiltExceeded = self.ZeroMap
self.CumOutFlow = self.ZeroMap
self.CumCellInFlow = self.ZeroMap
self.CumIF = self.ZeroMap
self.CumActInfilt = self.ZeroMap
self.IRSupplymm = self.ZeroMap
self.Aspect = pcr.scalar(pcr.aspect(self.Altitude)) # aspect [deg]
self.Aspect = pcr.ifthenelse(self.Aspect <= 0.0, pcr.scalar(0.001), self.Aspect)
# On Flat areas the Aspect function fails, fill in with average...
self.Aspect = pcr.ifthenelse(
pcr.defined(self.Aspect),
self.Aspect,
pcr.areaaverage(self.Aspect, self.TopoId),
)
# Set DCL to riverlength if that is longer than the basic length calculated from the grid
drainlength = detdrainlength(self.TopoLdd, self.xl, self.yl)
# Multiply with Factor (taken from upscaling operation, defaults to 1.0 if no map is supplied)
self.DCL = drainlength * pcr.max(1.0, self.RiverLengthFac)
self.DCL = pcr.max(self.DCL, self.RiverLength) # m
# water depth (m)
# set width for kinematic wave to cell width for all cells
self.Bw = detdrainwidth(self.TopoLdd, self.xl, self.yl)
# However, in the main river we have real flow so set the width to the
# width of the river
self.Bw = pcr.ifthenelse(self.River, self.RiverWidth, self.Bw)
# Add rivers to the WaterFrac, but check with waterfrac map and correct
self.RiverFrac = pcr.min(
1.0,
pcr.ifthenelse(
self.River, (self.RiverWidth * self.DCL) / (self.xl * self.yl), 0
),
)
self.WaterFrac = pcr.max(self.WaterFrac - self.RiverFrac, 0)
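# Continuing the illustrative numbers above: a 75 m wide river with
# DCL = 1200 m in a 1000 m x 1000 m cell gives
# RiverFrac = min(1, 75 * 1200 / 1e6) = 0.09, which is then removed from the
# open-water fraction WaterFrac.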
# term for Alpha
# Correct slope for extra length of the river in a gridcell
riverslopecor = drainlength / self.DCL
# pcr.report(riverslopecor,"cor.map")
# pcr.report(self.Slope * riverslopecor,"slope.map")
self.AlpTermR = pow((self.NRiver / (pcr.sqrt(self.Slope * riverslopecor))), self.Beta)
self.riverSlope = self.Slope * riverslopecor
# power for Alpha
self.AlpPow = (2.0 / 3.0) * self.Beta
# initial approximation for Alpha
self.AlpTermL = pow((self.N / (pcr.sqrt(self.Slope))), self.Beta)
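# Background for the two Alp* terms above: the kinematic wave uses Manning's
# equation Q = (1/n) * A * (A/P)**(2/3) * sqrt(S), which rearranges to
# A = alpha * Q**beta with beta = 0.6 and alpha = (n * P**(2/3) / sqrt(S))**beta.
# AlpTermR (river, with NRiver and the corrected river slope) and AlpTermL
# (land, with N) are the wetted-perimeter-independent part (n / sqrt(S))**beta;
# the wetted perimeter typically enters later through P**AlpPow, with
# AlpPow = (2/3) * beta = 0.4.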
# calculate catchmentsize
self.upsize = pcr.catchmenttotal(self.xl * self.yl, self.TopoLdd)
self.csize = pcr.areamaximum(self.upsize, self.TopoId)
self.wf_multparameters()
# determine flow network and upstream nodes
self.np_ldd = pcr.pcr2numpy(self.TopoLdd, self.mv)
# ldd definition
_ldd = np.array([[7, 8, 9], [4, 5, 6], [1, 2, 3]])
_ldd_us = np.fliplr(np.flipud(_ldd)).flatten()
_ldd_us = np.where(_ldd_us==5, 0, _ldd_us)
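# The 3x3 kernel above encodes the PCRaster ldd convention, which follows the
# numeric keypad: 8 = north, 2 = south, 4 = west, 6 = east, 7/9/1/3 the
# diagonals and 5 a pit/outlet. Flipping it left-right and up-down gives, for
# each neighbour position, the ldd code that neighbour must carry to drain
# into the centre cell; code 5 is masked to 0 because a pit never drains to a
# neighbour.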
# convert pcr objects to numpy for kinematic wave surface water
np_zeros = pcr.pcr2numpy(self.ZeroMap, self.mv).ravel()
np_2d_zeros = pcr.pcr2numpy(self.ZeroMap, self.mv)
self.neff = (self.thetaS - self.thetaR)
self.f = pcr.abs((self.thetaS - self.thetaR) /self.M)
self.DL = detdrainlength(self.TopoLdd, self.xl, self.yl)
self.DW = (self.xl * self.yl)/self.DL
# width for overland kinematic reservoir
self.SW = pcr.ifthenelse(self.River, pcr.max(self.DW - self.RiverWidth,0), self.DW)
layer_dtype = np.dtype(
[('c', np.float64),
('UStoreLayerDepth', np.float64),
('st', np.float64),
('KsatVerFrac', np.float64),
('ActEvapUstore', np.float64),
('vwc', np.float64),
('vwc_perc', np.float64),
('UStoreLayerThickness', np.float64),
('UStest', np.float64)
])
self.layer =
|
np.zeros((self.maxLayers,np_zeros.size), dtype=layer_dtype)
|
numpy.zeros
|
import csv
import logging
import os
import socket
import time
from collections import Counter, defaultdict
from collections.abc import Iterator
import numpy as np
import torch
import torch.nn.functional as F
from pytorch_pretrained_bert import BertTokenizer, BertAdam
from sklearn import metrics
from torch.nn.modules.loss import _Loss
from torchtext.data import BucketIterator
from tqdm import tqdm
from ensembling.ensemble_helper import load_and_eval
from ensembling.secondary_cls import SecondaryCls
from neural_bag.modelutils import glorot_param_init
from task_A.datasets.RumourEvalDataset_BERT import RumourEval2019Dataset_BERTTriplets
from task_A.frameworks.base_framework import Base_Framework
from task_A.frameworks.self_att_with_bert_tokenizing import SelfAtt_BertTokenizing_Framework
# this is a list of best ensemble predictions
# found via find_best_ensemble_greedy method from ensemble_helper.py
found_best_ensemble = [
"val_result_F1_0.57948_L_0.6698856112670224_2019-01-28_08:24_pcknot5.npy",
"val_result_F1_0.57759_L_0.703442574330578_2019-01-28_00:15_pcbirger.npy",
"val_result_F1_0.57623_L_0.6621931040825227_2019-01-28_00:32_pcknot5.npy",
"val_result_F1_0.57526_L_0.6638631148319039_2019-01-27_08:12_pcknot4.npy",
"val_result_F1_0.57423_L_0.7102468566180802_2019-01-28_17:03_pcknot5.npy",
"val_result_F1_0.57371_L_0.6669414722463592_2019-01-27_00:46_pcknot5.npy",
"val_result_F1_0.56750_L_0.6898565446440823_2019-01-26_20:31_pcknot4.npy",
"val_result_F1_0.56656_L_0.699664715034862_2019-01-27_15:57_pcbirger.npy",
"val_result_F1_0.56460_L_0.724339671515812_2019-01-28_15:53_pcbirger.npy",
"val_result_F1_0.56433_L_0.663498227135592_2019-01-28_13:27_pcknot2.npy",
"val_result_F1_0.56313_L_0.689033422880176_2019-01-26_20:39_pcknot4.npy",
"val_result_F1_0.56069_L_0.670826427442727_2019-01-27_02:10_pcknot4.npy",
"val_result_F1_0.55930_L_0.6865916204641289_2019-01-27_16:14_pcbirger.npy",
"val_result_F1_0.55580_L_0.7056901221467318_2019-01-26_20:24_pcknot4.npy",
"val_result_F1_0.55509_L_0.7102856230281916_2019-01-28_00:06_pcbirger.npy",
"val_result_F1_0.55504_L_0.6975949840002625_2019-01-27_23:51_pcbirger.npy",
"val_result_F1_0.55092_L_0.6955123813847969_2019-01-28_12:34_pcknot4.npy"
]
class Ensemble_Framework(Base_Framework):
def __init__(self, config: dict):
super().__init__(config)
# Uncomment this to run logistic regression L2 classifier
# self.create_l2_optim(config, torch.nn.CrossEntropyLoss(
# weight=torch.Tensor([3.8043243885040283, 1.0, 9.309523582458496, 8.90886116027832]).cuda()))
# sys.exit()
self.save_treshold = 999
self.modeltype = config["modeltype"]
self.tokenizer = BertTokenizer.from_pretrained(self.modeltype, cache_dir="./.BERTcache",
do_lower_case=True)
# In these experiments I tried to learn an optimal combination of ensemble predictions via an L2 classifier
# I tried various approaches (combining predictions / pre-softmax scores) but reached poor results
# Due to space constraints, this experiment did not make it into the paper
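# Sketch of the data flow implemented below (shapes inferred from the code,
# not from the paper): per-model logits are stacked into an array of shape
# (n_models, n_examples, 4), softmaxed over the class axis, concatenated
# along the last axis into (n_examples, 4 * n_models), and that matrix is
# what the small L2 classifier (SecondaryCls) is trained on.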
def create_l2_optim(self, config, lossfunction):
files = sorted(os.listdir("saved/ensemble/numpy"))
train_prefix = "val_"
valid = [f for f in files if f.startswith(train_prefix) and f.endswith("npy")]
valid_ensemble_subset = [f"{train_prefix}{s[len('val_'):]}" for s in list(found_best_ensemble)]
# result_files = [f for f in valid if "result" in f]
result_files = [f for f in valid if "result" in f and f in valid_ensemble_subset]
assert len(result_files) == len(valid_ensemble_subset)
logging.debug(result_files)
label_file = [f for f in valid if "labels" in f][0]
labels = np.load(os.path.join("saved/ensemble/numpy", label_file))
result_matrices = [np.load(os.path.join("saved/ensemble/numpy", result_file)) for result_file in result_files]
results = np.array(result_matrices)
# experiment 2, try softmaxing logits first
results = torch.Tensor(results)
results = F.softmax(results, -1).numpy()
results = torch.Tensor(np.concatenate(results, -1)).cuda()
# experiment 1, training LR on logits
# results = np.concatenate(results, -1)
# results = torch.Tensor(results).cuda()
labels = torch.Tensor(labels).cuda().long()
valid = [f for f in files if f.startswith("val_") and f.endswith("npy")]
valid_ensemble_subset = [f"val_{s[len('val_'):]}" for s in list(found_best_ensemble)]
# result_files = [f for f in valid if "result" in f]
result_files = [f for f in valid if "result" in f and f in valid_ensemble_subset]
assert len(result_files) == len(valid_ensemble_subset)
logging.debug(result_files)
label_file = [f for f in valid if "labels" in f][0]
dev_labels = np.load(os.path.join("saved/ensemble/numpy", label_file))
dev_results = np.array(
[np.load(os.path.join("saved/ensemble/numpy", result_file)) for result_file in result_files])
dev_results = torch.Tensor(dev_results)
dev_results = F.softmax(dev_results, -1).numpy()
dev_results = torch.Tensor(np.concatenate(dev_results, -1)).cuda()
# dev_results = np.concatenate(dev_results, -1)
# dev_results = torch.Tensor(dev_results).cuda()
dev_labels = torch.Tensor(dev_labels).cuda().long()
total_labels = list(dev_labels.cpu().numpy())
ens_best_F1 = 0
ens_best_distribution = None
l = torch.nn.CrossEntropyLoss(
weight=torch.Tensor([3.8043243885040283, 1.0, 9.309523582458496, 8.90886116027832]).cuda())
for _ in range(1000):
F1, distribution = self.run_LR_training(config, dev_labels, dev_results, labels, lossfunction,
results, total_labels)
logging.info(f"New Best F1: {F1}")
if F1 > ens_best_F1:
_, _, e_f1 = load_and_eval(l,
weights=distribution
)
if e_f1 != F1:
F1, distribution = self.run_LR_training(config, dev_labels, dev_results, labels, lossfunction,
results, total_labels)
ens_best_F1 = F1
ens_best_distribution = distribution
logging.debug(f"New Best F1: {ens_best_F1}")
logging.debug(ens_best_distribution)
def run_LR_training(self, config, dev_labels, dev_results, labels, lossfunction, results, total_labels):
model = SecondaryCls(config).cuda()
glorot_param_init(model)
optimizer = BertAdam(filter(lambda p: p.requires_grad, model.parameters()),
lr=config["hyperparameters"]["learning_rate"], weight_decay=0.02)
best_distribution = None
best_F1 = 0
for i in range(1000):
pred_logits = model(results)
loss = lossfunction(pred_logits, labels)
loss.backward()
optimizer.step()
optimizer.zero_grad()
dev_pred_logits = model(dev_results)
dev_loss = lossfunction(dev_pred_logits, dev_labels)
maxpreds, argmaxpreds = torch.max(F.softmax(dev_pred_logits, -1), dim=1)
total_preds = list(argmaxpreds.cpu().numpy())
correct_vec = argmaxpreds == dev_labels
total_correct = torch.sum(correct_vec).item()
loss, acc = dev_loss, total_correct / results.shape[0]
F1 = metrics.f1_score(total_labels, total_preds, average="macro")
if F1 > best_F1:
best_F1 = F1
best_distribution = F.softmax(model.a)
# logging.info(
# f"Validation loss|acc|F1|BEST: {loss:.6f}|{acc:.6f}|{F1:.6f} || {best_F1} || ")
return best_F1, best_distribution
def fit(self, modelfunc):
config = self.config
fields = RumourEval2019Dataset_BERTTriplets.prepare_fields_for_text()
train_data = RumourEval2019Dataset_BERTTriplets(config["train_data"], fields, self.tokenizer,
max_length=config["hyperparameters"]["max_length"])
dev_data = RumourEval2019Dataset_BERTTriplets(config["dev_data"], fields, self.tokenizer,
max_length=config["hyperparameters"]["max_length"])
test_data = RumourEval2019Dataset_BERTTriplets(config["test_data"], fields, self.tokenizer,
max_length=config["hyperparameters"]["max_length"])
# torch.manual_seed(5246727901370826861 & ((1 << 63) - 1))
# torch.manual_seed(40)
# device = torch.device("cpu")
device = torch.device("cuda:0" if config['cuda'] and
torch.cuda.is_available() else "cpu")
create_iter = lambda data: BucketIterator(data, sort_key=lambda x: -len(x.text), sort=True,
batch_size=config["hyperparameters"]["batch_size"],
repeat=False,
device=device)
dev_iter = create_iter(dev_data)
test_iter = create_iter(test_data)
logging.info(f"Train examples: {len(train_data.examples)}\nValidation examples: {len(dev_data.examples)}")
logging.info(f"Test examples: {len(test_data.examples)}")
checkpoints = os.listdir("saved/ensemble/")
modelpaths = sorted([f"saved/ensemble/{ch}" for ch in checkpoints if ch.endswith(".pt")])
logging.info(f"Running ensemble of {len(modelpaths)} models")
models = []
weights = SelfAtt_BertTokenizing_Framework.get_class_weights(train_data.examples, "stance_label", 4,
min_fraction=1)
logging.info("class weights")
logging.info(f"{str(weights.numpy().tolist())}")
lossfunction = torch.nn.CrossEntropyLoss(weight=weights.to(device)) # .to(device))
soft_ensemble = False
build_predictions = True
check_f1s = False
eval_from_npy = False
train_logreg = False
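# Mode selection sketch (derived from the if/elif chain below, not from any
# config): the flags are evaluated in the order train_logreg, eval_from_npy,
# build_predictions, soft_ensemble, check_f1s and only the first one that is
# True runs; with the defaults above only the build_predictions branch
# (dumping per-model .npy predictions) executes.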
if train_logreg:
start_time = time.time()
try:
model = self.create_l2_optim(config, lossfunction)
except KeyboardInterrupt:
logging.info('-' * 120)
logging.info('Exit from training early.')
finally:
logging.info(f'Finished after {(time.time() - start_time) / 60} minutes.')
elif eval_from_npy:
start_time = time.time()
try:
load_and_eval(lossfunction)
except KeyboardInterrupt:
logging.info('-' * 120)
logging.info('Exit from training early.')
finally:
logging.info(f'Finished after {(time.time() - start_time) / 60} minutes.')
elif build_predictions:
# generate .npy predictions from models
start_time = time.time()
try:
for idx, model_path in enumerate(modelpaths):
pretrained_model = torch.load(
model_path)
model = modelfunc.from_pretrained(self.modeltype, cache_dir="./.BERTcache",
state_dict=pretrained_model.state_dict()
).to(device)
model.dropout = pretrained_model.dropout
logging.info("MODEL: " + model_path)
suffix = model_path[model_path.index("F1"):model_path.index(".pt")]
# generate prediction scores from train data
# train_loss, train_acc, _, train_F1 = self.build_results(idx, model, suffix,
# lossfunction,
# train_iter,
# config,
# prefix="train_", )
# generate prediction scores from val data
# validation_loss, validation_acc, val_acc_per_level, val_F1 = self.build_results(idx, model, suffix,
# lossfunction,
# dev_iter,
# config,
# prefix="val_")
# generate prediction scores from test data
self.build_results(idx, model, suffix,
lossfunction,
test_iter,
config,
prefix="test_",
do_not_evaluate=True)
# logging.info(
# f"Training loss|acc|F1: {train_loss:.6f}|{train_acc:.6f}|{train_F1:.6f}")
# logging.info(
# f"Validation loss|acc|F1: {validation_loss:.6f}|{validation_acc:.6f}|{val_F1:.6f}")
except KeyboardInterrupt:
logging.info('-' * 120)
logging.info('Exit from training early.')
finally:
logging.info(f'Finished after {(time.time() - start_time) / 60} minutes.')
elif soft_ensemble:
start_time = time.time()
try:
for modelpath in modelpaths:
pretrained_model = torch.load(
modelpath)
model = modelfunc.from_pretrained(self.modeltype, cache_dir="./.BERTcache",
state_dict=pretrained_model.state_dict()
).to(device)
model.dropout = pretrained_model.dropout
models.append(model)
validation_loss, validation_acc, val_acc_per_level, val_F1 = self.validate_models(models,
lossfunction,
dev_iter,
config,
log_results=False)
logging.info(
f"Validation loss|acc|F1: {validation_loss:.6f}|{validation_acc:.6f}|{val_F1:.6f}")
except KeyboardInterrupt:
logging.info('-' * 120)
logging.info('Exit from training early.')
finally:
logging.info(f'Finished after {(time.time() - start_time) / 60} minutes.')
elif check_f1s:
# pretrained_model = None
start_time = time.time()
try:
for idx, modelpath in enumerate(modelpaths):
pretrained_model = torch.load(
modelpath)
model = modelfunc.from_pretrained(self.modeltype, cache_dir="./.BERTcache",
state_dict=pretrained_model.state_dict()
).to(device)
model.dropout = pretrained_model.dropout
logging.info(f"Model: {checkpoints[idx]}")
# self.predict(f"answer_BERTF1_textonly_{idx}.json", model, dev_iter)
# train_loss, train_acc, _, train_F1 = self.validate(model, lossfunction, train_iter, config,
# log_results=False)
validation_loss, validation_acc, val_acc_per_level, val_F1 = self.validate(model, lossfunction,
dev_iter,
config,
log_results=False)
# logging.info(
# f"Training loss|acc|F1: {train_loss:.6f}|{train_acc:.6f}|{train_F1:.6f}")
logging.info(
f"Validation loss|acc|F1: {validation_loss:.6f}|{validation_acc:.6f}|{val_F1:.6f}")
except KeyboardInterrupt:
logging.info('-' * 120)
logging.info('Exit from training early.')
finally:
logging.info(f'Finished after {(time.time() - start_time) / 60} minutes.')
def build_results(self, k, model: torch.nn.Module, suffix, lossfunction: _Loss, dev_iter: Iterator, config: dict,
prefix="val_", verbose=False, do_not_evaluate=False):
if not os.path.exists("saved/ensemble/numpy/"):
os.makedirs("saved/ensemble/numpy/")
train_flag = model.training
model.eval()
total_examples = len(dev_iter.data())
results = np.zeros((total_examples, 4))
total_batches = len(dev_iter.data()) // dev_iter.batch_size
if verbose:
pbar = tqdm(total=total_batches)
examples_so_far = 0
dev_loss = 0
total_correct = 0
total_correct_per_level = Counter()
total_per_level = defaultdict(lambda: 0)
total_labels = []
total_preds = []
ids = []
for idx, batch in enumerate(dev_iter):
pred_logits = model(batch)
numpy_logits = pred_logits.cpu().detach().numpy() # bsz x classes
step_size = numpy_logits.shape[0]
write_index = idx * dev_iter.batch_size
results[write_index: write_index + step_size] = numpy_logits
ids += batch.tweet_id
if not do_not_evaluate:
loss = lossfunction(pred_logits, batch.stance_label)
branch_levels = [id.split(".", 1)[-1] for id in batch.branch_id]
for branch_depth in branch_levels: total_per_level[branch_depth] += 1
correct, correct_per_level = self.calculate_correct(pred_logits, batch.stance_label,
levels=branch_levels)
total_correct += correct
total_correct_per_level += correct_per_level
examples_so_far += len(batch.stance_label)
dev_loss += loss.item()
maxpreds, argmaxpreds = torch.max(F.softmax(pred_logits, -1), dim=1)
total_preds += list(argmaxpreds.cpu().numpy())
total_labels += list(batch.stance_label.cpu().numpy())
if verbose:
pbar.set_description(
f"dev loss: {dev_loss / (idx + 1):.4f}, dev acc: {total_correct / examples_so_far:.4f}")
pbar.update(1)
if not do_not_evaluate:
loss, acc = dev_loss / total_batches, total_correct / examples_so_far
total_acc_per_level = {depth: total_correct_per_level.get(depth, 0) / total for depth, total in
total_per_level.items()}
F1 = metrics.f1_score(total_labels, total_preds, average="macro")
|
np.save(f"saved/ensemble/numpy/{prefix}result_{suffix}.npy", results)
|
numpy.save
|
from unittest import TestCase
from tempfile import TemporaryDirectory
from pathlib import Path
from giant.camera_models import PinholeModel, OwenModel, BrownModel, OpenCVModel, save, load
import numpy as np
import giant.rotations as at
import lxml.etree as etree
class TestPinholeModel(TestCase):
def setUp(self):
self.Class = PinholeModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters=['focal_length', 'px'])
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 0, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
def test_estimation_parameters(self):
model = self.Class()
model.estimation_parameters = 'kx'
self.assertEqual(model.estimation_parameters, ['kx'])
model.estimate_multiple_misalignments = False
model.estimation_parameters = ['px', 'py', 'Multiple misalignments']
self.assertEqual(model.estimation_parameters, ['px', 'py', 'multiple misalignments'])
self.assertTrue(model.estimate_multiple_misalignments)
def test_kx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_ky(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 3, 0]]))
self.assertEqual(model.ky, 3)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_px(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 20], [0, 3, 0]]))
self.assertEqual(model.px, 20)
model.px = 100
self.assertEqual(model.px, 100)
self.assertEqual(model.intrinsic_matrix[0, 2], 100)
def test_py(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 0, 10]]))
self.assertEqual(model.py, 10)
model.py = 100
self.assertEqual(model.py, 100)
self.assertEqual(model.intrinsic_matrix[1, 2], 100)
def test_a1(self):
model = self.Class(temperature_coefficients=np.array([10, 0, 0]))
self.assertEqual(model.a1, 10)
model.a1 = 100
self.assertEqual(model.a1, 100)
self.assertEqual(model.temperature_coefficients[0], 100)
def test_a2(self):
model = self.Class(temperature_coefficients=np.array([0, 10, 0]))
self.assertEqual(model.a2, 10)
model.a2 = 100
self.assertEqual(model.a2, 100)
self.assertEqual(model.temperature_coefficients[1], 100)
def test_a3(self):
model = self.Class(temperature_coefficients=np.array([0, 0, 10]))
self.assertEqual(model.a3, 10)
model.a3 = 100
self.assertEqual(model.a3, 100)
self.assertEqual(model.temperature_coefficients[2], 100)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, px=100, py=-5)
np.testing.assert_array_almost_equal(model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_get_temperature_scale(self):
model = self.Class(temperature_coefficients=[1, 2, 3.])
self.assertEqual(model.get_temperature_scale(1), 7)
np.testing.assert_array_equal(model.get_temperature_scale([1, 2]), [7, 35])
np.testing.assert_array_equal(model.get_temperature_scale([-1, 2.]), [-1, 35])
def test_apply_distortion(self):
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class()
for inp in inputs:
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, inp)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=1, a2=2, a3=3)
with self.subTest(misalignment=None):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
gnom, _, pix = model.get_projections(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=-1e-3, a2=1e-6, a3=-7e-8)
with self.subTest(misalignment=None):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
pix = model.project_onto_image(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
pix = model.project_onto_image(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
pix = model.project_onto_image(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
|
np.testing.assert_array_almost_equal(pix, pix_true)
|
numpy.testing.assert_array_almost_equal
|
###############################################################################
# apogee.spec.stack: stack APOGEE spectra in various ways
###############################################################################
import numpy
_BIGERR= 10.**7.
def median(spec,mask=None):
"""
NAME:
median
PURPOSE:
median stack a set of spectra
INPUT:
spec - array of spectra (nspec,nwave)
mask= (None) if set, use this mask (1/True for inclusion)
OUTPUT:
median spectrum
HISTORY:
2015-01-26 - Written - Bovy (IAS@KITP)
"""
if mask is None:
mask= True-
|
numpy.isnan(spec)
|
numpy.isnan
|
# Copyright (c) 2021 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''Expression parser version 2 and namespace.
The syntax of an expression is as follows:
* **Integers** or **decimal numbers** are denoted in the usual way.
Examples: ``1``, ``1.2``, ``.2``.
* **Variables** are denoted with a string of characters. The first character
must not be a digit. Unlike Python variables, underscores are not allowed,
as they have a special meaning. If the variable is an array with one or
more axes, all those axes should be labeled with a latin character, the
index, and appended to the variable with an underscore. For example an
array ``a`` with two axes can be denoted with ``a_ij``. Optionally, a
single numeral may be used to select an item at the concerning axis.
Example: in ``a_i0`` the first axis of ``a`` is labeled ``i`` and the first
element of the second axis is selected. If the same index occurs twice,
the trace is taken along the concerning axes. Example: the trace of the
first and third axes of ``b`` is denoted by ``b_iji``. It is invalid to
specify an index more than twice.
* A term, the **product** of two or more arrays or scalars, is denoted by
space-separated variables, constants or compound expressions. Example:
``a b c`` denotes the product of the scalars ``a``, ``b`` and ``c``. A
term may start with a number, but a number is not allowed in other parts
of the term. Example: ``2 a`` denotes two times ``a``; ``2 2 a`` and ``2
a 2`` are invalid. When two arrays in a term have the same index, this
index is summed. Example: ``a_i b_i`` denotes the inner product of ``a``
and ``b`` and ``A_ij b_j`` a matrix-vector product. It is not allowed
to use an index more than twice in a term.
* The operator ``/`` denotes a **fraction**. Example: in ``a b / c d`` ``a
b`` is the numerator and ``c d`` the denominator. Both the numerator and
the denominator may start with a number. Example: ``2 a / 3 b``. The
denominator must be a scalar. Example: ``2 / a_i b_i`` is valid, but ``2
a_i / b_i`` is not.
.. warning::
This syntax is different from the Python syntax. In Python ``a*b /
c*d`` is mathematically equivalent to ``a*b*d/c``.
* The operators ``+`` and ``-`` denote **add** and **subtract**. Both
operators should be surrounded by whitespace, e.g. ``a + b``. Both
operands should have the same shape. Example: ``a_ij + b_i c_j`` is
valid, provided that the lengths of the axes with the same indices match,
but ``a_ij + b_i`` is invalid. At the beginning of an expression or a
compound ``-`` may be used to negate the following term. Example: in
``-a b + c`` the term ``a b`` is negated before adding ``c``. It is not
allowed to negate other terms: ``a + -b`` is invalid, so is ``a -b``.
* An expression surrounded by parentheses is a **compound expression** and
can be used as single entity in a term. Example: ``(a_i + b_i) c_i``
denotes the inner product of ``a_i + b_i`` with ``c_i``.
* **Exponentiation** is denoted by a ``^``, where the left and right
operands should be a number, variable or compound expression and the
right operand should be a scalar. Example: ``a^2`` denotes the square of
``a``, ``a^-2`` denotes ``a`` to the power ``-2`` and ``a^(1 / 2)`` the
square root of ``a``. Note that the power has precedence over a unary
minus: ``-2^2`` is interpreted as ``-(2^2)``.
* An expression surrounded by square brackets or curly braces denotes the
**jump** or **mean**, respectively, of the enclosed expression. Example:
``[a_i]`` denotes the jump of ``a_i`` and ``{a_i + b_i}`` denotes the
mean of ``a_i + b_i``.
* A **function call** is denoted by a name — following the same rules as
for a variable name — optionally followed by ``_`` and indices for **generated axes**, directly
followed by the left parenthesis ``(``, without a space. A function takes
a single argument with any shape and returns an array with the same shape
plus an axis per index listed after the underscore. The function is applied
pointwise to the argument. If an index for a generated axis is also present
in the argument, the trace is taken along the concerning axes after the
function call.
.. _`Einstein Summation Convention`: https://en.wikipedia.org/wiki/Einstein_notation
'''
import typing
if typing.TYPE_CHECKING: # pragma: nocover
from typing_extensions import Protocol
else:
class _Protocol(type):
def __getitem__(cls, item):
return cls
class Protocol(metaclass=_Protocol): pass
from typing import Callable, FrozenSet, Generic, Iterable, Iterator, List, Mapping, Optional, Sequence, Set, Tuple, TypeVar, Union
import functools, numpy
from . import function
T = TypeVar('T')
class _Substring:
def __init__(self, base: str, start: Optional[int] = None, stop: Optional[int] = None) -> None:
self.base = base
self.start = 0 if start is None else start
self.stop = len(base) if stop is None else stop
assert 0 <= self.start <= self.stop <= len(self.base)
def __len__(self) -> int:
return self.stop - self.start
def __str__(self) -> str:
return self.base[self.start:self.stop]
def __iter__(self) -> Iterator['_Substring']:
for i in range(self.start, self.stop):
yield _Substring(self.base, i, i + 1)
def __getitem__(self, item: Union[int, slice]) -> '_Substring':
# Since this is for internal use, we use asserts instead of proper
# exceptions.
assert isinstance(item, (int, slice))
if isinstance(item, int):
assert 0 <= item < len(self)
return _Substring(self.base, self.start + item, self.start + item + 1)
else:
start, stop, stride = item.indices(len(self))
assert stride == 1
return _Substring(self.base, self.start + start, self.start + stop)
def __contains__(self, item: str) -> bool:
return self._find(_match(item))[0] >= 0
def trim(self) -> '_Substring':
return self.trim_end().trim_start()
def trim_start(self) -> '_Substring':
start = self.start
while start < self.stop and self.base[start] == ' ':
start += 1
return _Substring(self.base, start, self.stop)
def trim_end(self) -> '_Substring':
stop = self.stop
while stop > self.start and self.base[stop - 1] == ' ':
stop -= 1
return _Substring(self.base, self.start, stop)
def starts_with(self, prefix: str) -> bool:
return str(self).startswith(prefix)
def ends_with(self, suffix: str) -> bool:
return str(self).endswith(suffix)
def strip_prefix(self, prefix: str) -> Optional['_Substring']:
return self[len(prefix):] if self.starts_with(prefix) else None
def strip_suffix(self, suffix: str) -> Optional['_Substring']:
return self[:len(self)-len(suffix)] if self.ends_with(suffix) else None
def _find(self, *matchers: Callable[[str], int]) -> Tuple[int, int, int]:
# Returns the index of the first successful matcher, the position of the
# match and the length of the match, or `-1`, the length of the substring
# and `0` if nothing matches.
level = 0
for offset, ch in enumerate(self.base[self.start:self.stop]):
if ch in (')', ']', '}', '>'):
level -= 1
if level == 0:
tail = self.base[self.start+offset:self.stop]
for imatcher, matcher in enumerate(matchers):
length = matcher(tail)
if length:
return imatcher, offset, length
if ch in ('(', '[', '{', '<'):
level += 1
return -1, len(self), 0
def split(self, *matchers: Callable[[str], int]) -> Iterator['_Substring']:
# Split the substring at every non-overlapping match.
n = 1
while n:
_, i, n = self._find(*matchers)
yield self[:i]
self = self[i+n:]
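# For example (illustrative): splitting the string `a + (b + c) + d` at the
# matcher `_match(' + ')` yields the pieces `a`, `(b + c)` and `d`; the match
# inside the parentheses is skipped because `_find` only tries the matchers
# at bracket level zero.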
def isplit(self, *matchers: Callable[[str], int], first: int) -> Iterator[Tuple[int, '_Substring']]:
# Split the substring at every non-overlapping match and yield both the
# index of the successful matcher and the (subsequently splitted) substring
# to the *right* of the match. The item to the left of the first match, or
# the entire substring if nothing matches, gets `first` as matcher index.
imatcher = first
n = 1
while n:
imatcher_next, i, n = self._find(*matchers)
yield imatcher, self[:i]
self = self[i+n:]
imatcher = imatcher_next
def partition(self, *matchers: Callable[[str], int]) -> Tuple['_Substring', '_Substring', '_Substring']:
_, i, n = self._find(*matchers)
return self[:i], self[i:i+n], self[i+n:]
def partition_scope(self) -> Tuple['_Substring', '_Substring', '_Substring', '_Substring', '_Substring']:
_, i, n = self._find(lambda tail: tail[0] in ('(', '[', '{', '<'))
_, j, n = self[i:]._find(lambda tail: tail[0] in (')', ']', '}', '>'))
j += i
return self[:i], self[i:i+1], self[i+1:j], self[j:j+1], self[j+1:]
class ExpressionSyntaxError(ValueError):
def __init__(self, message: str, caret: Optional['_Substring'] = None, tilde: Optional['_Substring'] = None) -> None:
expression, = {s.base for s in (caret, tilde) if s is not None}
markers = ' '*len(expression)
for marker, s in ('^', caret), ('~', tilde):
if s is not None:
n = max(1, len(s))
markers = markers[:s.start] + marker * n + markers[s.start+n:]
markers = markers.rstrip()
super().__init__('\n'.join((message, expression, markers)))
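# For example, evaluating 'A_ij + A_ji' with `A` of shape (2, 3) produces the
# annotated message shown in the `Namespace` docstring below:
#   Index i has length 2 in the first term [^] but length 3 in the second term [~].
#   A_ij + A_ji
#   ^^^^   ~~~~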
class _InvalidDimension:
def __init__(self, __actual_ndim: int) -> None:
self.actual_ndim = __actual_ndim
_Shape = Tuple[int, ...]
class _ArrayOps(Protocol[T]):
def from_int(self, __value: int) -> T: ...
def from_float(self, __value: float) -> T: ...
def get_variable(self, __name: str, __ndim: int) -> Optional[Union[Tuple[T, _Shape], _InvalidDimension]]: ...
def call(self, __name: str, __ngenerates: int, arg: T) -> Optional[Union[Tuple[T, _Shape], _InvalidDimension]]: ...
def get_element(self, __array: T, __axis: int, __index: int) -> T: ...
def transpose(self, __array: T, __axes: Tuple[int, ...]) -> T: ...
def trace(self, __array: T, __axis1: int, __axis2: int) -> T: ...
def scope(self, __array: T) -> T: ...
def mean(self, __array: T) -> T: ...
def jump(self, __array: T) -> T: ...
def add(self, *args: Tuple[bool, T]) -> T: ...
def multiply(self, *args: T) -> T: ...
def divide(self, __numerator: T, __denominator: T) -> T: ...
def power(self, __base: T, __exponent: T) -> T: ...
_ORDINALS = 'zeroth', 'first', 'second', 'third', 'fourth', 'fifth'
def _nth(n: int) -> str:
return _ORDINALS[n] if 0 <= n < len(_ORDINALS) else '{}th'.format(n)
def _sp(n: int, s: str, p: str) -> str:
return '{} {}'.format(n, s if n == 1 else p)
def _match(s: str) -> Callable[[str], int]:
def matcher(tail: str) -> int:
return len(s) if tail.startswith(s) else 0
return matcher
def _match_spaces(tail: str) -> int:
return len(tail) - len(tail.lstrip(' '))
class _Parser(Generic[T]):
def __init__(self, __array_ops: _ArrayOps[T]) -> None:
self.array = __array_ops
def parse_expression(self, s: _Substring) -> Tuple[T, _Shape, str, FrozenSet[str]]:
s_tail = s
# Parse optional leading minus. The leading minus applies to the entire
# term, not to the first number, if any, e.g. `-2^2` is interpreted as
# `-(2^2)`. See also
# https://en.wikipedia.org/wiki/Order_of_operations#Unary_minus_sign
negate = False
s_try_strip = s_tail.trim_start().strip_prefix('-')
if s_try_strip:
s_tail = s_try_strip
negate = True
# Parse terms separated by ` + ` or ` - `. If the expression is empty,
# `split` yields once and `self.parse_fraction` will raise an exception.
unaligned = tuple((imatcher == 1, s_term, self.parse_fraction(s_term)) for imatcher, s_term in s_tail.isplit(_match(' + '), _match(' - '), first=1 if negate else 0))
# Check that all terms have the same indices and transpose all but the
# first array such that all terms have the same order of indices.
negate, s_first, (term, shape, indices, summed_indices) = unaligned[0]
if not negate and len(unaligned) == 1:
# There is only one term without unary minus. Return the term as is.
return term, shape, indices, summed_indices
aligned = [(negate, term)]
for iterm, (negate, s_term, (term, term_shape, term_indices, term_summed_indices)) in enumerate(unaligned[1:], 2):
if term_indices != indices:
# The indices of the current term don't match the indices of the first
# term. Check if there are no missing indices and transpose.
for index in sorted(set(indices) - set(term_indices)):
raise ExpressionSyntaxError('Index {} of the first term [^] is missing in the {} term [~].'.format(index, _nth(iterm)), caret=s_first.trim(), tilde=s_term.trim())
for index in sorted(set(term_indices) - set(indices)):
raise ExpressionSyntaxError('Index {} of the {} term [~] is missing in the first term [^].'.format(index, _nth(iterm)), caret=s_first.trim(), tilde=s_term.trim())
axes = tuple(map(term_indices.index, indices))
term = self.array.transpose(term, axes)
term_shape = tuple(map(term_shape.__getitem__, axes))
# Verify the shape of the current (transposed) term with the first
# term.
for n, m, index in zip(shape, term_shape, indices):
if n != m:
raise ExpressionSyntaxError('Index {} has length {} in the first term [^] but length {} in the {} term [~].'.format(index, n, m, _nth(iterm)), caret=s_first.trim(), tilde=s_term.trim())
aligned.append((negate, term))
summed_indices |= term_summed_indices
result = self.array.add(*aligned)
return result, shape, indices, summed_indices
def parse_fraction(self, s: _Substring) -> Tuple[T, _Shape, str, FrozenSet[str]]:
s_parts = tuple(s.split(_match(' / ')))
if len(s_parts) > 2:
raise ExpressionSyntaxError('Repeated fractions are not allowed. Use parentheses if necessary.', s.trim())
# Parse the numerator.
numerator, shape, indices, numerator_summed_indices = self.parse_term(s_parts[0])
if len(s_parts) == 1:
# There is no denominator. Return the numerator as is.
return numerator, shape, indices, numerator_summed_indices
# Parse the denominator.
denominator, denominator_shape, denominator_indices, denominator_summed_indices = self.parse_term(s_parts[1])
# Verify and merge indices. The denominator must have dimension zero.
# Summed indices of the numerator and denominator are treated as if the
# numerator and denominator are multiplied.
if denominator_indices:
raise ExpressionSyntaxError('The denominator must have dimension zero.', s_parts[1].trim())
summed_indices = self._merge_summed_indices_same_term(s.trim(), numerator_summed_indices, denominator_summed_indices)
self._verify_indices_summed(s.trim(), indices, summed_indices)
return self.array.divide(numerator, denominator), shape, indices, summed_indices
def parse_term(self, s: _Substring) -> Tuple[T, _Shape, str, FrozenSet[str]]:
s_trimmed = s.trim()
if not s_trimmed:
# If the string is empty, let `parse_power` raise an exception. We
# don't trim the string, because we want to highlight the entire part of
# the expression that we are currently parsing.
return self.parse_power(s, allow_number=True)
# Split the substring at spaces and parse the items using `parse_power`.
# The first items is allowed to be a number, the remainder is not.
parts = tuple(self.parse_power(s_part, allow_number=i==0) for i, s_part in enumerate(s_trimmed.split(_match_spaces)))
if len(parts) == 1:
# There is only one item in the term. Return this item as is.
return parts[0]
items, shapes, indices, summed_indices = zip(*parts)
shape = tuple(n for shape in shapes for n in shape)
# Sum duplicate indices, e.g. index `i` in `a_ij b_ik`.
return self._trace(s_trimmed, self.array.multiply(*items), shape, ''.join(indices), *summed_indices)
def parse_power(self, s: _Substring, allow_number: bool) -> Tuple[T, _Shape, str, FrozenSet[str]]:
s_parts = tuple(s.trim().split(_match('^')))
if len(s_parts) > 2:
raise ExpressionSyntaxError('Repeated powers are not allowed. Use parentheses if necessary.', s.trim())
if len(s_parts) == 2:
if s_parts[0].ends_with(' '):
raise ExpressionSyntaxError('Unexpected whitespace before `^`.', s_parts[0][-1:])
if s_parts[1].starts_with(' '):
raise ExpressionSyntaxError('Unexpected whitespace after `^`.', s_parts[1][:1])
# Parse the base.
base, shape, indices, base_summed_indices = self.parse_item(s_parts[0], allow_number=allow_number)
if len(s_parts) == 1:
# There's no exponent. Return the base as is.
return base, shape, indices, base_summed_indices
# Parse the exponent. This should either be a scoped expression, or a signed int.
s_head, s_open, s_scope, s_close, s_tail = s_parts[1].partition_scope()
if not s_head and not s_tail and str(s_open) == '(' and str(s_close) == ')':
exponent, exponent_shape, exponent_indices, exponent_summed_indices = self.parse_expression(s_scope)
elif s_parts[1] and ('0' <= str(s_parts[1][0]) <= '9' or str(s_parts[1][0]) == '-'):
exponent, exponent_shape, exponent_indices, exponent_summed_indices = self.parse_signed_int(s_parts[1])
else:
raise ExpressionSyntaxError('Expected an int or scoped expression.', s_parts[1])
# Verify and merge indices. The exponent must have dimension zero. Summed
# indices of the base and exponent are treated as if base and exponent are
# multiplied.
if exponent_indices:
raise ExpressionSyntaxError('The exponent must have dimension zero.', s_parts[1])
summed_indices = self._merge_summed_indices_same_term(s.trim(), base_summed_indices, exponent_summed_indices)
self._verify_indices_summed(s.trim(), indices, summed_indices)
return self.array.power(base, exponent), shape, indices, summed_indices
def parse_item(self, s: _Substring, allow_number: bool) -> Tuple[T, _Shape, str, FrozenSet[str]]:
s_trimmed = s.trim()
if allow_number:
msg = 'Expected a number, variable, scope, mean, jump or function call.'
else:
msg = 'Expected a variable, scope, mean, jump or function call.'
if any(op in s_trimmed for op in ('+', '-', '/')):
msg += ' Hint: the operators `+`, `-` and `/` must be surrounded by spaces.'
error = ExpressionSyntaxError(msg, s_trimmed or s)
if not s_trimmed:
raise error
# If the expression starts with a digit or a dot, we assume this is a
# number. We try to parse the expression as an int or a float, in that
# order. Otherwise we raise `error`.
if '0' <= str(s_trimmed[0]) <= '9' or str(s_trimmed[0]) == '.':
if not allow_number:
raise ExpressionSyntaxError('Numbers are only allowed at the start of a term.', s_trimmed)
for parse in self.parse_unsigned_int, self.parse_unsigned_float:
try:
return parse(s_trimmed)
except ExpressionSyntaxError:
pass
raise error
# If the expression contains a scope, partition it and verify that opening
# and closing parentheses match. If there is no scope, `head` will be the
# entire expression and `scope` will be empty.
s_head, s_open, s_scope, s_close, s_tail = s_trimmed.partition_scope()
parentheses = {'(': ')', '[': ']', '{': '}', '<': '>'}
if s_open:
if not s_close:
raise ExpressionSyntaxError("Unclosed `{}`.".format(s_open), caret=s_open, tilde=s_close)
if parentheses[str(s_open)] != str(s_close):
raise ExpressionSyntaxError("Parenthesis `{}` closed by `{}`.".format(s_open, s_close), caret=s_open, tilde=s_close)
# Under no circumstances we allow anything after a scope.
if s_tail:
raise ExpressionSyntaxError('Unexpected symbols after scope.', s_tail)
# If there are symbols (before an optional scope), assume this is a variable
# (no scope) or a function (with scope).
if s_head:
s_name, s_underscore, s_generated_indices = s_head.partition(_match('_'))
if not s_open:
# There is no scope. Parse as a variable.
indices = ''
summed_indices = frozenset()
result = self.array.get_variable(str(s_name), len(s_generated_indices))
if result is None:
raise ExpressionSyntaxError('No such variable: `{}`.'.format(s_name), s_name)
elif isinstance(result, _InvalidDimension):
raise ExpressionSyntaxError('Expected {} for variable `{}` but got {}.'.format(_sp(result.actual_ndim, 'index', 'indices'), s_name, len(s_generated_indices)), s_trimmed)
array, shape = result
assert len(shape) == len(s_generated_indices), 'array backend returned an array with incorrect dimension'
elif str(s_open) == '(':
# Parse the argument and call the function.
arg, shape, indices, summed_indices = self.parse_expression(s_scope)
result = self.array.call(str(s_name), len(s_generated_indices), arg)
if result is None:
raise ExpressionSyntaxError('No such function: `{}`.'.format(s_name), s_name)
elif isinstance(result, _InvalidDimension):
raise ExpressionSyntaxError('Expected {} for axes generated by function `{}` but got {}.'.format(_sp(result.actual_ndim, 'index', 'indices'), s_name, len(s_generated_indices)), s_trimmed)
array, generated_shape = result
assert len(generated_shape) == len(s_generated_indices), 'array backend returned an array with incorrect dimension'
shape = (*shape, *generated_shape)
else:
raise error
# Process generated indices. If an index is numeric, get the element at
# the index, otherwise add the index to result indices.
for s_index in s_generated_indices:
index = str(s_index)
if '0' <= index <= '9':
index = int(index)
axis = len(indices)
if index >= shape[axis]:
raise ExpressionSyntaxError('Index of axis with length {} out of range.'.format(shape[axis]), s_index)
array = self.array.get_element(array, axis, index)
shape = shape[:axis] + shape[axis+1:]
elif 'a' <= index <= 'z':
indices += str(s_index)
else:
raise ExpressionSyntaxError('Symbol `{}` is not allowed as index.'.format(s_index), s_index)
# Verify indices and sum indices that occur twice.
return self._trace(s_trimmed, array, shape, indices, summed_indices)
elif str(s_open) in ('(', '[', '{'):
array, shape, indices, summed_indices = self.parse_expression(s_scope)
array = {'(': self.array.scope, '{': self.array.mean, '[': self.array.jump}[str(s_open)](array)
return array, shape, indices, summed_indices
else:
raise error
def parse_signed_int(self, s: _Substring) -> Tuple[T, _Shape, str, FrozenSet[str]]:
try:
value = int(str(s.trim()))
except ValueError:
raise ExpressionSyntaxError('Expected an int.', s.trim() or s) from None
return self.array.from_int(value), (), '', frozenset(())
def parse_unsigned_int(self, s: _Substring) -> Tuple[T, _Shape, str, FrozenSet[str]]:
try:
value = int(str(s.trim()))
except ValueError:
raise ExpressionSyntaxError('Expected an int.', s.trim() or s) from None
if value < 0:
raise ExpressionSyntaxError('Expected an int.', s.trim() or s)
return self.array.from_int(value), (), '', frozenset(())
def parse_unsigned_float(self, s: _Substring) -> Tuple[T, _Shape, str, FrozenSet[str]]:
try:
value = float(str(s.trim()))
except ValueError:
raise ExpressionSyntaxError('Expected a float.', s.trim() or s) from None
if value < 0:
raise ExpressionSyntaxError('Expected a float.', s.trim() or s)
return self.array.from_float(value), (), '', frozenset(())
def _verify_indices_summed(self, s: _Substring, indices: str, summed: FrozenSet[str]) -> None:
# Check that none of `indices` occur in `summed`. Note that all `indices`
# are assumed to be unique. If this is not the case, duplicates will
# silently be ignored.
for index in indices:
if index in summed:
raise ExpressionSyntaxError('Index {} occurs more than twice.'.format(index), s)
def _merge_summed_indices_same_term(self, s: _Substring, *parts: FrozenSet[str]) -> FrozenSet[str]:
# Merge `items` into a single set of indices and check that we don't have
# duplicates.
merged = set() # type: Set[str]
for part in parts:
for index in sorted(merged & part):
raise ExpressionSyntaxError('Index {} occurs more than twice.'.format(index), s)
merged |= part
return frozenset(merged)
def _trace(self, s: _Substring, array: T, shape: _Shape, indices: str, *summed_indices_parts: FrozenSet[str]) -> Tuple[T, _Shape, str, FrozenSet[str]]:
# Sum duplicate indices.
summed_indices = set(self._merge_summed_indices_same_term(s, *summed_indices_parts))
j = 0
while j < len(indices):
index = indices[j]
i = indices.index(index)
if index in summed_indices:
raise ExpressionSyntaxError('Index {} occurs more than twice.'.format(index), s)
elif i < j:
if shape[i] != shape[j]:
raise ExpressionSyntaxError('Index {} is assigned to axes with different lengths: {} and {}.'.format(index, shape[i], shape[j]), s)
array = self.array.trace(array, i, j)
shape = shape[:i] + shape[i+1:j] + shape[j+1:]
indices = indices[:i] + indices[i+1:j] + indices[j+1:]
summed_indices.add(index)
j -= 1
else:
j += 1
return array, shape, indices, frozenset(summed_indices)
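# For example, an item with indices 'iji' (cf. `b_iji` in the module
# docstring) has its first and third axes traced by `_trace` above, leaving
# an array with the single remaining index 'j'.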
class Namespace:
'''Namespace for :class:`~nutils.function.Array` objects supporting assignments with tensor expressions.
The :class:`Namespace` object is used to store :class:`~nutils.function.Array` objects.
>>> from nutils import function
>>> ns = Namespace()
>>> ns.A = function.zeros([2, 3])
>>> ns.x = function.zeros([3])
>>> ns.c = 2
In addition to the assignment of :class:`~nutils.function.Array` objects, it is also possible
to specify an array using a tensor expression string — see
:mod:`nutils.expression_v2` for the syntax. All attributes defined in this
namespace are available as variables in the expression. If the array defined
by the expression has one or more dimensions the indices of the axes should
be appended to the attribute name. Example:
>>> ns.cAx_i = 'c A_ij x_j'
It is also possible to simply evaluate an expression without storing its
value in the namespace using ``expression @ ns``:
>>> '2 c' @ ns
Array<>
>>> 'c A_ij x_j' @ ns
Array<2>
>>> 'A_ij' @ ns # indices are ordered alphabetically
Array<2,3>
Note that evaluating an expression with an incompatible length raises an
exception:
>>> 'A_ij + A_ji' @ ns
Traceback (most recent call last):
...
nutils.expression_v2.ExpressionSyntaxError: Index i has length 2 in the first term [^] but length 3 in the second term [~].
A_ij + A_ji
^^^^ ~~~~
When evaluating an expression through this namespace the following functions
are available: ``opposite``, ``sin``, ``cos``, ``tan``, ``sinh``, ``cosh``,
``tanh``, ``arcsin``, ``arccos``, ``arctanh``, ``exp``, ``abs``, ``ln``,
``log``, ``log2``, ``log10``, ``sqrt``, ``sign``, ``conj``, ``real`` and
``imag``.
Additional pointwise functions can be assigned to the namespace similar to variables:
>>> ns.sqr = lambda u: u**2
>>> 'sqr(x_i)' @ ns # same as 'x_i^2'
Array<3>
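The summation convention also applies when an index is repeated within a
single term, so the inner product of ``x`` with itself evaluates to a scalar:
>>> 'x_i x_i' @ ns
Array<>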
'''
def __init__(self) -> None:
self.opposite = function.opposite
self.sin = numpy.sin
self.cos = numpy.cos
self.tan = numpy.tan
self.sinh = numpy.sinh
self.cosh = numpy.cosh
self.tanh = numpy.tanh
self.arcsin = numpy.arcsin
self.arccos = numpy.arccos
self.arctan = numpy.arctan
self.arctanh = numpy.arctanh
self.exp = numpy.exp
self.abs = numpy.abs
self.ln = numpy.log
self.log = numpy.log
self.log2 = numpy.log2
self.log10 = numpy.log10
self.sqrt = numpy.sqrt
self.sign = numpy.sign
self.conj = numpy.conj
self.real = numpy.real
self.imag = numpy.imag
def __setattr__(self, attr: str, value: Union[function.Array, str]) -> None:
name, underscore, indices = attr.partition('_')
if isinstance(value, (int, float, complex, numpy.ndarray)):
value = function.Array.cast(value)
if hasattr(value, '__array_ufunc__') and hasattr(value, '__array_function__'):
if underscore:
raise AttributeError('Cannot assign an array to an attribute with an underscore.')
super().__setattr__(name, value)
elif isinstance(value, str):
if not all('a' <= index <= 'z' for index in indices):
raise AttributeError('Only lower case latin characters are allowed as indices.')
if len(set(indices)) != len(indices):
raise AttributeError('All indices must be unique.')
ops = _FunctionArrayOps(self)
array, shape, expression_indices, summed = _Parser(ops).parse_expression(_Substring(value))
assert numpy.shape(array) == shape
if expression_indices != indices:
for index in sorted(set(indices) - set(expression_indices)):
raise AttributeError('Index {} of the namespace attribute is missing in the expression.'.format(index))
for index in sorted(set(expression_indices) - set(indices)):
raise AttributeError('Index {} of the expression is missing in the namespace attribute.'.format(index))
array = ops.align(array, expression_indices, indices)
super().__setattr__(name, array)
elif callable(value):
if underscore:
raise AttributeError('Cannot assign a function to an attribute with an underscore.')
super().__setattr__(name, value)
else:
raise AttributeError('Cannot assign an object of type {} to the namespace.'.format(type(value)))
def __rmatmul__(self, expression):
ops = _FunctionArrayOps(self)
parser = _Parser(ops)
if isinstance(expression, str):
array, shape, indices, summed = parser.parse_expression(_Substring(expression))
assert numpy.shape(array) == shape
array = ops.align(array, indices, ''.join(sorted(indices)))
return array
elif isinstance(expression, tuple):
return tuple(item @ self for item in expression)
elif isinstance(expression, list):
return list(item @ self for item in expression)
else:
return NotImplemented
def define_for(self, __name: str, *, gradient: Optional[str] = None, curl: Optional[str] = None, normal: Optional[str] = None, jacobians: Sequence[str] = ()) -> None:
'''Define gradient, normal or jacobian for the given geometry.
Parameters
----------
name : :class:`str`
Define the gradient, normal or jacobian for the geometry with the given
name in this namespace.
gradient : :class:`str`, optional
Define the gradient function with the given name. The function
generates axes with the same shape as the given geometry.
curl : :class:`str`, optional
Define the curl function with the given name. The function generates
two axes of length 3 where the last axis should be traced with an axis
of the argument, e.g. `curl_ij(u_j)`.
normal : :class:`str`, optional
Define the normal with the given name. The normal has the same shape as
the geometry.
jacobians : sequence of :class:`str`, optional
Define the jacobians for decreasing dimensions, starting at the
dimensions of the geometry. The jacobians are always scalars.
Example
-------
>>> from nutils import function, mesh
>>> ns = Namespace()
>>> topo, ns.x = mesh.rectilinear([2, 2])
>>> ns.define_for('x', gradient='∇', normal='n', jacobians=('dV', 'dS'))
>>> ns.basis = topo.basis('spline', degree=1)
>>> ns.u = function.dotarg('u', ns.basis)
>>> ns.v = function.dotarg('v', ns.basis)
>>> res = topo.integral('-∇_i(v) ∇_i(u) dV' @ ns, degree=2)
>>> res += topo.boundary.integral('∇_i(v) u n_i dS' @ ns, degree=2)
'''
geom = getattr(self, __name)
if gradient:
setattr(self, gradient, lambda arg: function.grad(arg, geom))
if curl:
if numpy.shape(geom) != (3,):
raise ValueError('The curl can only be defined for a geometry with shape (3,) but got {}.'.format(numpy.shape(geom)))
# Definition: `curl_ki(u_...)` := `ε_kji ∇_j(u_...)`. Should be used as
# `curl_ki(u_i)`, which is equivalent to `ε_kji ∇_j(u_i)`.
setattr(self, curl, lambda arg: (function.levicivita(3) * function.grad(arg, geom)[...,numpy.newaxis,:,numpy.newaxis]).sum(-2))
if normal:
setattr(self, normal, function.normal(geom))
for i, jacobian in enumerate(jacobians):
if i > numpy.size(geom):
raise ValueError('Cannot define the jacobian {!r}: dimension is negative.'.format(jacobian))
setattr(self, jacobian, function.jacobian(geom, numpy.size(geom) - i))
def copy_(self, **replacements: Mapping[str, function.Array]) -> 'Namespace':
'''Return a copy of this namespace.
Parameters
----------
**replacements : :class:`nutils.function.Array`
Argument replacements to apply to the copy of this namespace.
Returns
-------
:class:`Namespace`
A copy of this namespace.
'''
ns = Namespace()
for attr, value in vars(self).items():
if replacements and isinstance(value, function.Array):
value = function.replace_arguments(value, replacements)
object.__setattr__(ns, attr, value)
return ns
class _FunctionArrayOps:
def __init__(self, namespace: Namespace) -> None:
self.namespace = namespace
def align(self, array: function.Array, in_indices: str, out_indices: str) -> function.Array:
assert set(in_indices) == set(out_indices) and len(in_indices) == len(out_indices) == len(set(in_indices))
return self.transpose(array, tuple(map(in_indices.index, out_indices)))
def from_int(self, value: int) -> function.Array:
return function.Array.cast(value)
def from_float(self, value: float) -> function.Array:
return function.Array.cast(value)
def get_variable(self, name: str, ndim: int) -> Optional[Union[Tuple[function.Array, _Shape],_InvalidDimension]]:
try:
array = getattr(self.namespace, name)
except AttributeError:
return None
if callable(array):
return None
elif numpy.ndim(array) == ndim:
return array, numpy.shape(array)
else:
return _InvalidDimension(numpy.ndim(array))
def call(self, name: str, ngenerates: int, arg: function.Array) -> Optional[Union[Tuple[function.Array, _Shape],_InvalidDimension]]:
try:
func = getattr(self.namespace, name)
except AttributeError:
return None
array = func(arg)
assert numpy.shape(array)[:numpy.ndim(arg)] == numpy.shape(arg)
if numpy.ndim(array) == numpy.ndim(arg) + ngenerates:
return array, numpy.shape(array)[numpy.ndim(arg):]
else:
return _InvalidDimension(numpy.ndim(array) - numpy.ndim(arg))
def get_element(self, array: function.Array, axis: int, index: int) -> function.Array:
assert 0 <= axis < numpy.ndim(array) and 0 <= index < numpy.shape(array)[axis]
return numpy.take(array, index, axis)
def transpose(self, array: function.Array, axes: Tuple[int, ...]) -> function.Array:
assert numpy.ndim(array) == len(axes)
return numpy.transpose(array, axes)
def trace(self, array: function.Array, axis1: int, axis2: int) -> function.Array:
return numpy.trace(array, axis1, axis2)
def scope(self, array: function.Array) -> function.Array:
return array
def mean(self, array: function.Array) -> function.Array:
return function.mean(array)
def jump(self, array: function.Array) -> function.Array:
return function.jump(array)
def add(self, *args: Tuple[bool, function.Array]) -> function.Array:
assert all(numpy.shape(arg) == numpy.shape(args[0][1]) for neg, arg in args[1:])
negated = (-arg if neg else arg for neg, arg in args)
return functools.reduce(numpy.add, negated)
def append_axes(self, array, shape):
shuffle = numpy.concatenate([len(shape) + numpy.arange(numpy.ndim(array)), numpy.arange(len(shape))])
return numpy.transpose(numpy.broadcast_to(array, shape + numpy.shape(array)), shuffle)
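# For example (illustrative): `append_axes(a, (4, 5))` maps an array `a` of
# shape (2, 3) to shape (2, 3, 4, 5) by broadcasting and transposing;
# `multiply` below uses this to form the outer product implied by
# juxtaposition in a term.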
def multiply(self, *args: function.Array) -> function.Array:
result = args[0]
for arg in args[1:]:
result = numpy.multiply(self.append_axes(result, numpy.shape(arg)), arg)
return result
def divide(self, numerator: function.Array, denominator: function.Array) -> function.Array:
assert numpy.ndim(denominator) == 0
return numpy.divide(numerator, denominator)
#!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""ADC library
"""
import laygo
import numpy as np
import os
#import logging;logging.basicConfig(level=logging.DEBUG)
def generate_boundary(laygen, objectname_pfix, placement_grid,
devname_bottom, devname_top, devname_left, devname_right,
shape_bottom=None, shape_top=None, shape_left=None, shape_right=None,
transform_bottom=None, transform_top=None, transform_left=None, transform_right=None,
origin=np.array([0, 0])):
#generate a boundary structure to resolve boundary design rules
pg = placement_grid
#parameters
if shape_bottom is None:
shape_bottom = [np.array([1, 1]) for d in devname_bottom]
if shape_top is None:
shape_top = [np.array([1, 1]) for d in devname_top]
if shape_left is None:
shape_left = [np.array([1, 1]) for d in devname_left]
if shape_right is None:
shape_right = [np.array([1, 1]) for d in devname_right]
if transform_bottom is None:
transform_bottom = ['R0' for d in devname_bottom]
if transform_top is None:
transform_top = ['R0' for d in devname_top]
if transform_left is None:
transform_left = ['R0' for d in devname_left]
if transform_right is None:
transform_right = ['R0' for d in devname_right]
#bottom
dev_bottom=[]
dev_bottom.append(laygen.place("I" + objectname_pfix + 'BNDBTM0', devname_bottom[0], pg, xy=origin,
shape=shape_bottom[0], transform=transform_bottom[0]))
for i, d in enumerate(devname_bottom[1:]):
dev_bottom.append(laygen.relplace("I" + objectname_pfix + 'BNDBTM'+str(i+1), d, pg, dev_bottom[-1].name,
shape=shape_bottom[i+1], transform=transform_bottom[i+1]))
dev_left=[]
dev_left.append(laygen.relplace("I" + objectname_pfix + 'BNDLFT0', devname_left[0], pg, dev_bottom[0].name, direction='top',
shape=shape_left[0], transform=transform_left[0]))
for i, d in enumerate(devname_left[1:]):
dev_left.append(laygen.relplace("I" + objectname_pfix + 'BNDLFT'+str(i+1), d, pg, dev_left[-1].name, direction='top',
shape=shape_left[i+1], transform=transform_left[i+1]))
dev_right=[]
dev_right.append(laygen.relplace("I" + objectname_pfix + 'BNDRHT0', devname_right[0], pg, dev_bottom[-1].name, direction='top',
shape=shape_right[0], transform=transform_right[0]))
for i, d in enumerate(devname_right[1:]):
dev_right.append(laygen.relplace("I" + objectname_pfix + 'BNDRHT'+str(i+1), d, pg, dev_right[-1].name, direction='top',
shape=shape_right[i+1], transform=transform_right[i+1]))
dev_top=[]
dev_top.append(laygen.relplace("I" + objectname_pfix + 'BNDTOP0', devname_top[0], pg, dev_left[-1].name, direction='top',
shape=shape_top[0], transform=transform_top[0]))
for i, d in enumerate(devname_top[1:]):
dev_top.append(laygen.relplace("I" + objectname_pfix + 'BNDTOP'+str(i+1), d, pg, dev_top[-1].name,
shape=shape_top[i+1], transform=transform_top[i+1]))
return [dev_bottom, dev_top, dev_left, dev_right]
def generate_sarlogic_array(laygen, objectname_pfix, templib_logic, placement_grid, routing_grid_m2m3,
routing_grid_m3m4, routing_grid_m4m5, num_bits=8, num_bits_row=4, m_space_4x=0,
m_space_2x=0, m_space_1x=0, origin=np.array([0, 0])):
"""generate cap driver array """
pg = placement_grid
rg_m2m3 = routing_grid_m2m3
rg_m3m4 = routing_grid_m3m4
rg_m4m5 = routing_grid_m4m5
num_row=int(num_bits/num_bits_row)
tap_name='tap'
slogic_name='sarlogic'
space_1x_name = 'space_1x'
space_2x_name = 'space_2x'
space_4x_name = 'space_4x'
# placement
itapl=[]
islogic=[]
itapr=[]
isp4x=[]
isp2x=[]
isp1x=[]
for i in range(num_row):
if i%2==0: tf='R0'
else: tf='MX'
if i==0:
itapl.append(laygen.place(name="I" + objectname_pfix + 'TAPL0', templatename=tap_name,
gridname=pg, xy=origin, template_libname=templib_logic))
else:
itapl.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPL'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapl[-1].name, transform=tf,
direction = 'top', template_libname=templib_logic))
islogic.append(laygen.relplace(name = "I" + objectname_pfix + 'CLG'+str(i), templatename = slogic_name,
gridname = pg, refinstname = itapl[-1].name, shape=np.array([num_bits_row, 1]),
transform=tf, template_libname=templib_logic))  # assumed closing arguments, following the tap placement pattern above
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, too-many-branches,
# pylint: disable=too-many-locals, too-many-instance-attributes, too-many-lines
"""
This module implements the linear Kalman filter in both an object
oriented and procedural form. The KalmanFilter class implements
the filter by storing the various matrices in instance variables,
minimizing the amount of bookkeeping you have to do.
All Kalman filters operate with a predict->update cycle. The
predict step, implemented with the method or function predict(),
uses the state transition matrix F to predict the state in the next
time period (epoch). The state is stored as a gaussian (x, P), where
x is the state (column) vector, and P is its covariance. Covariance
matrix Q specifies the process covariance. In Bayesian terms, this
prediction is called the *prior*, which you can think of colloquially
as the estimate prior to incorporating the measurement.
The update step, implemented with the method or function `update()`,
incorporates the measurement z with covariance R, into the state
estimate (x, P). The class stores the system uncertainty in S,
the innovation (residual between prediction and measurement in
measurement space) in y, and the Kalman gain in k. The procedural
form returns these variables to you. In Bayesian terms this computes
the *posterior* - the estimate after the information from the
measurement is incorporated.
Whether you use the OO form or procedural form is up to you. If
matrices such as H, R, and F are changing each epoch, you'll probably
opt to use the procedural form. If they are unchanging, the OO
form is perhaps easier to use since you won't need to keep track
of these matrices. This is especially useful if you are implementing
banks of filters or comparing various KF designs for performance;
a trivial coding bug could lead to using the wrong sets of matrices.
This module also offers an implementation of the RTS smoother, and
other helper functions, such as log likelihood computations.
The Saver class allows you to easily save the state of the
KalmanFilter class after every update
This module expects NumPy arrays for all values that expect
arrays, although in a few cases, particularly method parameters,
it will accept types that convert to NumPy arrays, such as lists
of lists. These exceptions are documented in the method or function.
Examples
--------
The following example constructs a constant velocity kinematic
filter, filters noisy data, and plots the results. It also demonstrates
using the Saver class to save the state of the filter at each epoch.
.. code-block:: Python
import matplotlib.pyplot as plt
import numpy as np
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise, Saver
from numpy.random import randn
r_std, q_std = 2., 0.003
dt = 1.0  # time step (example value)
cv = KalmanFilter(dim_x=2, dim_z=1)
cv.x = np.array([[0.], [1.]])  # position, velocity
cv.F = np.array([[1., dt], [0., 1.]])
cv.R = np.array([[r_std**2]])
cv.H = np.array([[1., 0.]])
cv.P = np.diag([.1**2, .03**2])
cv.Q = Q_discrete_white_noise(2, dt, q_std**2)
saver = Saver(cv)
for z in range(100):
cv.predict()
cv.update([z + randn() * r_std])
saver.save() # save the filter's state
saver.to_array()
plt.plot(saver.x[:, 0])
# plot all of the priors
plt.plot(saver.x_prior[:, 0])
# plot mahalanobis distance
plt.figure()
plt.plot(saver.mahalanobis)
This code implements the same filter using the procedural form
from filterpy.kalman import predict, update
x = np.array([[0.], [1.]])  # position, velocity
F = np.array([[1., dt], [0., 1.]])
R = np.array([[r_std**2]])
H = np.array([[1., 0.]])
P = np.diag([.1**2, .03**2])
Q = Q_discrete_white_noise(2, dt, q_std**2)
xs = []
for z in range(100):
x, P = predict(x, P, F=F, Q=Q)
x, P = update(x, P, z=[z + randn() * r_std], R=R, H=H)
xs.append(x[0, 0])
plt.plot(xs)
For more examples see the test subdirectory, or refer to the
book cited below. In it I both teach Kalman filtering from basic
principles, and teach the use of this library in great detail.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
Copyright 2014-2018 <NAME>.
"""
from __future__ import absolute_import, division
from copy import deepcopy
from math import log, exp, sqrt
import sys
import warnings
import numpy as np
from numpy import dot, zeros, eye, isscalar, shape
import numpy.linalg as linalg
from filterpy.stats import logpdf
from filterpy.common import pretty_str, reshape_z
class KalmanFilter(object):
r""" Implements a Kalman filter. You are responsible for setting the
various state variables to reasonable values; the defaults will
not give you a functional filter.
For now the best documentation is my free book Kalman and Bayesian
Filters in Python [2]_. The test files in this directory also give you a
basic idea of use, albeit without much description.
In brief, you will first construct this object, specifying the size of
the state vector with dim_x and the size of the measurement vector that
you will be using with dim_z. These are mostly used to perform size checks
when you assign values to the various matrices. For example, if you
specified dim_z=2 and then try to assign a 3x3 matrix to R (the
measurement noise matrix) you will get an assert exception because R
should be 2x2. (If for whatever reason you need to alter the size of
things midstream just use the underscore version of the matrices to
assign directly: your_filter._R = a_3x3_matrix.)
After construction the filter will have default matrices created for you,
but you must specify the values for each. It’s usually easiest to just
overwrite them rather than assign to each element yourself. This will be
clearer in the example below. All are of type numpy.array.
Examples
--------
Here is a filter that tracks position and velocity using a sensor that only
reads position.
First construct the object with the required dimensionality.
.. code::
from filterpy.kalman import KalmanFilter
f = KalmanFilter (dim_x=2, dim_z=1)
Assign the initial value for the state (position and velocity). You can do this
with a two dimensional array like so:
.. code::
f.x = np.array([[2.], # position
[0.]]) # velocity
or just use a one dimensional array, which I prefer doing.
.. code::
f.x = np.array([2., 0.])
Define the state transition matrix:
.. code::
f.F = np.array([[1.,1.],
[0.,1.]])
Define the measurement function:
.. code::
f.H = np.array([[1.,0.]])
Define the covariance matrix. Here I take advantage of the fact that
P already contains np.eye(dim_x), and just multiply by the uncertainty:
.. code::
f.P *= 1000.
I could have written:
.. code::
f.P = np.array([[1000., 0.],
[ 0., 1000.] ])
You decide which is more readable and understandable.
Now assign the measurement noise. Here the dimension is 1x1, so I can
use a scalar
.. code::
f.R = 5
I could have done this instead:
.. code::
f.R = np.array([[5.]])
Note that this must be a 2 dimensional array, as must all the matrices.
Finally, I will assign the process noise. Here I will take advantage of
another FilterPy library function:
.. code::
from filterpy.common import Q_discrete_white_noise
f.Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.13)
Now just perform the standard predict/update loop:
.. code::
while some_condition_is_true:
    z = get_sensor_reading()
    f.predict()
    f.update(z)
    do_something_with_estimate(f.x)
**Procedural Form**
This module also contains stand alone functions to perform Kalman filtering.
Use these if you are not a fan of objects.
**Example**
.. code::
while True:
z, R = read_sensor()
x, P = predict(x, P, F, Q)
x, P = update(x, P, z, R, H)
See my book Kalman and Bayesian Filters in Python [2]_.
You will have to set the following attributes after constructing this
object for the filter to perform properly. Please note that there are
various checks in place to ensure that you have made everything the
'correct' size. However, it is possible to provide incorrectly sized
arrays such that the linear algebra can not perform an operation.
It can also fail silently - you can end up with matrices of a size that
allows the linear algebra to work, but are the wrong shape for the problem
you are trying to solve.
Parameters
----------
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
This is used to set the default size of P, Q, and u
dim_z : int
Number of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
dim_u : int (optional)
size of the control input, if it is being used.
Default value of 0 indicates it is not used.
compute_log_likelihood : bool (default = True)
Computes log likelihood by default, but this can be a slow
computation, so if you never use it you can turn this computation
off.
Attributes
----------
x : numpy.array(dim_x, 1)
Current state estimate. Any call to update() or predict() updates
this variable.
P : numpy.array(dim_x, dim_x)
Current state covariance matrix. Any call to update() or predict()
updates this variable.
x_prior : numpy.array(dim_x, 1)
Prior (predicted) state estimate. The *_prior and *_post attributes
are for convenience; they store the prior and posterior of the
current epoch. Read Only.
P_prior : numpy.array(dim_x, dim_x)
Prior (predicted) state covariance matrix. Read Only.
x_post : numpy.array(dim_x, 1)
Posterior (updated) state estimate. Read Only.
P_post : numpy.array(dim_x, dim_x)
Posterior (updated) state covariance matrix. Read Only.
z : numpy.array
Last measurement used in update(). Read only.
R : numpy.array(dim_z, dim_z)
Measurement noise matrix
Q : numpy.array(dim_x, dim_x)
Process noise matrix
F : numpy.array()
State Transition matrix
H : numpy.array(dim_z, dim_x)
Measurement function
y : numpy.array
Residual of the update step. Read only.
K : numpy.array(dim_x, dim_z)
Kalman gain of the update step. Read only.
S : numpy.array
System uncertainty (P projected to measurement space). Read only.
SI : numpy.array
Inverse system uncertainty. Read only.
log_likelihood : float
log-likelihood of the last measurement. Read only.
likelihood : float
likelihood of last measurement. Read only.
Computed from the log-likelihood. The log-likelihood can be very
small, meaning a large negative value such as -28000. Taking the
exp() of that results in 0.0, which can break typical algorithms
which multiply by this value, so by default we always return a
number >= sys.float_info.min.
mahalanobis : float
mahalanobis distance of the innovation. Read only.
inv : function, default numpy.linalg.inv
If you prefer another inverse function, such as the Moore-Penrose
pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv
This is only used to invert self.S. If you know it is diagonal, you
might choose to set it to filterpy.common.inv_diagonal, which is
several times faster than numpy.linalg.inv for diagonal matrices.
alpha : float
Fading memory setting. 1.0 gives the normal Kalman filter, and
values slightly larger than 1.0 (such as 1.02) give a fading
memory effect - previous measurements have less influence on the
filter's estimates. This formulation of the Fading memory filter
(there are many) is due to <NAME> [1]_.
References
----------
.. [1] <NAME>. "Optimal State Estimation." <NAME>.
p. 208-212. (2006)
.. [2] <NAME>. "Kalman and Bayesian Filters in Python"
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
"""
def __init__(self, dim_x, dim_z, dim_u=0):
if dim_x < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_z < 1:
raise ValueError('dim_z must be 1 or greater')
if dim_u < 0:
raise ValueError('dim_u must be 0 or greater')
self.dim_x = dim_x
self.dim_z = dim_z
self.dim_u = dim_u
self.x = zeros((dim_x, 1)) # state
self.P = eye(dim_x) # uncertainty covariance
self.Q = eye(dim_x) # process uncertainty
self.B = None # control transition matrix
self.F = eye(dim_x) # state transition matrix
self.H = zeros((dim_z, dim_x)) # Measurement function
self.R = eye(dim_z) # state uncertainty
self._alpha_sq = 1. # fading memory control
self.M = np.zeros((dim_z, dim_z)) # process-measurement cross correlation
self.z = np.array([[None]*self.dim_z]).T
# gain and residual are computed during the innovation step. We
# save them so that in case you want to inspect them for various
# purposes
self.K = np.zeros((dim_x, dim_z)) # kalman gain
self.y = zeros((dim_z, 1))
self.S = np.zeros((dim_z, dim_z)) # system uncertainty
self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty
# identity matrix. Do not alter this.
self._I = np.eye(dim_x)
# these will always be a copy of x,P after predict() is called
self.x_prior = self.x.copy()
self.P_prior = self.P.copy()
# these will always be a copy of x,P after update() is called
self.x_post = self.x.copy()
self.P_post = self.P.copy()
# Only computed if requested via property
self._log_likelihood = log(sys.float_info.min)
self._likelihood = sys.float_info.min
self._mahalanobis = None
self.inv = np.linalg.inv
def predict(self, u=None, B=None, F=None, Q=None):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array
Optional control vector. If not `None`, it is multiplied by B
to create the control input into the system.
B : np.array(dim_x, dim_z), or None
Optional control transition matrix; a value of None
will cause the filter to use `self.B`.
F : np.array(dim_x, dim_x), or None
Optional state transition matrix; a value of None
will cause the filter to use `self.F`.
Q : np.array(dim_x, dim_x), scalar, or None
Optional process noise matrix; a value of None will cause the
filter to use `self.Q`.
"""
if B is None:
B = self.B
if F is None:
F = self.F
if Q is None:
Q = self.Q
elif isscalar(Q):
Q = eye(self.dim_x) * Q
# x = Fx + Bu
if B is not None and u is not None:
self.x = dot(F, self.x) + dot(B, u)
else:
self.x = dot(F, self.x)
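# Sketch (assumed, standard form) of the covariance propagation for the
# predict step, using the fading-memory factor described in the class
# docstring: P = alpha^2 * F P F' + Q.
self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q
# save the prior; x_prior and P_prior are documented as read-only copies
self.x_prior = self.x.copy()
self.P_prior = self.P.copy()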
# CMPSPEC contains METHODS TO COMPUTE THE POWER SPECTRUM
# GIVEN AS INPUT 1D, 2D, 3D FIELD
"""
Author: <NAME>
date: 14/05/20
"""
# ___ __ _ _ ____ _ _ ____ __ __ _ ___ ____ ____ ____ ___ ____ ____ __
# / __)/ \ ( \/ )( _ \/ )( \(_ _)( )( ( \ / __) / ___)( _ \( __)/ __)(_ _)( _ \ / _\
# ( (__( O )/ \/ \ ) __/) \/ ( )( )( / /( (_ \ \___ \ ) __/ ) _)( (__ )( ) // \
# \___)\__/ \_)(_/(__) \____/ (__) (__)\_)__) \___/ (____/(__) (____)\___) (__) (__\_)\_/\_/
import numpy as np
from numpy.fft import fftn
# ____ _ _ __ __ ____ _ _
# / ___)( \/ ) / \ / \(_ _)/ )( \
# \___ \/ \/ \( O )( O ) )( ) __ (
# (____/\_)(_/ \__/ \__/ (__) \_)(_/
# Function for smoothing the spectrum
# only for visualisation
def movingaverage(interval, window_size):
window = np.ones(int(window_size)) / float(window_size)
return np.convolve(interval, window, 'same')
# __ ____ ____ __ ____ __ ____ ____ ____ ____ ___ ____ ____ _ _ _ _
# / \ ___( \ ( __)( )( __)( ) ( \ / ___)( _ \( __)/ __)(_ _)( _ \/ )( \( \/ )
# (_/ /(___)) D ( ) _) )( ) _) / (_/\ ) D ( \___ \ ) __/ ) _)( (__ )( ) /) \/ (/ \/ \
# (__) (____/ (__) (__)(____)\____/(____/ (____/(__) (____)\___) (__) (__\_)\____/\_)(_/
# Method to compute spectrum from 1D Field
def compute1Dspectrum(r,lx, smooth):
"""
Parameters:
----------------------------------------------------------------
r: float-vector
The 1D random field
lx: float
the domain size in the x-direction.
smooth: boolean
Activate/deactivate smoothing of the computed spectrum (visualisation only)
-----------------------------------------------------------------
"""
nx = len(r)
nt = nx
n = nx
rh = fftn(r)/nt
# calculate energy in fourier domain
tkeh = (rh * np.conj(rh)).real
k0x = 2.0*np.pi/lx
knorm = k0x
kxmax = nx / 2
wave_numbers = knorm*np.arange(0,n) # array of wavenumbers
tke_spectrum = np.zeros(len(wave_numbers))
for kx in range(-nx//2, nx//2-1):
rk = np.sqrt(kx**2)
k = int(np.round(rk))
tke_spectrum[k] = tke_spectrum[k] + tkeh[kx]
tke_spectrum = tke_spectrum/knorm
knyquist = knorm * nx / 2
# If smooth parameter is TRUE: Smooth the computed spectrum
# ONLY for Visualisation
if smooth:
tkespecsmooth = movingaverage(tke_spectrum, 5) # smooth the spectrum
tkespecsmooth[0:4] = tke_spectrum[0:4] # get the first 4 values from the original data
tke_spectrum = tkespecsmooth
#
return knyquist, wave_numbers, tke_spectrum
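# Usage sketch (not part of the original interface; the values are assumed):
# compute the spectrum of a synthetic random 1D field on 256 points spanning
# a domain of length 2*pi, with smoothing enabled.
def _demo_compute1Dspectrum(nx=256, lx=2.0*np.pi):
    r = np.random.randn(nx)  # synthetic unit-variance 1D field
    return compute1Dspectrum(r, lx, smooth=True)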
# ____ ____ ____ __ ____ __ ____ ____ ____ ____ ___ ____ ____ _ _ _ _
# (___ \ ___( \ ( __)( )( __)( ) ( \ / ___)( _ \( __)/ __)(_ _)( _ \/ )( \( \/ )
# / __/(___)) D ( ) _) )( ) _) / (_/\ ) D ( \___ \ ) __/ ) _)( (__ )( ) /) \/ (/ \/ \
# (____) (____/ (__) (__)(____)\____/(____/ (____/(__) (____)\___) (__) (__\_)\____/\_)(_/
# Method to compute spectrum from 2D Field
def compute2Dspectrum(r,lx, ly, smooth):
"""
Parameters:
----------------------------------------------------------------
r: float-vector
The 2D random field
lx: float
the domain size in the x-direction.
ly: float
the domain size in the y-direction.
smooth: boolean
Activate/deactivate smoothing of the computed spectrum (visualisation only)
-----------------------------------------------------------------
"""
nx = len(r[:,0])
ny = len(r[0,:])
nt = nx*ny
n = nx
rh = fftn(r)/nt
# calculate energy in fourier domain
tkeh = (rh * np.conj(rh)).real
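# Sketch of the remaining steps, assumed by analogy with compute1Dspectrum
# above: bin the Fourier energy onto integer wavenumber shells and optionally
# smooth for visualisation. A square grid (nx == ny) is assumed so the shell
# index stays within range.
k0x = 2.0*np.pi/lx
k0y = 2.0*np.pi/ly
knorm = (k0x + k0y)/2.0
wave_numbers = knorm*np.arange(0, n)  # array of wavenumbers
tke_spectrum = np.zeros(len(wave_numbers))
for kx in range(-nx//2, nx//2-1):
    for ky in range(-ny//2, ny//2-1):
        rk = np.sqrt(kx**2 + ky**2)
        k = int(np.round(rk))
        tke_spectrum[k] = tke_spectrum[k] + tkeh[kx, ky]
tke_spectrum = tke_spectrum/knorm
knyquist = knorm * min(nx, ny) / 2
if smooth:
    tkespecsmooth = movingaverage(tke_spectrum, 5)  # smooth the spectrum
    tkespecsmooth[0:4] = tke_spectrum[0:4]  # keep the first 4 original values
    tke_spectrum = tkespecsmooth
return knyquist, wave_numbers, tke_spectrum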
import os
import cv2
import sqlite3
import argparse
import numpy as np
import matplotlib.pyplot as plt
# Parser for path to database file:
def parse_args() -> tuple:
"""
Function to parse user argument
:return: input_path and output_path
"""
ap = argparse.ArgumentParser(description='Converting colmap output files into ORB descriptors.')
ap.add_argument('-p', '--input_path', required=True)
ap.add_argument('-o', '--output_path', type=str, default=None)
args = vars(ap.parse_args())
return args['input_path'], args['output_path']
def get_descriptors(cursor, img_list: list) -> list:
"""
get list of descriptors for each image
:param cursor: sqlite3.connect().cursor
:param img_list: list of images
:return dsk: list of descriptors
"""
dsk = []
for image_id, _ in img_list:
cursor.execute('SELECT data FROM descriptors WHERE image_id=?;', (image_id,))
row = next(cursor)
if row[0] is None:
dsk.append(np.zeros((0, 128), dtype=np.uint8))
else:
dsk.append(np.frombuffer(row[0], dtype=np.uint8).reshape(-1, 128))
return dsk
def get_3d_points(input_path: str) -> list:
"""
get a list of 3d points from the points.txt file
:param input_path: path to points.txt file
:return points: list of points
"""
points = []
path = os.path.join(input_path, 'points3D.txt')
with open(path, 'r') as file:
lines = file.readlines()
for line in lines:
line = line.strip()
if len(line) > 0 and line[0] != "#":
elements = line.split()
xyz = np.array(tuple(map(float, elements[1:4])))
image_ids = np.array(tuple(map(int, elements[8::2])))
point2d_ids = np.array(tuple(map(int, elements[9::2])))
error = float(elements[7])
points.append([xyz, image_ids, point2d_ids, error])
return points
def plot_2d(file_name: str, output_path: str, threshold=500) -> None:
"""
Create a 2d plot of the reconstructed model
:param file_name: path to points.txt file
:param output_path: path to save the plot
:param threshold: threshold for filter outlier points
:return: None; the scatter plot is saved to output_path as 'sparse_plot.png'
"""
x = []
y = []
z = []
with open(file_name, "r") as f:
for line in f.readlines():
if "#" not in line:
data = line.split(" ")
x_val = float(data[0])
y_val = float(data[2])
z_val = float(data[1])
# filter outliers
if abs(x_val) < threshold and abs(y_val) < threshold and abs(z_val) < threshold:
x.append(x_val)
y.append(y_val)
z.append(z_val)
fig = plt.figure()
plt.scatter(x, y, linewidth=0.1, s=2)
fig.savefig(os.path.join(output_path, 'sparse_plot.png'))
def main():
plot_map = True
# Parse input arguments:
input_path, output_path = parse_args()
if output_path is None:
output_path = input_path
# Connect to the database and create cursor object:
connection = sqlite3.connect(os.path.join(input_path, 'database.db'))
cursor = connection.cursor()
# Get list of images (img_id, img_name):
cursor.execute('SELECT image_id, name FROM images;')
images = list(row for row in cursor)
# Get list of descriptors:
descriptors = get_descriptors(cursor, images)
# End SQL connection:
cursor.close()
connection.close()
# Get list 3D points:
points_3D = get_3d_points(input_path)
# Compute mean ORB descriptors for all 3D points:
feature_desc = []
for point in points_3D:
dsc = []
for d in range(len(point[1])):
img_id = point[1][d] - 1
pt_id = point[2][d]
if pt_id < len(descriptors[img_id]):
dsc.append(descriptors[img_id][pt_id])
if len(dsc) != 0:
mean_dsc =
|
np.mean(dsc, axis=0)
|
numpy.mean
|
# -*- coding: utf-8 -*-
"""
This Python 3.3 module implements the cubic Snyder equal area map projection (CSEA) as described in [Snyd1992]_.
.. [Snyd1992] <NAME>, An equal-area map projection for polyhedral globes, Cartographica 29 (1992), 10--21.
CHANGELOG:
- <NAME> (AR), 2013-01-31: Initial version.
- AR, 2013-02-08: Fixed a bug in the xy-positions of the square faces.
- AR, 2013-07-23: Ported to Python 3.3.
NOTE:
All lengths are measured in meters and all angles are measured in radians
unless indicated otherwise.
By 'ellipsoid' below, I mean an oblate ellipsoid of revolution.
"""
#*****************************************************************************
# Copyright (C) 2012 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU Lesser General Public License (LGPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
# Import third-party modules.
from numpy import pi, rad2deg, deg2rad, floor, sin, cos, arccos, arctan, arctan2, sqrt, array
# Import my modules.
from .utils import auth_lat, auth_rad
g = arctan(sqrt(2)) # Snyder approximates this as 54.73561032 degrees.
tang = sqrt(2)
G = pi/3
theta = pi/4 # = 2*pi divided by twice the number of sides of a square
cottheta = 1
alpha = pi/2 # = 2*(pi/2 - theta)
R_prime = sqrt(pi/6) # = (1/tang)*sqrt(2*(G - theta)/(sin(theta)*cos(theta)))
A = pi/2 - g
face_centers_lonlat = [
(-2*pi/3, A),
(-pi/3, -A),
(0, A),
(pi/3, -A),
(2*pi/3, A),
(pi, -A)
]
B = sqrt(pi/3) # = R_prime*tang
face_centers_xy = [
(-2*B, B/2),
(-B, -B/2),
(0, B/2),
(B, -B/2),
(2*B, B/2),
(3*B, -B/2)
]
EPS = 1e-12 # Fuzz to avoid rounding errors.
def csea_sphere(lam, phi):
r"""
Compute the signature function of the cubic Snyder equal area
(CSEA) map projection of the unit sphere.
INPUT:
- `lam, phi` - Geodetic longitude-latitude coordinates in radians.
Assume -pi <= `lam` < pi and -pi/2 <= `phi` <= pi/2.
"""
# (lam, phi) lies in one of these two squares:
face_1 = int(floor((lam + pi)/(pi/3)))
face_2 = (face_1 - 1) % 6  # neighbouring face, wrapping around the six squares
for i in [face_1, face_2]:
lam_0, phi_0 = face_centers_lonlat[i]
z = arccos(sin(phi_0)*sin(phi) +\
cos(phi_0)*cos(phi)*(cos(lam - lam_0)))
if z > g:
# (lam, phi) does not lie in this square
continue
# Might have found the correct square face.
Az = arctan2(cos(phi)*sin(lam - lam_0),
cos(phi_0)*sin(phi) - sin(phi_0)*cos(phi)*cos(lam - lam_0))
j = 0
while Az <= 0 or Az > alpha:
if Az <= 0:
Az += alpha
j += 1
else:
Az -= alpha
j -= 1
q = arctan(tang/(cos(Az) + sin(Az)*cottheta))
if z > q + EPS:
# (lam, phi) does not lie in this square
continue
# Found the correct square face.
H = arccos(sin(Az)*sin(G)*
|
cos(g)
|
numpy.cos
|
from numba import jit
import numpy as np
def calc_normals(state) :
state.tx = state.C*state.X
state.tz = state.C*state.Y
state.nx = -state.C*state.Y
state.nz = state.C*state.X
def calc_TL(state) :
state.W = np.abs(state.q * state.dangle0[np.newaxis,:])
state.P = np.zeros((state.Lz,state.Lr))
loop(state)
state.TL = -20*np.log10(4*np.pi*np.abs(state.P))
state.TL[np.isnan(state.TL)] = 120
state.TL[np.isinf(state.TL)] = 120
#@jit(nopython = True)#, parallel = True)
def loop(state) :
zz = np.linspace(state.zmin,state.zmax,state.Lz)
rr = np.linspace(state.rmin,state.rmax,state.Lr)
R, Z = np.meshgrid(rr,zz)
#j = 7
for j in range(state.nr) :
print("beam %i / %i" %(j, state.nr), end = '\r')
for i in range(1,state.n_max) :
if state.W[i,j] > 0 :
#coords influenced by the ray j between i and i+1
rec = ((R >= state.r[i-1,j]) & (R < state.r[i,j]))
#normal distance between those points and segment i
n_ = np.abs( (R - state.r[i,j])*state.nx[i,j] + (Z - state.z[i,j])*state.nz[i,j])
#along ray coordinate
s_ = (R - state.r[i-1,j])*state.tx[i-1,j] + (Z - state.z[i-1,j])*state.tz[i-1,j]
al = s_ / np.sqrt((state.r[i-1,j] - state.r[i,j])**2 + (state.z[i-1,j] - state.z[i,j])**2)
T_ = state.T[i-1,j] + al * (state.T[i,j] - state.T[i-1,j])
q_ = state.q[i-1,j] + al * (state.q[i,j] - state.q[i-1,j])
W_ = state.W[i-1,j] + al * (state.W[i,j] - state.W[i-1,j])
r_ = state.r[i-1,j] + al * (state.r[i,j] - state.r[i-1,j])
n_val = (n_ <= W_)
A_ = 1/(4*np.pi) * (-1j)**state.m[i,j] * np.sqrt(np.abs(state.C[i,j]*
|
np.cos(state.angle_0[j])
|
numpy.cos
|
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
https://www.sciencedirect.com/science/article/pii/S1524070311000178?via%3Dihub
based on the pseudocode by <NAME>
https://project.dke.maastrichtuniversity.nl/robotlab/wp-content/uploads/Bachelor-thesis-Renzo-Poddighe.pdf
"""
import math
import numpy as np
from transformations import quaternion_inverse, quaternion_multiply, quaternion_from_matrix, euler_from_quaternion
from anim_utils.motion_editing.analytical_inverse_kinematics import calculate_limb_joint_rotation, calculate_limb_root_rotation, to_local_coordinate_system
def sign(x):
return 1 if x >= 0 else -1
def quaternion_to_av(q):
""" according to lee 2000
the purely imaginary quaternion is identical to the angular velocity
the sign of the real part gives the direction
Since the unit quaternion space is folded by the antipodal equivalence,
the angular velocity is twice as fast
"""
return 2 * np.array(q[1:]) * sign(q[0])
def normalize(v):
return v/ np.linalg.norm(v)
def get_quaternion_delta(a, b):
return quaternion_multiply(quaternion_inverse(b), a)
def quaternion_from_axis_angle(axis, angle):
q = [1,0,0,0]
q[1] = axis[0] * math.sin(angle / 2)
q[2] = axis[1] * math.sin(angle / 2)
q[3] = axis[2] * math.sin(angle / 2)
q[0] = math.cos(angle / 2)
return normalize(q)
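# Small sanity sketch (illustrative only, not part of the original pipeline):
# a rotation of 0.2 rad about the z-axis, in [w, x, y, z] order, maps to an
# angular-velocity vector of roughly [0, 0, 0.2] for small angles.
# q_z = quaternion_from_axis_angle([0.0, 0.0, 1.0], 0.2)
# print(quaternion_to_av(q_z))   # -> approx. [0, 0, 0.1997]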
def get_offset_quat(a, b):
a_len = np.linalg.norm(a)
b_len = np.linalg.norm(b)
if a_len > 0 and b_len > 0:
q = quaternion_from_vector_to_vector(a/a_len,b/b_len)
q /= np.linalg.norm(q)
return q
else:
return [1,0,0,0]
def quaternion_from_vector_to_vector(a, b):
"""src: http://stackoverflow.com/questions/1171849/finding-quaternion-representing-the-rotation-from-one-vector-to-another
http://wiki.ogre3d.org/Quaternion+and+Rotation+Primer"""
v =
|
np.cross(a, b)
|
numpy.cross
|
import gammalib
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import pdb
def format_ax(ax):
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(14)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(14)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.grid()
return
def cube_flux(model,emin,emax):
# get data
filename = model.spatial().filename().file()
hdulist = fits.open(filename)
cube = hdulist[0].data
energies = hdulist[1].data['Energy']
binsize1 = np.abs(hdulist[0].header['CDELT1'])
binsize2 = np.abs(hdulist[0].header['CDELT2'])
# multiply by solid angle (only works for tangential projection or for reference latitude ~0)
cube *= np.deg2rad(binsize1) * np.deg2rad(binsize2)
fluxes = np.sum(cube, axis=(1, 2))
# correct by spectral model
for s, energy in enumerate(energies):
fluxes[s] *= model.spectral().eval(gammalib.GEnergy(np.double(energy),'MeV'))
# select energy range
# emin, emax are in TeV and the energy vector in MeV
# just select on bins, may be inaccurate close to threshold
fluxes = fluxes[(energies >= 1.e6 * emin) & (energies <= 1.e6 * emax)]
energies = energies[(energies >= 1.e6 * emin) & (energies <= 1.e6 * emax)]
# integrate over energy in piece-wise power law approximation
gamma = - (np.log(fluxes[1:]) - np.log(fluxes[:-1])) / (np.log(energies[1:]) - np.log(energies[:-1]))
# integral flux in individual bins between two nodes
flux_int = fluxes[:-1] * energies[:-1] / (-gamma + 1) * (np.power(energies[1:]/energies[:-1],-gamma+1) - 1)
# sum over bins (renamed to avoid shadowing the builtin `int`)
flux_int = np.sum(flux_int)
return flux_int
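# Worked note on the piece-wise power-law integration above: for two adjacent
# nodes (E1, F1) and (E2, F2) the local photon index is
#   gamma = -ln(F2/F1) / ln(E2/E1)
# and the bin-integrated photon flux is
#   F1 * E1 / (1 - gamma) * ((E2/E1)**(1 - gamma) - 1),
# which is what the vectorised expression computes for every pair of adjacent nodes.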
def flux_Crab(model,Emin,Emax):
emin = gammalib.GEnergy(Emin,'TeV')
emax = gammalib.GEnergy(Emax,'TeV')
# deal with special case of cubes
if model.spatial().type() == 'DiffuseMapCube':
# get cube flux
flux = cube_flux(model,Emin,Emax)
else:
flux = model.spectral().flux(emin, emax)
# convert to Crab units
# Set Crab TeV spectral model based on a power law
crab = gammalib.GModelSpectralPlaw(5.7e-16, -2.48, gammalib.GEnergy(0.3, 'TeV'))
# calculate crab flux over the same energy range
crab_flux = crab.flux(emin, emax)
# renormalize crab flux so that it matches the Meyer model > 1 TeV (consistent with gamma-cat)
crab_flux_1TeV = crab.flux(gammalib.GEnergy(1.,'TeV'), gammalib.GEnergy(1000.,'TeV'))
# (Meyer model, flux > 1 TeV in ph cm-2 s-1)
crab_flux *= 2.0744340476909142e-11 / crab_flux_1TeV
flux /= crab_flux
return flux
def dist_from_gammalib(models,emin=1.,emax=1000):
# extracts flux, longitude, latitude, radius and name distributions from a gammalib model container
fluxes = []
lons = []
lats = []
names = []
radii = []
for model in models:
if model.classname() == 'GModelSky':
src_dir = get_model_dir(model)
lons.append(src_dir.l_deg())
lats.append(src_dir.b_deg())
rad = get_model_radius(model)
radii.append(rad)
flux = flux_Crab(model,emin,emax)
fluxes.append(flux)
names.append(model.name())
else:
pass
return lons, lats, radii, fluxes, names
def get_model_dir(model):
"""
extracts sky direction for either analytical model or DiffuseMap
:param model: ~gammalib.GModelSky
:return: src_dir: ~gammalib.GSkyDir
"""
if model.spatial().type() == 'DiffuseMap':
# retrieve direction from map
src_dir = model.spatial().region().centre()
elif model.spatial().type() == 'DiffuseMapCube':
# region does not work, extract manually from the map
# call the energies method to load the cube
model.spatial().energies()
# assume it is center of the map
sl = model.spatial().cube().extract(0)
ctr_pix = gammalib.GSkyPixel((sl.nx() - 1) / 2, (sl.ny() - 1) / 2)
src_dir = sl.pix2dir(ctr_pix)
else:
# retrieve direction from analytical model
src_dir = model.spatial().dir()
return src_dir
def get_model_radius(model):
"""
extracts radius for extended source
:param model: ~gammalib.GModelSky
:return: radius: float
"""
try:
if model.spatial().type() == 'DiffuseMapCube':
# region not implemented in gammalib, extract manually extent of the map
# call the energies method to load the cube
model.spatial().energies()
# half size along x direction
slice = model.spatial().cube().extract(0)
ctr_pix = gammalib.GSkyPixel((slice.nx() - 1) / 2, (slice.ny() - 1) / 2)
ctr_dir = slice.pix2dir(ctr_pix)
bor_pix = gammalib.GSkyPixel(0., (slice.ny() - 1) / 2)
bor_dir = slice.pix2dir(bor_pix)
radius = bor_dir.dist_deg(ctr_dir)
else:
circle = gammalib.GSkyRegionCircle(model.spatial().region())
radius = circle.radius()
except Exception:
radius = None  # fall back so the return below does not raise
print('Cannot extract radius for model {} with spatial model type {}'.format(
model.name(), model.spatial().type()))
return radius
def delete_source_fom(distx,disty,radr, frlog):
fom = np.sqrt((distx / 180) ** 2 + (disty / 10) ** 2 + (radr / 1.) ** 2 + (frlog >= 0) * (frlog / 0.6) ** 2 + (frlog < 0) * (frlog / 0.3) ** 2)
return fom
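# Illustrative example (hypothetical numbers): a candidate offset by 18 deg in
# longitude and 1 deg in latitude, with radr = 0.5 and frlog = 0.3, gives
#   fom = sqrt((18/180)**2 + (1/10)**2 + (0.5/1.)**2 + (0.3/0.6)**2) ~ 0.72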
def pop_source(d,name):
# remove source from dictionary by name
# boolean mask to identify sources to keep in final dictionary
m = (d['name'] != name)
# handle composites for which name is an array
if len(np.shape(m)) > 1:
m = np.product(m, axis=1) # both names must coincide to be the same composite
m = m.astype('bool')
for key in d.keys():
if key == 'name':
pass
else:
d[key] = d[key][m]
d['name'] = d['name'][m]
return d
def find_source_to_delete(d,lon,lat,rad,flux, radmin =0.05):
# calculate distance in lon and lat
distx = np.abs(d['GLON'] - lon)
distx[distx > 180] = 360 - distx[distx > 180]
disty = d['GLAT'] - lat
#dist = np.sqrt(distx**2 + disty**2)
# calculate radius relative difference
# take into the fact that current instruments cannot resolve objects with radii < 0.05 deg
# based on minimum measured size in HGPS
if rad < radmin:
rad = radmin
radr = 0.5 * (np.maximum(d['radius'],radmin) - rad) / (np.maximum(d['radius'],radmin) + rad)
# calculate flux_ratio log
frlog = np.log10(d['flux']/flux)
# figure of merit to decide which source to eliminate
fom = delete_source_fom(distx,disty,radr,frlog)
# eliminate closer source = minimum fom
s = np.where(fom == np.min(fom))
name = d['name'][s][0]
d = pop_source(d,name)
return name, d, distx[s], disty[s], radr[s], frlog[s]
def get_syn_model(filename,fmin,bmax,emin=1.,emax=1000.):
"""
Load synthetic population already given in gammalib format
:param filename: str, name of XML file
:param fmin: float, minimum flux (Crab)
:param bmax: float, maximum absolute Galactic latitude (deg)
:param emin: float, minimum energy for flux computation (TeV)
:param emax: float, maximum energy for flux computation (TeV)
:return: ~gammalib.GModels, dic, output models and dictionary of their properties
"""
# load input models
models = gammalib.GModels(filename)
# parameter distribution for the requested energy range
lons, lats, radii, fluxes, names = dist_from_gammalib(models,emin=emin,emax=emax)
# output models
outmodels = gammalib.GModels()
for s, model in enumerate(models):
if fluxes[s] > fmin and np.abs(lats[s]) < bmax:
outmodels.append(model)
else:
pass
# regenerate distributions for standard energy range
lons, lats, radii, fluxes, names = dist_from_gammalib(outmodels,emin=1.,emax=1000.)
# create dictionary with source parameters
d = {'name' : np.array(names),
'GLON' : np.array(lons),
'GLAT' : np.array(lats),
'radius' : np.array(radii),
'flux' : np.array(fluxes)}
return outmodels, d
def plot_del_sources(distx,disty,radr, frlog,namestr,namefull):
fig4 = plt.figure('Deleted {} XY'.format(namefull))
ax4 = plt.subplot()
ax4.set_xlabel("Longitude distance (deg)", fontsize=14)
ax4.set_ylabel('Latitude distance (deg)', fontsize=14)
format_ax(ax4)
ax4.scatter(distx, disty)
# add FOM contours
xmin, xmax = ax4.get_xlim()
ymin, ymax = ax4.get_ylim()
xv, yv = np.meshgrid(np.linspace(xmin, xmax, 50),
np.linspace(ymin, ymax, 50))
fom = delete_source_fom(xv, yv, 0.2, 0.3)
cs = ax4.contour(xv, yv, fom)
ax4.clabel(cs, inline=1, fontsize=10)
fig4.savefig('{}XY.png'.format(namestr), dpi=300)
fig5 = plt.figure('Deleted {} Flux-radius'.format(namefull))
ax5 = plt.subplot()
ax5.set_xlabel("Relative radius difference", fontsize=14)
ax5.set_ylabel('Log10(flux ratio)', fontsize=14)
format_ax(ax5)
ax5.scatter(radr, frlog)
# add FOM contours
xmin, xmax = ax5.get_xlim()
ymin, ymax = ax5.get_ylim()
xv, yv = np.meshgrid(np.linspace(xmin, xmax, 50),
np.linspace(ymin, ymax, 50))
fom = delete_source_fom(20, 2, xv, yv)
cs = ax5.contour(xv, yv, fom)
ax5.clabel(cs, inline=1, fontsize=10)
fig5.savefig('{}Flux-rad.png'.format(namestr), dpi=300)
return
def set_composites(pwn_dict,snr_dict, outfilename = None):
# create arrays to store output quantities
names = []
lons = np.array([])
lats = np.array([])
radii =
|
np.array([])
|
numpy.array
|
# from sklearn.manifold import TSNE, LocallyLinearEmbedding
from tsnecuda import TSNE as cudaTSNE
import matplotlib.pyplot as plt
import torchvision.models as models
import torch
import wandb
import numpy as np
import pcl.loader
import pcl.builder
# from utils import *
import torchvision.transforms as transforms
from clustering import *
from sklearn.cluster import DBSCAN
import os
import math
import torchvision.datasets as datasets
from PIL import ImageFilter, Image
import umap
import cuml
# explicit imports for names used further below (PCA, hdbscan, faiss, random, tqdm, nn);
# some of these may already be provided by the star import from `clustering`
import random
import hdbscan
import faiss
import torch.nn as nn
from tqdm import tqdm
from sklearn.decomposition import PCA
# import umap.plot
class CIFAR10Instance_w_label(datasets.CIFAR10):
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, index, target)
"""
img, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
# print(img.shape)
if self.transform is not None:
img = self.transform(img)
# print(img.shape)
return img, index, target
def compute_features(eval_loader, model, low_dim=128, gpu=0):
print('Computing features...')
model.eval()
features = torch.zeros(len(eval_loader.dataset),low_dim).cuda(gpu)
targets = torch.zeros(len(eval_loader.dataset), dtype=torch.long)
for i, (images, index, target) in enumerate(tqdm(eval_loader)):
with torch.no_grad():
images = images.cuda(gpu, non_blocking=True)
feat = model(images,is_eval=True)
features[index] = feat
targets[index] = target
# dist.barrier()
# dist.all_reduce(features, op=dist.ReduceOp.SUM)
return features.cpu(), targets
def load_data(data='cifar', aug_plus=True):
"""
Parameters
----------
data : str
which dataset to load; only 'cifar' is handled here
aug_plus : bool
if True, use MoCo v2-style augmentation, otherwise MoCo v1-style
Returns
-------
train_dataset : torch.utils.data.Dataset
torch dataset for training data
eval_dataset : torch.utils.data.Dataset
torch dataset for evaluation data
"""
if data == 'cifar':
# Data loading code
# traindir = os.path.join(args.data, 'train')
normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
std=[0.247, 0.243, 0.261])
if aug_plus:
# MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709
augmentation = [
transforms.RandomResizedCrop(32, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([pcl.loader.GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
else:
# MoCo v1's aug: same as InstDisc https://arxiv.org/abs/1805.01978
augmentation = [
transforms.RandomResizedCrop(32, scale=(0.2, 1.)),
transforms.RandomGrayscale(p=0.2),
transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
# center-crop augmentation
eval_augmentation = transforms.Compose([
transforms.Resize(32),
transforms.CenterCrop(32),
transforms.ToTensor(),
normalize
])
train_dataset = CIFAR10Instance_w_label(
'data', train=True,
transform=pcl.loader.TwoCropsTransform(transforms.Compose(augmentation)), download=True)
eval_dataset = CIFAR10Instance_w_label(
'data', train=True,
transform=eval_augmentation, download=True)
return train_dataset, eval_dataset
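# Usage sketch (assumed batch size and worker count, not from the original script):
# train_dataset, eval_dataset = load_data(data='cifar', aug_plus=True)
# eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=256,
#                                           shuffle=False, num_workers=4)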
def im2cluster_to_centroids(x, im2cluster):
# returns an np.ndarray of c_i, where each c_i is the mean of all inputs with cluster i
centroids = np.zeros((max(im2cluster) + 1, len(x[0]))) # (num_clusters, C)
# unique_clusters = set(im2cluster)
counts = np.zeros(max(im2cluster) + 1) # (num_clusters)
for idx in range(len(x)):
cluster = im2cluster[idx]
centroids[cluster] += x[idx]
counts[cluster] += 1
centroids = centroids / np.expand_dims(counts, axis=1) # taking mean of vectors in each cluster
# normalizing since means won't lie on the unit sphere after taking mean (could be like in the middle, this is especially likely for noise class)
centroids = centroids / np.linalg.norm(centroids, axis=1).reshape(-1, 1)
return centroids
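# Minimal sketch with toy data (not from the experiments): two 2-D points in
# cluster 0 and one in cluster 1 produce two unit-norm centroids.
# toy_x = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0]])
# print(im2cluster_to_centroids(toy_x, [0, 0, 1]))
# -> [[ 0.7071  0.7071]
#     [-1.      0.    ]]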
def l2_distance(x, y):
return np.linalg.norm(x - y)
def run_dbscan(x, minPts=200, minSamples=0, temperature=0.2, eps=0.3):
# make sure the parser args has the necessary dbscan parameters (eps, minPts) - ADDED
# x = x.numpy() # this is already done before calling the function
# n = x.shape[0] # number of samples
# d = x.shape[1] # dimension
print('performing dbscan clustering')
results = {'im2cluster':[],'centroids':[],'density':[],'sampled_protos':[]}
# x = x.numpy()
pca = PCA(n_components = 20)
features = pca.fit_transform(x)
# print(pca.explained_variance_ratio_)
# im2cluster = db.labels_
if minSamples:
clusterer = hdbscan.HDBSCAN(min_cluster_size=minPts, min_samples=minSamples)
else:
clusterer = hdbscan.HDBSCAN(min_cluster_size=minPts)
# clusterer = DBSCAN(eps=eps, min_samples=minPts, n_jobs=-1, metric='euclidean').fit(features) # run DBSCAN
# im2cluster = clusterer.labels_
im2cluster = clusterer.fit_predict(features)
if -1 in im2cluster: # so that noise data is in cluster 0 instead of -1
im2cluster += 1
centroids = im2cluster_to_centroids(x, im2cluster)
# density = np.ones(len(set(im2cluster))) * args.temperature
Dcluster = [[] for c in range(len(centroids))]
for im,i in enumerate(im2cluster):
# Dcluster[i].append(D[im][0])
Dcluster[i].append(l2_distance(centroids[i], x[im]))
# concentration estimation (phi)
density = np.zeros(len(centroids))
for i,dist in enumerate(Dcluster):
if len(dist)>1:
density[i] = (np.asarray(dist)).mean()/np.log(len(dist)+10) # i got rid of the **0.5 since then we aren't actually doing l2 distances? idk why the authors did it (tbd)
#if cluster only has one point, use the max to estimate its concentration
dmax = density.max()
for i,dist in enumerate(Dcluster):
if len(dist)<=1:
density[i] = dmax
density = density.clip(np.percentile(density,10),np.percentile(density,90)) #clamp extreme values for stability
density = temperature*density/density.mean() #scale the mean to temperature
im2cluster = torch.LongTensor(im2cluster).cuda(gpu)
unique_clusters = set(im2cluster.tolist())
print(unique_clusters)
density = torch.Tensor(density).cuda(gpu)
centroids = torch.Tensor(centroids).cuda(gpu)
# centroids = nn.functional.normalize(centroids, p=args.norm_p, dim=1) # hmmmm ?
results['centroids'].append(centroids)
results['density'].append(density)
results['im2cluster'].append(im2cluster)
# run dbscan, and then select a random core point from each cluster to be the centroid
return results
# density = np.ones(len(set(im2cluster))) * args.temperature
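# Note on the concentration estimate ("phi") used in run_dbscan above:
#   phi_i = mean(||x - c_i||) / log(n_i + 10),
# clipped to the 10th-90th percentile and rescaled so its mean equals `temperature`;
# tight, well-populated clusters get a small phi (sharper prototype distribution),
# sparse clusters a large one.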
def run_kmeans(x, num_cluster=['250'], centroid_sampling=False, temperature=0.2):
"""
Args:
x: data to be clustered
"""
print('performing kmeans clustering')
results = {'im2cluster':[],'centroids':[],'density':[],'sampled_protos':[]}
for seed, num_cluster in enumerate(num_cluster):
# intialize faiss clustering parameters
d = x.shape[1]
k = int(num_cluster)
clus = faiss.Clustering(d, k)
clus.verbose = True
clus.niter = 20
clus.nredo = 5
clus.seed = seed
clus.max_points_per_centroid = 1000
clus.min_points_per_centroid = 10
res = faiss.StandardGpuResources()
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = False # originally False
cfg.device = 0
# cfg.device = 1 #REMEMBER TO CHANGE THIS
index = faiss.GpuIndexFlatL2(res, d, cfg)
clus.train(x, index)
D, I = index.search(x, 1) # for each sample, find cluster distance and assignments
im2cluster = [int(n[0]) for n in I]
# get cluster centroids
centroids = faiss.vector_to_array(clus.centroids).reshape(k,d)
# sample-to-centroid distances for each cluster
Dcluster = [[] for c in range(k)]
indices_per_cluster = [[] for c in range(k)] # for next step - random sampling
for im,i in enumerate(im2cluster):
Dcluster[i].append(D[im][0])
indices_per_cluster[i].append(im)
if centroid_sampling:
# print("WTF")
# sample a random point from each cluster to act as a prototype rather than the centroid
# sampled_protos = [np.zeros((len(indices_per_cluster[i]), d)) for i in range(k)]
sampled_protos = [0 for i in range(k)]
for i in range(k):
# if there are no points other than the centroid (empty), this won't work
# print(len(indices_per_cluster[i]))
selected_proto_id = random.choice(indices_per_cluster[i])  # i already indexes the k clusters
sampled_protos[i] = selected_proto_id
# sampled_protos[i] = x[indices_per_cluster[i]]
# concentration estimation (phi)
density = np.zeros(k)
for i,dist in enumerate(Dcluster):
if len(dist)>1:
d = (np.asarray(dist)**0.5).mean()/np.log(len(dist)+10)
density[i] = d
#if cluster only has one point, use the max to estimate its concentration
dmax = density.max()
for i,dist in enumerate(Dcluster):
if len(dist)<=1:
density[i] = dmax
density = density.clip(np.percentile(density,10),np.percentile(density,90)) #clamp extreme values for stability
density = temperature*density/density.mean() #scale the mean to temperature
# convert to cuda Tensors for broadcast
centroids = torch.Tensor(centroids).cuda()
centroids = nn.functional.normalize(centroids, p=2, dim=1) # hmmmm ?
if centroid_sampling:
for i in range(k):
sampled_protos[i] = torch.Tensor(sampled_protos[i]).cuda()
im2cluster = torch.LongTensor(im2cluster).cuda()
density = torch.Tensor(density).cuda()
results['centroids'].append(centroids)
results['density'].append(density)
results['im2cluster'].append(im2cluster)
if centroid_sampling:
results['sampled_protos'].append(sampled_protos)
return results
def calculate_kn_distance(X,k):
kn_distance = []
for i in range(len(X)):
eucl_dist = []
for j in range(len(X)):
# eucl_dist.append(
# math.sqrt(
# ((X[i,0] - X[j,0]) ** 2) +
# ((X[i,1] - X[j,1]) ** 2)))
eucl_dist.append(np.linalg.norm(X[i] - X[j]))
eucl_dist.sort()
kn_distance.append(eucl_dist[k])
return kn_distance
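# Usage sketch (assumes a feature matrix `X_feats` is available): the sorted
# k-distance curve is the usual heuristic for choosing a DBSCAN `eps` at the elbow.
# eps_dists = calculate_kn_distance(X_feats, k=200)
# plt.plot(np.sort(eps_dists)); plt.ylabel('k-NN distance'); plt.show()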
# visualizing the data
print('Visualizing Representations...')
# methods = [LocallyLinearEmbedding(n_components=2, method='standard'), TSNE(n_components=2, init='pca')]
# tsne = TSNE(n_components=2, perplexity=45, learning_rate=200, verbose=1)
# methods = tsne
def plot_tsne(num_classes=20, num_samples=10000):
features, classes = compute_features(eval_loader, model, low_dim=low_dim, gpu=gpu)
features[torch.norm(features,dim=1)>1.5] /= 2 #account for the few samples that are computed twice
features = features.numpy()
# results = run_dbscan(features)
# results = run_kmeans(features)
# im2cluster = results['im2cluster'][0].tolist() # remember to turn this back to a list
restricted_classes = [i for i in classes if i < num_classes]
features = features[np.array(classes) < num_classes]
print(len(restricted_classes))
print(len(features))
features = features[:num_samples]
restricted_classes = restricted_classes[:num_samples]
hparams = [2500]
fig = plt.figure(figsize=(25,25))
for i, hparam in enumerate(hparams):
print('hparam: {}'.format(i))
# tsne = TSNE(n_components=2, perplexity=30, learning_rate=hparam, verbose=1, n_jobs=-1, n_iter=2500)
tsne = cudaTSNE(n_components=2, perplexity=50, learning_rate=600, verbose=1, n_iter=hparam)
y = tsne.fit_transform(features)
if len(hparams) == 1:
ax = fig.add_subplot(1, len(hparams), i + 1)
else:
ax = fig.add_subplot(3, len(hparams)//3 + 1, i + 1)
ax.scatter(y[:, 0], y[:, 1], c=restricted_classes)
ax.set_title('hparam: {}'.format(hparam))
if not os.path.exists('imgs/tsne_{}'.format(checkpoint_id[0])):
os.makedirs('imgs/tsne_{}'.format(checkpoint_id[0]))
save_path = 'imgs/tsne_{}/tsne_{}_{}'.format(checkpoint_id[0], checkpoint_id[0], checkpoint_id[1])
fig.savefig(save_path)
print('Figure saved to : {}'.format(save_path))
def plot_umap(num_classes=20, num_samples=10000):
features, classes = compute_features(eval_loader, model, low_dim=low_dim, gpu=gpu)
features[torch.norm(features,dim=1)>1.5] /= 2 #account for the few samples that are computed twice
features = features.numpy()
# results = run_dbscan(features)
# results = run_kmeans(features)
# im2cluster = results['im2cluster'][0].tolist() # remember to turn this back to a list
restricted_classes = np.array([i for i in classes if i < num_classes])
features = features[np.array(classes) < num_classes]
features = features[:num_samples]
restricted_classes = restricted_classes[:num_samples]
hparams = [2500]
for i, hparam in enumerate(hparams):
print('hparam: {}'.format(i))
reducer = umap.UMAP(n_neighbors = 30, min_dist=0.1, n_components=2, metric='cosine')
y = reducer.fit_transform(features)
# ax = umap.plot.points(reducer, labels=restricted_classes) # need to change this
fig, ax = plt.subplots(figsize=(25, 25))  # plot the embedding directly with matplotlib
ax.scatter(y[:, 0], y[:, 1], c=restricted_classes, cmap='Spectral', s=3)
ax.set_title('hparam: {}'.format(hparam))
if not os.path.exists('imgs/umap_{}'.format(checkpoint_id[0])):
os.makedirs('imgs/umap_{}'.format(checkpoint_id[0]))
save_path = 'imgs/umap_{}/umap_{}_{}'.format(checkpoint_id[0], checkpoint_id[0], checkpoint_id[1])
# save_path = 'imgs/umap_{}/umap_{}_{}'.format(checkpoint_id[0], 'cosine', checkpoint_id[1])
# fig = ax.get_figure()
ax.figure.savefig(save_path)
print('Figure saved to : {}'.format(save_path))
def plot_progression(checkpoint_id, num_rows=3, num_progressions=9, num_classes=20, num_samples=10000, algorithm = 'umap', true_classes=True):
checkpoints = os.listdir('pcl_cifar10_{}'.format(checkpoint_id[0]))
checkpoints.sort()
assert num_progressions <= len(checkpoints), 'Not enough checkpoints saved.'
checkpoints = [checkpoints[i] for i in range(0, len(checkpoints), len(checkpoints) // (num_progressions-1))][:num_progressions-1] + [checkpoints[-1]]
fig, axes = plt.subplots(num_rows, len(checkpoints)//num_rows + 1 if len(checkpoints) % num_rows != 0 else len(checkpoints)//num_rows, figsize=(40,25))
print(checkpoints)
for i, checkpoint_file in enumerate(checkpoints):
# print(checkpoints)
# print(checkpoint_file)
epoch = checkpoint_file[11:15] # just the 4-digit checkpoint epoch
print('epoch: {}'.format(epoch))
checkpoint = torch.load('pcl_cifar10_{}/{}'.format(checkpoint_id[0], checkpoint_file))
model.load_state_dict(checkpoint['state_dict'])
features, classes = compute_features(eval_loader, model, low_dim=low_dim, gpu=gpu)
features[torch.norm(features,dim=1)>1.5] /= 2 #account for the few samples that are computed twice
features = features.numpy()
restricted_classes = np.array([i for i in classes if i < num_classes])
features = features[np.array(classes) < num_classes]
features = features[:num_samples]
restricted_classes = restricted_classes[:num_samples]
if algorithm == 'umap':
# reducer = umap.UMAP(n_neighbors = 60, min_dist=0.1, n_components=2, metric='cosine')
reducer = cuml.UMAP(n_neighbors=60, min_dist=0.1, n_components=2, n_epochs=1000)
y = reducer.fit_transform(features)
elif algorithm == 'tsne':
tsne = cudaTSNE(n_components=2, perplexity=50, learning_rate=600, verbose=1, n_iter=2500, metric='euclidean')
y = tsne.fit_transform(features)
if true_classes:
scatter = axes.flat[i].scatter(y[:, 0], y[:, 1], c = restricted_classes, cmap='Spectral', s=3)
else:
with torch.no_grad():
results = run_dbscan(features, minPts=200, minSamples=0, temperature=0.2)
# results = run_kmeans(features, num_cluster=['250'])
im2cluster = results['im2cluster'][0].tolist() # remember to turn this back to a list
scatter = axes.flat[i].scatter(y[:, 0], y[:, 1], c = im2cluster, cmap='Spectral', s=3) # restricting num_classes does not work here
# legend = axes.flat[i].legend(*scatter.legend_elements(), loc='lower left', title="Classes")
# axes.flat[i].add_artist(legend)
axes.flat[i].set_title('epoch: {}'.format(epoch))
axes.flat[-1].legend(*scatter.legend_elements(), loc='lower left', title="Classes", bbox_to_anchor=(1.00, 0), prop={'size': 25})
fig.suptitle('{}_{}: {}'.format(algorithm, checkpoint_id[0], 'Cifar Classes' if true_classes else 'Clustering Classes'), fontsize=20)
if not os.path.exists('imgs/{}_{}'.format(algorithm, checkpoint_id[0])):
os.makedirs('imgs/{}_{}'.format(algorithm, checkpoint_id[0]))
save_path = 'imgs/{}_{}/{}_{}'.format(algorithm, checkpoint_id[0], 'progression', 'true_classes' if true_classes else 'cluster_classes')
fig.savefig(save_path)
print('Figure saved to : {}'.format(save_path))
def plot_comparison(checkpoint_id, num_progressions=5, num_classes=20, num_samples=10000, algorithm = 'umap'):
checkpoints = os.listdir('pcl_cifar10_{}'.format(checkpoint_id[0]))
checkpoints.sort()
assert num_progressions <= len(checkpoints), 'Not enough checkpoints saved.'
checkpoints = [checkpoints[i] for i in range(0, len(checkpoints), len(checkpoints) // (num_progressions-1))][:num_progressions-1] + [checkpoints[-1]]
# fig, axes = plt.subplots(len(checkpoints), 2, figsize=(30,50))
fig, axes = plt.subplots(2, len(checkpoints), figsize=(60,30))
print(checkpoints)
for i, checkpoint_file in enumerate(checkpoints):
# print(checkpoints)
# print(checkpoint_file)
epoch = checkpoint_file[11:15] # just the 4-digit checkpoint epoch
print('epoch: {}'.format(epoch))
checkpoint = torch.load('pcl_cifar10_{}/{}'.format(checkpoint_id[0], checkpoint_file))
model.load_state_dict(checkpoint['state_dict'])
features, classes = compute_features(eval_loader, model, low_dim=low_dim, gpu=gpu)
features[torch.norm(features,dim=1)>1.5] /= 2 #account for the few samples that are computed twice
features = features.numpy()
restricted_classes =
|
np.array([i for i in classes if i < num_classes])
|
numpy.array
|
import numpy as np
import numpy.ma as ma
import argparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import mutrel
import inputparser
from common import Models
MISSING = -1
def load_mutrels(mutrel_args):
mutrels = {}
for mutrel_arg in mutrel_args:
mutrel_name, mutrel_path = mutrel_arg.split('=', 1)
assert mutrel_name not in mutrels, '%s is duplicate' % mutrel_name
if os.path.exists(mutrel_path):
mrel = np.load(mutrel_path)
mutrels[mutrel_name] = mutrel.Mutrel(vids=mrel['vids'], rels=mrel['rels'])
else:
mutrels[mutrel_name] = None
return mutrels
def discard_garbage(mutrels, clustered, garbage, ignore_garbage_for):
for name in list(mutrels.keys()):
if mutrels[name] is None:
continue
vids = mutrels[name].vids
assert set(vids) == clustered | garbage, 'vids do not match expected set for %s' % name
gidxs = [idx for idx, vid in enumerate(vids) if vid in garbage]
if name not in ignore_garbage_for:
for garbrels in (mutrels[name].rels[gidxs,:,Models.garbage], mutrels[name].rels[:,gidxs,Models.garbage].T):
# Garbage mutations should have posterior that coclusters with
# themselves, but that renders them garbage to every other mutation
# (including other garbage)
G, M = garbrels.shape
# `expected` shape, given `G` garbage variants and `M` total variants: `GxM`
expected = np.ones((G, M))
expected[np.arange(G),gidxs] = 0
assert np.allclose(expected, garbrels), '%s garbage relations are wrong' % name
mutrels[name] = mutrel.remove_variants_by_vidx(mutrels[name], gidxs)
assert set(mutrels[name].vids) == clustered
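# Illustrative shape note for the check above: with G = 2 garbage variants at
# gidxs = [0, 3] and M = 4 variants total, `expected` is
#   [[0, 1, 1, 1],
#    [1, 1, 1, 0]]
# i.e. each garbage variant has garbage probability 0 with itself (it coclusters
# with itself) and garbage probability 1 with every other variant.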
def _score_distl1(rels, truth):
# Compute mean L1 distance.
M = len(rels)
dist = np.sum(np.abs(rels - truth), axis=2)
assert np.allclose(dist, dist.T)
assert np.allclose(0, np.diag(dist))
# A pairwise mutrel vector can have a maximum L1 distance of 2, when no
# elements have overlap with each other. Normalize this so scores can be
# interpreted as "mean proportion of miscalled relations", given hard 0/1
# calls for relation types.
dist /= 2
# Distances may be slightly higher than 1 because of floating point error.
# Set these to exactly 1.
dist[np.logical_and(dist > 1, np.isclose(1, dist))] = 1
assert np.all(0 <= dist) and np.all(dist <= 1)
# Take entries strictly below the main diagonal.
dist_lower = np.tril(dist, -1)
assert dist_lower.shape == (M, M)
# There are (M choose 2) elements below the diagonal, so divide by this
# when computing mean.
score = np.sum(dist_lower) / (0.5*M*(M - 1))
return score
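# Worked example: if for some pair the truth places all mass on one relation type
# and `rels` places all mass on a different type, the pairwise L1 distance is 2,
# so after the division by 2 that pair contributes a fully miscalled relation (1.0)
# to the mean.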
def _compute_kld(P, Q):
for A in (P, Q):
assert np.all(A >= 0)
assert np.allclose(1, np.sum(A, axis=2))
logP = ma.log2(ma.masked_equal(P, 0))
logQ = ma.log2(
|
ma.masked_equal(Q, 0)
|
numpy.ma.masked_equal
|
from operator import pos
from numpy.core.defchararray import array
from quadrotor_env import quad, sensor
from quadrotor_control import Controller
from quadrotor_estimator import MEKF
from quaternion_euler_utility import euler_quat
from scipy.spatial.transform import Rotation as R
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#Quadrotor Model Object
quad_model = quad(t_step=0.01, n=1, training=False, euler=0, direct_control=0, T=1, clipped=True)
sens = sensor(quad_model)
# seed = quad_model.seed(2)
#Trajectory Generator ---> Obtain positions, velocities and accelerations references vectors
inner_length = 1
controller = Controller(total_time = 10, sample_time = 0.01, inner_length = inner_length)
x_wp = np.array([[0.2, 0.3, 0.4, 0.5, 0.6]]).T
y_wp = np.array([[0, 0, 0.2, 0.3, 0.3]]).T
z_wp = np.array([[2, 2.5, 3, 3, 3]]).T
psi_wp = np.array([[0, 0, 0, np.pi/4, np.pi/2]]).T
t = [0, 5, 10, 15, 20]
step = 0.01
_, _, x_matrix = controller.getCoeff_snap(x_wp, t)
_, _, y_matrix = controller.getCoeff_snap(y_wp, t)
_, _, z_matrix = controller.getCoeff_snap(z_wp, t)
_, _, psi_matrix = controller.getCoeff_accel(psi_wp, t)
x_ref, dotx_ref, ddotx_ref, _, _ = controller.evaluate_equations_snap(t, step, x_matrix)
y_ref, doty_ref, ddoty_ref, _, _ = controller.evaluate_equations_snap(t, step, y_matrix)
z_ref, dotz_ref, ddotz_ref, _, _ = controller.evaluate_equations_snap(t, step, z_matrix)
psi_ref, _, _ = controller.evaluate_equations_accel(t, step, psi_matrix)
# x_ref, dotx_ref, ddotx_ref, y_ref, doty_ref, ddoty_ref, z_ref, dotz_ref, ddotz_ref, psiInt = controller.trajectory_generator(radius=1, frequency=np.pi/15, max_h=7, min_h=4)
outer_length = len(x_ref)
#Get initial states
x_atual, _ = quad_model.reset(
|
np.array([0.2, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0])
|
numpy.array
|
import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises
import pandas as pd
import pandas.util.testing as tm
import pytest
from statsmodels.base import data as sm_data
from statsmodels.formula import handle_formula_data
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.discrete.discrete_model import Logit
# FIXME: do not leave commented-out, enable or move/remove
# class TestDates(object):
# @classmethod
# def setup_class(cls):
# nrows = 10
# cls.dates_result = cls.dates_results = np.random.random(nrows)
#
# def test_dates(self):
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)
class TestArrays(object):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10)
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y'
cls.row_labels = None
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog)
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
def test_names(self):
data = self.data
np.testing.assert_equal(data.xnames, self.xnames)
np.testing.assert_equal(data.ynames, self.ynames)
def test_labels(self):
# HACK: because numpy master after NA stuff assert_equal fails on
# pandas indices
# FIXME: see if this can be de-hacked
np.testing.assert_(np.all(self.data.row_labels == self.row_labels))
class TestArrays2dEndog(TestArrays):
@classmethod
def setup_class(cls):
super(TestArrays2dEndog, cls).setup_class()
cls.endog = np.random.random((10, 1))
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
class TestArrays1dExog(TestArrays):
@classmethod
def setup_class(cls):
super(TestArrays1dExog, cls).setup_class()
cls.endog = np.random.random(10)
exog = np.random.random(10)
cls.data = sm_data.handle_data(cls.endog, exog)
cls.exog = exog[:, None]
cls.xnames = ['x1']
cls.ynames = 'y'
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog.squeeze())
class TestDataFrames(TestArrays):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
tm.assert_series_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
tm.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
tm.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
class TestDataFramesWithMultiIndex(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
mi = pd.MultiIndex.from_product([['x'], ['1', '2']])
exog = pd.DataFrame(np.random.random((10, 2)), columns=mi)
exog_flattened_idx = pd.Index(['const', 'x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input, index=exog_flattened_idx)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog_flattened_idx,
columns=exog_flattened_idx)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
class TestLists(TestArrays):
@classmethod
def setup_class(cls):
super(TestLists, cls).setup_class()
cls.endog = np.random.random(10).tolist()
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))].tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
class TestRecarrays(TestArrays):
@classmethod
def setup_class(cls):
super(TestRecarrays, cls).setup_class()
cls.endog = np.random.random(9).view([('y_1', 'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'), ('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog,
self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog,
self.exog.view((float, 3), type=np.ndarray))
class TestStructarrays(TestArrays):
@classmethod
def setup_class(cls):
super(TestStructarrays, cls).setup_class()
cls.endog = np.random.random(9).view([('y_1', 'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'), ('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog,
self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog,
self.exog.view((float, 3), type=np.ndarray))
class TestListDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10).tolist()
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameList(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x1', 'x2'])
exog.insert(0, 'const', 1)
cls.exog = exog.values.tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestArrayDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10)
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameArray(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x1', 'x2']) # names mimic defaults
exog.insert(0, 'const', 1)
cls.exog = exog.values
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestSeriesDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.Series(np.random.random(10), name='y_1')
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_series_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestSeriesSeries(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.Series(np.random.random(10), name='y_1')
exog = pd.Series(np.random.random(10), name='x_1')
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 1
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=[exog.name])
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=[exog.name],
columns=[exog.name])
cls.xnames = ['x_1']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_series_equal(self.data.orig_endog, self.endog)
tm.assert_series_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values[:, None])
def test_alignment():
# Fix Issue GH#206
from statsmodels.datasets.macrodata import load_pandas
d = load_pandas().data
# growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1] # incorrect indexing for test purposes
endog = gs_l_realinv
# re-index because they will not conform to lint
realgdp = gs_l_realgdp.reindex(lint.index, method='bfill')
data = dict(const=np.ones_like(lint), lrealgdp=realgdp, lint=lint)
exog = pd.DataFrame(data)
# TODO: which index do we get??
np.testing.assert_raises(ValueError, OLS, *(endog, exog))
class TestMultipleEqsArrays(TestArrays):
@classmethod
def setup_class(cls):
cls.endog = np.random.random((10, 4))
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.cov_eq_result = cls.cov_eq_input = np.random.random((neqs, neqs))
cls.col_eq_result = cls.col_eq_input = np.array((neqs, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = ['y1', 'y2', 'y3', 'y4']
cls.row_labels = None
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
np.testing.assert_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
self.cov_eq_result)
np.testing.assert_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMultipleEqsDataFrames(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = endog = pd.DataFrame(np.random.random((10, 4)),
columns=['y_1', 'y_2', 'y_3', 'y_4'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.cov_eq_input = np.random.random((neqs, neqs))
cls.cov_eq_result = pd.DataFrame(cls.cov_eq_input,
index=endog.columns,
columns=endog.columns)
cls.col_eq_input = np.random.random((nvars, neqs))
cls.col_eq_result = pd.DataFrame(cls.col_eq_input,
index=exog.columns,
columns=endog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = ['y_1', 'y_2', 'y_3', 'y_4']
cls.row_labels = cls.exog.index
def test_attach(self):
data = self.data
tm.assert_series_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
tm.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
tm.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
tm.assert_frame_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
self.cov_eq_result)
tm.assert_frame_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMissingArray(object):
@classmethod
def setup_class(cls):
X = np.random.random((25, 4))
y = np.random.random(25)
y[10] = np.nan
X[2, 3] = np.nan
X[14, 2] = np.nan
cls.y, cls.X = y, X
@pytest.mark.smoke
def test_raise_no_missing(self):
# GH#1700
sm_data.handle_data(np.random.random(20), np.random.random((20, 2)),
'raise')
def test_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
sm_data.handle_data(self.y, self.X, 'raise')
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y[idx]
X = X[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y)
np.testing.assert_array_equal(data.exog, X)
def test_none(self):
data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
np.testing.assert_array_equal(data.endog, self.y)
np.testing.assert_array_equal(data.exog, self.X)
assert data.k_constant == 0
def test_endog_only_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
sm_data.handle_data(self.y, None, 'raise')
def test_endog_only_drop(self):
y = self.y
y = y[~np.isnan(y)]
data = sm_data.handle_data(self.y, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_mv_endog(self):
y = self.X
y = y[~np.isnan(y).any(axis=1)]
data = sm_data.handle_data(self.X, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_extra_kwargs_2d(self):
sigma = np.random.random((25, 25))
sigma = sigma + sigma.T - np.diag(np.diag(sigma))
data = sm_data.handle_data(self.y, self.X, 'drop', sigma=sigma)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
sigma = sigma[idx][:, idx]
np.testing.assert_array_equal(data.sigma, sigma)
def test_extra_kwargs_1d(self):
weights = np.random.random(25)
data = sm_data.handle_data(self.y, self.X, 'drop', weights=weights)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
weights = weights[idx]
np.testing.assert_array_equal(data.weights, weights)
class TestMissingPandas(object):
@classmethod
def setup_class(cls):
X = np.random.random((25, 4))
y = np.random.random(25)
y[10] = np.nan
X[2, 3] = np.nan
X[14, 2] = np.nan
cls.y = pd.Series(y)
cls.X = pd.DataFrame(X)
@pytest.mark.smoke
def test_raise_no_missing(self):
# GH#1700
sm_data.handle_data(pd.Series(np.random.random(20)),
pd.DataFrame(np.random.random((20, 2))),
'raise')
def test_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
sm_data.handle_data(self.y, self.X, 'raise')
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y.loc[idx]
X = X.loc[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
tm.assert_series_equal(data.orig_endog, self.y.loc[idx])
np.testing.assert_array_equal(data.exog, X.values)
tm.assert_frame_equal(data.orig_exog, self.X.loc[idx])
def test_none(self):
data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
np.testing.assert_array_equal(data.endog, self.y.values)
np.testing.assert_array_equal(data.exog, self.X.values)
assert data.k_constant == 0
def test_endog_only_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
sm_data.handle_data(self.y, None, 'raise')
def test_endog_only_drop(self):
y = self.y
y = y.dropna()
data = sm_data.handle_data(self.y, None, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
def test_mv_endog(self):
y = self.X
y = y.loc[~np.isnan(y.values).any(axis=1)]
data = sm_data.handle_data(self.X, None, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
def test_labels(self):
labels = pd.Index([0, 1, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24])
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_(data.row_labels.equals(labels))
class TestConstant(object):
@classmethod
def setup_class(cls):
from statsmodels.datasets.longley import load_pandas
cls.data = load_pandas()
def test_array_constant(self):
exog = self.data.exog.copy()
exog['const'] = 1
data = sm_data.handle_data(self.data.endog.values, exog.values)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
def test_pandas_constant(self):
exog = self.data.exog.copy()
exog['const'] = 1
data = sm_data.handle_data(self.data.endog, exog)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
def test_pandas_noconstant(self):
exog = self.data.exog.copy()
data = sm_data.handle_data(self.data.endog, exog)
np.testing.assert_equal(data.k_constant, 0)
np.testing.assert_equal(data.const_idx, None)
def test_array_noconstant(self):
exog = self.data.exog.copy()
data = sm_data.handle_data(self.data.endog.values, exog.values)
np.testing.assert_equal(data.k_constant, 0)
np.testing.assert_equal(data.const_idx, None)
class TestHandleMissing(object):
def test_pandas(self):
df = tm.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
tm.assert_frame_equal(data['exog'], X_exp)
tm.assert_series_equal(data['endog'], y_exp)
def test_arrays(self):
arr = np.random.randn(20, 4)
arr[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = arr[:, 0], arr[:, 1:]
data, _ = sm_data.handle_missing(y, X, missing='drop')
bools_mask = np.ones(20, dtype=bool)
bools_mask[[2, 5, 10]] = False
y_exp = arr[bools_mask, 0]
X_exp = arr[bools_mask, 1:]
np.testing.assert_array_equal(data['endog'], y_exp)
np.testing.assert_array_equal(data['exog'], X_exp)
def test_pandas_array(self):
df = tm.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]].values
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]].values
np.testing.assert_array_equal(data['exog'], X_exp)
tm.assert_series_equal(data['endog'], y_exp)
def test_array_pandas(self):
df = tm.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]].values, df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]].values, df[df.columns[1:]]
tm.assert_frame_equal(data['exog'], X_exp)
np.testing.assert_array_equal(data['endog'], y_exp)
def test_noop(self):
df = tm.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='none')
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
tm.assert_frame_equal(data['exog'], X_exp)
tm.assert_series_equal(data['endog'], y_exp)
class CheckHasConstant(object):
def test_hasconst(self):
for x, result in zip(self.exogs, self.results):
mod = self.mod(self.y, x)
assert_equal(mod.k_constant, result[0])
assert_equal(mod.data.k_constant, result[0])
if result[1] is None:
assert_(mod.data.const_idx is None)
else:
assert_equal(mod.data.const_idx, result[1])
# extra check after fit, some models raise on singular
fit_kwds = getattr(self, 'fit_kwds', {})
try:
res = mod.fit(**fit_kwds)
except np.linalg.LinAlgError:
pass
else:
assert_equal(res.model.k_constant, result[0])
assert_equal(res.model.data.k_constant, result[0])
@classmethod
def setup_class(cls):
# create data
np.random.seed(0)
cls.y_c = np.random.randn(20)
cls.y_bin = (cls.y_c > 0).astype(int)
x1 = np.column_stack((np.ones(20), np.zeros(20)))
result1 = (1, 0)
x2 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5)).astype(float)
result2 = (1, None)
x3 = np.column_stack((np.arange(20), np.zeros(20)))
result3 = (0, None)
x4 = np.column_stack((np.arange(20), np.zeros((20, 2))))
result4 = (0, None)
x5 = np.column_stack((np.zeros(20), 0.5 * np.ones(20)))
result5 = (1, 1)
x5b = np.column_stack((np.arange(20), np.ones((20, 3))))
result5b = (1, 1)
x5c = np.column_stack((np.arange(20), np.ones((20, 3)) * [0.5, 1, 1]))
result5c = (1, 2)
# implicit and zero column
x6 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5,
np.zeros(20))).astype(float)
result6 = (1, None)
x7 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5,
np.zeros((20, 2)))).astype(float)
result7 = (1, None)
cls.exogs = (x1, x2, x3, x4, x5, x5b, x5c, x6, x7)
cls.results = (result1, result2, result3, result4, result5, result5b,
result5c, result6, result7)
cls._initialize()
class TestHasConstantOLS(CheckHasConstant):
@classmethod
def _initialize(cls):
cls.mod = OLS
cls.y = cls.y_c
class TestHasConstantGLM(CheckHasConstant):
@staticmethod
def mod(y, x):
return GLM(y, x, family=families.Binomial())
@classmethod
def _initialize(cls):
cls.y = cls.y_bin
class TestHasConstantLogit(CheckHasConstant):
@classmethod
def _initialize(cls):
cls.mod = Logit
cls.y = cls.y_bin
cls.fit_kwds = {'disp': False}
def test_dtype_object():
# see GH#880
X = np.random.random((40, 2))
df = pd.DataFrame(X)
df[2] = np.random.randint(2, size=40).astype('object')
df['constant'] = 1
y = pd.Series(np.random.randint(2, size=40))
np.testing.assert_raises(ValueError, sm_data.handle_data, y, df)
def test_formula_missing_extra_arrays():
np.random.seed(1)
# because patsy cannot turn off missing data-handling as of 0.3.0, we need
# separate tests to make sure that missing values are handled correctly
# when going through formulas
# there is a handle_formula_data step
# then there is the regular handle_data step
# see GH#2083
# the untested cases are endog/exog have missing. extra has missing.
# endog/exog are fine. extra has missing.
# endog/exog do or do not have missing and extra has wrong dimension
y = np.random.randn(10)
y_missing = y.copy()
y_missing[[2, 5]] = np.nan
X = np.random.randn(10)
X_missing = X.copy()
X_missing[[1, 3]] = np.nan
weights = np.random.uniform(size=10)
weights_missing = weights.copy()
weights_missing[[6]] = np.nan
weights_wrong_size = np.random.randn(12)
data = {'y': y,
'X': X,
'y_missing': y_missing,
'X_missing': X_missing,
'weights': weights,
'weights_missing': weights_missing}
data = pd.DataFrame.from_dict(data)
data['constant'] = 1
formula = 'y_missing ~ X_missing'
((endog, exog),
missing_idx, design_info) = handle_formula_data(data, None, formula,
depth=2,
missing='drop')
kwargs = {'missing_idx': missing_idx, 'missing': 'drop',
'weights': data['weights_missing']}
model_data = sm_data.handle_data(endog, exog, **kwargs)
data_nona = data.dropna()
assert_equal(data_nona['y'].values, model_data.endog)
assert_equal(data_nona[['constant', 'X']].values, model_data.exog)
assert_equal(data_nona['weights'].values, model_data.weights)
tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
(endog, exog), missing_idx, design_info = tmp
weights_2d = np.random.randn(10, 10)
weights_2d[[8, 7], [7, 8]] = np.nan # symmetric missing values
kwargs.update({'weights': weights_2d,
'missing_idx': missing_idx})
model_data2 = sm_data.handle_data(endog, exog, **kwargs)
good_idx = [0, 4, 6, 9]
assert_equal(data.loc[good_idx, 'y'], model_data2.endog)
assert_equal(data.loc[good_idx, ['constant', 'X']], model_data2.exog)
assert_equal(weights_2d[good_idx][:, good_idx], model_data2.weights)
tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
(endog, exog), missing_idx, design_info = tmp
kwargs.update({'weights': weights_wrong_size,
'missing_idx': missing_idx})
assert_raises(ValueError, sm_data.handle_data, endog, exog, **kwargs)
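# --- Illustrative sketch (not part of the original test module): the row mask the tests
# above expect 'drop' handling to apply, with extra arrays (e.g. weights) subset by the
# same mask; y is a 1-D endog array, X a 2-D exog array, and the helper name is ours.
def _demo_drop_mask(y, X, weights):
    mask = ~np.isnan(np.c_[y, X]).any(axis=1)
    return y[mask], X[mask], weights[mask]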
def test_raise_nonfinite_exog():
# we raise now in the has constant check before hitting the linear algebra
from statsmodels.tools.sm_exceptions import MissingDataError
x =
|
np.arange(10)
|
numpy.arange
|
import numpy as np
import os
import sys
import math
from Find_neighbors import find_neighbors
def atoms_dist(a, b, latt_mat):
""" Return the distance between atoms a and b.
Arguments:
-------------------
a, b : array or list, dim = (1, 3)
Coordinates of two atoms in the cell.
latt_mat : array, dim = (3, 3)
Matrix consisting of lacttice vectors a, b and c.
Returns:
-------------------
rtype : float
"""
return np.linalg.norm(np.dot((a - b), latt_mat), 2)
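# --- Illustrative sketch (not part of the original script): atoms_dist on a hypothetical
# 4 Angstrom cubic cell with two atoms given in fractional coordinates (values made up).
def _demo_atoms_dist():
    a = np.array([0.0, 0.0, 0.0])
    b = np.array([0.5, 0.5, 0.0])
    latt_mat = np.eye(3) * 4.0   # hypothetical cubic lattice matrix
    return atoms_dist(a, b, latt_mat)  # ~2.83 Angstrom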
def extract_training_data(num = 5000, cutoff_radius = 2.6, augment = False, multi = False):
""" Calculate Fe coordination numbers around oxygen. Return training, cross-validation, and test sets.
Fourteen Li3FeO3.5 POSCARs are used. 170 data points for each XANES spectrum.
If calculating the average coordination number by averaging 49 spectra together, augment is set to True, and we use regression.
Arguments:
----------------------------
num: the number of combinations of 49 spectra desired
cutoff_radius: the maximum distance, in angstroms, an Fe atom can be from an oxygen atom such that it contributes to that oxygen's Fe coordination number
augment: signals whether to use data augmentation to generate data by averaging 49 spectra at a time
multi: signals whether it's the multi-task learning model, and we need to extract both coordination number and charge data
Returns:
----------------------------
shuffled_X[:, :divider1] : array (170, 64% of data)
Training set input
shuffled_X[:, divider1:divider2] : array (170, 16% of data)
Dev set input
shuffled_X[:, divider2:] : array (170, 20% of data)
Test set input
shuffled_Y[:, :divider1] : array (1, 64% of data)
Training set output
shuffled_Y[:, divider1:divider2] : array (1, 16% of data)
Dev set output
shuffled_Y[:, divider2:] : array (1, 20% of data)
Test set output
numOutputNodes : int
Number of nodes of output layer, max(Fe coordination number) + 1
"""
X = np.zeros((170, 1), float) # 170 is the number of data points in the spectrum
Y = []
if multi:
Y2 = np.zeros((1, 1), float)
# extract the data, stored in three separate directories
if not multi: # there is no charge data for these three POSCARs, so do not use if in multi-task learning
for i in ['0K', '15ps', '20ps']:
prefix = "./new_data/original/"
FeCoorNum = []
tmp = np.loadtxt(prefix + i + "_Combo_O_all.dat")
# energy range [-1 eV ~ 14 eV], 170 data points
X = np.concatenate((X, tmp[436:606, 1:]), 1)
for O_index in range(1, 50): # cycles through the 49 oxygen atoms present
res = find_neighbors('O' + str(O_index), cutoff_radius, prefix + "POSCAR_" + i)
FeCoorNum.append(len(res['Fe'])) # this adds the calculated Fe coordination number for this particular oxygen atom
Y = Y + FeCoorNum
for n in ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']:
prefix = "./new_data/300k/"
FeCoorNum = []
tmp = np.loadtxt(prefix + n + "/Combo_O_all.dat")
X = np.concatenate((X, tmp[438:608, 1:]), 1)
for O_index in range(1, 50):
res = find_neighbors('O' + str(O_index), cutoff_radius, prefix + n + "/CONTCAR")
FeCoorNum.append(len(res['Fe']))
Y = Y + FeCoorNum
if multi:
charges = np.loadtxt(prefix + n + "/charge.dat").reshape(1, 49)
Y2 = np.concatenate((Y2, charges), 1)
for n in ['01', '02', '03', '04', '05']:
prefix = "./new_data/1000K/"
FeCoorNum = []
tmp = np.loadtxt(prefix + n + "/Combo_O_all.dat")
X = np.concatenate((X, tmp[438:608, 1:]), 1)
for O_index in range(1, 50):
res = find_neighbors('O' + str(O_index), cutoff_radius, prefix + n + "/CONTCAR")
FeCoorNum.append(len(res['Fe']))
Y = Y + FeCoorNum
if multi:
charges = np.loadtxt(prefix + n + "/charge.dat").reshape(1, 49)
Y2 = np.concatenate((Y2, charges), 1)
X = np.delete(X, 0, 1) # remove the first column of zeros used to initialize X
if augment:
X, Y = data_augmentation(X, Y, num) # computes averaged spectra and labels
Y = np.array(Y).reshape(1, X.shape[1])
if multi:
Y2 = np.delete(Y2, 0, 1)
Y2 = np.array(Y2).reshape(1, X.shape[1])
if augment:
numOutputNodes = 1
else:
numOutputNodes = int(Y.max()) + 1
# shuffle the input data
m = X.shape[1]
np.random.seed(0) # used if consistency is desired
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation]
# generate training, development, and test sets (64:16:20 ratio)
divider1 = math.floor(m*16/25)
divider2 = math.floor(m*4/5)
# normalization: compute the mean and stdev from the training data only, then standardize the whole data set with them
mu = np.mean(shuffled_X[:, :divider1], axis=1).reshape(170, 1)
std = np.std(shuffled_X[:, :divider1], axis=1).reshape(170, 1)
shuffled_X = (shuffled_X - mu) / std
if multi:
shuffled_Y2 = Y2[:, permutation]
return shuffled_X[:, :divider1], shuffled_X[:, divider1:divider2], shuffled_X[:, divider2:], shuffled_Y[:, :divider1], \
shuffled_Y[:, divider1:divider2], shuffled_Y[:, divider2:], shuffled_Y2[:, :divider1], shuffled_Y2[:, divider1:divider2], \
shuffled_Y2[:, divider2:], numOutputNodes
else:
return shuffled_X[:, :divider1], shuffled_X[:, divider1:divider2], shuffled_X[:, divider2:], shuffled_Y[:, :divider1], \
shuffled_Y[:, divider1:divider2], shuffled_Y[:, divider2:], numOutputNodes
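# --- Illustrative sketch (not part of the original script): a typical call, assuming the
# ./new_data/ directories described in the docstring are present on disk (helper name ours).
def _demo_extract_training_data():
    return extract_training_data(num=5000, cutoff_radius=2.6, augment=False, multi=False)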
def data_augmentation(X, Y, num):
""" Given data set X and its corresponding labels Y, augments the data by averaging 49 spectra
and labels together.
Arguments:
----------------------------
X, Y: the original spectra and corresponding labels
num: number of new spectra desired (bounded above by the number of ways to choose 49 of the examples)
Returns:
----------------------------
bigX, bigY: augmented versions of X and Y
"""
bigX = np.zeros((170, 1), float)
bigY = np.zeros((5, 1), float) # the labels can be 0, 1, 2, 3, or 4; this allows us to keep track of how many spectra of each individual label are used
Y = np.array(Y).reshape(1, len(Y))
Y_one_hot = np.zeros((5, Y.size)) # creates a one-hot vector with Y
Y_one_hot[Y.astype(int), np.arange(Y.size)] = 1
np.random.seed(0) # for consistency
for n in range(num): # repeat to get a 'num' number of samples
indices = np.random.choice(Y.size, 49, replace = False) # randomly selects 49 columns to use, without replacement
chooseX = X[:, indices]
chooseY = Y_one_hot[:, indices]
newX = np.sum(chooseX, axis=1).reshape((170, 1)) / 49 # averages 49 spectra
newY = np.sum(chooseY, axis=1).reshape((5, 1)) / 49 # averages 49 labels
bigX = np.concatenate((bigX, newX), 1)
bigY = np.concatenate((bigY, newY), 1)
# remove the first column of zeros that we used to initialize bigX and bigY
bigX = np.delete(bigX, 0, 1)
bigY = np.delete(bigY, 0, 1)
weights = np.array([0,1,2,3,4])
bigY = np.matmul(weights, bigY).reshape((1, num)) # finds the weighted average of all labels
return bigX, bigY
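# --- Illustrative sketch (not part of the original script): data_augmentation on toy inputs;
# the shapes and label range mirror the real data (170-point spectra, labels 0-4), but the
# values and the helper name are ours.
def _demo_data_augmentation():
    X_toy = np.random.rand(170, 60)            # 60 fake spectra, 170 points each
    Y_toy = list(np.random.randint(0, 5, 60))  # fake coordination numbers
    bigX, bigY = data_augmentation(X_toy, Y_toy, num=10)
    return bigX.shape, bigY.shape              # (170, 10), (1, 10)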
def real_averaged_spectra(num = 10000, cutoff_radius = 2.6):
""" Returns the average spectrum and coordination number from each POSCAR.
The 49 spectra are all taken from the same POSCAR file, rather than in data_augmentation, where a set of random 49 are taken. """
X = np.zeros((170, 1), float) # 170 is the number of data points in the spectrum
Y = []
X_avg = np.zeros((170, 1), float)
Y_avg = np.zeros((5, 1), float)
# extract the data, stored in three separate directories
for i in ['0K', '15ps', '20ps']:
prefix = "./new_data/original/"
FeCoorNum = []
tmp = np.loadtxt(prefix + i + "_Combo_O_all.dat")
# energy range [-1 eV ~ 14 eV], 170 data points
X = np.concatenate((X, tmp[436:606, 1:]), 1)
for O_index in range(1, 50): # cycles through the 49 oxygen atoms present
res = find_neighbors('O' + str(O_index), cutoff_radius, prefix + "POSCAR_" + i)
FeCoorNum.append(len(res['Fe'])) # this adds the calculated Fe coordination number for this particular oxygen atom
Y = Y + FeCoorNum
for n in ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']:
prefix = "./new_data/300k/"
FeCoorNum = []
tmp = np.loadtxt(prefix + n + "/Combo_O_all.dat")
X = np.concatenate((X, tmp[438:608, 1:]), 1)
for O_index in range(1, 50):
res = find_neighbors('O' + str(O_index), cutoff_radius, prefix + n + "/CONTCAR")
FeCoorNum.append(len(res['Fe']))
Y = Y + FeCoorNum
for n in ['01', '02', '03', '04', '05']:
prefix = "./new_data/1000K/"
FeCoorNum = []
tmp = np.loadtxt(prefix + n + "/Combo_O_all.dat")
X = np.concatenate((X, tmp[438:608, 1:]), 1)
for O_index in range(1, 50):
res = find_neighbors('O' + str(O_index), cutoff_radius, prefix + n + "/CONTCAR")
FeCoorNum.append(len(res['Fe']))
Y = Y + FeCoorNum
X_old = np.delete(X, 0, 1) # remove the first column of zeros used to initialize X
Y_new = np.array(Y).reshape(1, len(Y))
Y_one_hot = np.zeros((5, Y_new.size)) # creates a one-hot vector with Y
Y_one_hot[Y_new.astype(int), np.arange(Y_new.size)] = 1
for i in range(18): # 18, because there are 18 POSCAR/CONTCAR files
chooseX = X_old[:, i*49:(i+1)*49]  # use X_old (initial zero column removed) so columns line up with Y_one_hot
chooseY = Y_one_hot[:, i*49:(i+1)*49]
newX = np.sum(chooseX, axis=1).reshape((170, 1)) / 49 # averages 49 spectra together
newY = np.sum(chooseY, axis=1).reshape((5, 1)) / 49 # averages 49 labels together
X_avg = np.concatenate((X_avg, newX), 1)
Y_avg = np.concatenate((Y_avg, newY), 1)
# remove the first column of zeros that we used to initialize bigX and bigY
X_avg = np.delete(X_avg, 0, 1)
Y_avg = np.delete(Y_avg, 0, 1)
weights = np.array([0,1,2,3,4])
Y_avg = np.matmul(weights, Y_avg).reshape((1, 18)) # finds the weighted average of all labels
X, _ = data_augmentation(X_old, Y, num) # computes the averaged spectra used in training
divider1 = math.floor(X.shape[1]*16/25)
# preprocessing on the real spectra: compute the mean and stdev for only the training data,
# then use these values on the real averaged spectra, because they must be normalized with
# the same statistics the model was trained on
mu = np.mean(X[:, :divider1], axis=1).reshape(170, 1)
std =
|
np.std(X[:, :divider1], axis=1)
|
numpy.std
|
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from __future__ import print_function
import locale
from warnings import warn
import time
from scipy.optimize import curve_fit
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state, check_array
from sklearn.utils.validation import check_is_fitted
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import normalize
from sklearn.neighbors import KDTree
try:
import joblib
except ImportError:
# sklearn.externals.joblib is deprecated in 0.21, will be removed in 0.23
from sklearn.externals import joblib
import numpy as np
import scipy.sparse
from scipy.sparse import tril as sparse_tril, triu as sparse_triu
import scipy.sparse.csgraph
import numba
import umap.distances as dist
import umap.sparse as sparse
from umap.utils import (
submatrix,
ts,
csr_unique,
fast_knn_indices,
)
from umap.spectral import spectral_layout
from umap.layouts import (
optimize_layout_euclidean,
optimize_layout_generic,
optimize_layout_inverse,
)
from pynndescent import NNDescent
from pynndescent.distances import named_distances as pynn_named_distances
from pynndescent.sparse import sparse_named_distances as pynn_sparse_named_distances
locale.setlocale(locale.LC_NUMERIC, "C")
INT32_MIN = np.iinfo(np.int32).min + 1
INT32_MAX = np.iinfo(np.int32).max - 1
SMOOTH_K_TOLERANCE = 1e-5
MIN_K_DIST_SCALE = 1e-3
NPY_INFINITY = np.inf
DISCONNECTION_DISTANCES = {
"correlation": 1,
"cosine": 1,
"hellinger": 1,
"jaccard": 1,
"dice": 1,
}
def flatten_iter(container):
for i in container:
if isinstance(i, (list, tuple)):
for j in flatten_iter(i):
yield j
else:
yield i
def flattened(container):
return tuple(flatten_iter(container))
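# --- Illustrative sketch (not part of the original module): flattened() collapses
# arbitrarily nested lists/tuples into a single flat tuple (helper name ours).
def _demo_flattened():
    assert flattened([1, (2, [3, 4]), 5]) == (1, 2, 3, 4, 5)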
def breadth_first_search(adjmat, start, min_vertices):
explored = []
queue = [start]
levels = {}
levels[start] = 0
max_level = np.inf
visited = [start]
while queue:
node = queue.pop(0)
explored.append(node)
if max_level == np.inf and len(explored) > min_vertices:
max_level = max(levels.values())
if levels[node] + 1 < max_level:
neighbors = adjmat[node].indices
for neighbour in neighbors:
if neighbour not in visited:
queue.append(neighbour)
visited.append(neighbour)
levels[neighbour] = levels[node] + 1
return np.array(explored)
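# --- Illustrative sketch (not part of the original module): breadth_first_search over a
# small path graph 0-1-2-3 stored as a CSR adjacency matrix (toy input, helper name ours).
def _demo_breadth_first_search():
    adj = scipy.sparse.csr_matrix(np.array([[0, 1, 0, 0],
                                            [1, 0, 1, 0],
                                            [0, 1, 0, 1],
                                            [0, 0, 1, 0]]))
    return breadth_first_search(adj, 0, min_vertices=2)  # array([0, 1, 2])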
def raise_disconnected_warning(
edges_removed,
vertices_disconnected,
disconnection_distance,
total_rows,
threshold=0.1,
verbose=False,
):
"""A simple wrapper function to avoid large amounts of code repetition."""
if verbose & (vertices_disconnected == 0) & (edges_removed > 0):
print(
f"Disconnection_distance = {disconnection_distance} has removed {edges_removed} edges. "
f"This is not a problem as no vertices were disconnected."
)
elif (vertices_disconnected > 0) & (
vertices_disconnected <= threshold * total_rows
):
warn(
f"A few of your vertices were disconnected from the manifold. This shouldn't cause problems.\n"
f"Disconnection_distance = {disconnection_distance} has removed {edges_removed} edges.\n"
f"It has only fully disconnected {vertices_disconnected} vertices.\n"
f"Use umap.utils.disconnected_vertices() to identify them.",
)
elif vertices_disconnected > threshold * total_rows:
warn(
f"A large number of your vertices were disconnected from the manifold.\n"
f"Disconnection_distance = {disconnection_distance} has removed {edges_removed} edges.\n"
f"It has fully disconnected {vertices_disconnected} vertices.\n"
f"You might consider using find_disconnected_points() to find and remove these points from your data.\n"
f"Use umap.utils.disconnected_vertices() to identify them.",
)
@numba.njit(
locals={
"psum": numba.types.float32,
"lo": numba.types.float32,
"mid": numba.types.float32,
"hi": numba.types.float32,
},
fastmath=True,
) # benchmarking `parallel=True` shows it to *decrease* performance
def smooth_knn_dist(distances, k, n_iter=64, local_connectivity=1.0, bandwidth=1.0):
"""Compute a continuous version of the distance to the kth nearest
neighbor. That is, this is similar to knn-distance but allows continuous
k values rather than requiring an integral k. In essence we are simply
computing the distance such that the cardinality of the fuzzy set we generate
is k.
Parameters
----------
distances: array of shape (n_samples, n_neighbors)
Distances to nearest neighbors for each sample. Each row should be a
sorted list of distances to a given sample's nearest neighbors.
k: float
The number of nearest neighbors to approximate for.
n_iter: int (optional, default 64)
We need to binary search for the correct distance value. This is the
max number of iterations to use in such a search.
local_connectivity: int (optional, default 1)
The local connectivity required -- i.e. the number of nearest
neighbors that should be assumed to be connected at a local level.
The higher this value the more connected the manifold becomes
locally. In practice this should be not more than the local intrinsic
dimension of the manifold.
bandwidth: float (optional, default 1)
The target bandwidth of the kernel, larger values will produce
larger return values.
Returns
-------
knn_dist: array of shape (n_samples,)
The distance to kth nearest neighbor, as suitably approximated.
nn_dist: array of shape (n_samples,)
The distance to the 1st nearest neighbor for each point.
"""
target = np.log2(k) * bandwidth
rho = np.zeros(distances.shape[0], dtype=np.float32)
result = np.zeros(distances.shape[0], dtype=np.float32)
mean_distances = np.mean(distances)
for i in range(distances.shape[0]):
lo = 0.0
hi = NPY_INFINITY
mid = 1.0
# TODO: This is very inefficient, but will do for now. FIXME
ith_distances = distances[i]
non_zero_dists = ith_distances[ith_distances > 0.0]
if non_zero_dists.shape[0] >= local_connectivity:
index = int(np.floor(local_connectivity))
interpolation = local_connectivity - index
if index > 0:
rho[i] = non_zero_dists[index - 1]
if interpolation > SMOOTH_K_TOLERANCE:
rho[i] += interpolation * (
non_zero_dists[index] - non_zero_dists[index - 1]
)
else:
rho[i] = interpolation * non_zero_dists[0]
elif non_zero_dists.shape[0] > 0:
rho[i] = np.max(non_zero_dists)
for n in range(n_iter):
psum = 0.0
for j in range(1, distances.shape[1]):
d = distances[i, j] - rho[i]
if d > 0:
psum += np.exp(-(d / mid))
else:
psum += 1.0
if np.fabs(psum - target) < SMOOTH_K_TOLERANCE:
break
if psum > target:
hi = mid
mid = (lo + hi) / 2.0
else:
lo = mid
if hi == NPY_INFINITY:
mid *= 2
else:
mid = (lo + hi) / 2.0
result[i] = mid
# TODO: This is very inefficient, but will do for now. FIXME
if rho[i] > 0.0:
mean_ith_distances = np.mean(ith_distances)
if result[i] < MIN_K_DIST_SCALE * mean_ith_distances:
result[i] = MIN_K_DIST_SCALE * mean_ith_distances
else:
if result[i] < MIN_K_DIST_SCALE * mean_distances:
result[i] = MIN_K_DIST_SCALE * mean_distances
return result, rho
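# --- Illustrative sketch (not part of the original module): smooth_knn_dist on a toy,
# row-sorted kNN distance matrix (4 samples, 5 neighbours). The toy values and the helper
# name are ours; the first call triggers numba compilation.
def _demo_smooth_knn_dist():
    dists = np.sort(np.random.rand(4, 5).astype(np.float32), axis=1)
    sigmas, rhos = smooth_knn_dist(dists, 5.0)
    return sigmas.shape, rhos.shape  # both (4,)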
def nearest_neighbors(
X,
n_neighbors,
metric,
metric_kwds,
angular,
random_state,
low_memory=True,
use_pynndescent=True,
n_jobs=-1,
verbose=False,
):
"""Compute the ``n_neighbors`` nearest points for each data point in ``X``
under ``metric``. This may be exact, but more likely is approximated via
nearest neighbor descent.
Parameters
----------
X: array of shape (n_samples, n_features)
The input data to compute the k-neighbor graph of.
n_neighbors: int
The number of nearest neighbors to compute for each sample in ``X``.
metric: string or callable
The metric to use for the computation.
metric_kwds: dict
Any arguments to pass to the metric computation function.
angular: bool
Whether to use angular rp trees in NN approximation.
random_state: np.random state
The random state to use for approximate NN computations.
low_memory: bool (optional, default True)
Whether to pursue lower memory NNdescent.
verbose: bool (optional, default False)
Whether to print status data during the computation.
Returns
-------
knn_indices: array of shape (n_samples, n_neighbors)
The indices of the ``n_neighbors`` closest points in the dataset.
knn_dists: array of shape (n_samples, n_neighbors)
The distances to the ``n_neighbors`` closest points in the dataset.
rp_forest: list of trees
The random projection forest used for searching (if used, None otherwise)
"""
if verbose:
print(ts(), "Finding Nearest Neighbors")
if metric == "precomputed":
# Note that this does not support sparse distance matrices yet ...
# Compute indices of n nearest neighbors
knn_indices = fast_knn_indices(X, n_neighbors)
# knn_indices = np.argsort(X)[:, :n_neighbors]
# Compute the nearest neighbor distances
# (equivalent to np.sort(X)[:,:n_neighbors])
knn_dists = X[np.arange(X.shape[0])[:, None], knn_indices].copy()
# Prune any nearest neighbours that are infinite distance apart.
disconnected_index = knn_dists == np.inf
knn_indices[disconnected_index] = -1
knn_search_index = None
else:
# TODO: Hacked values for now
n_trees = min(64, 5 + int(round((X.shape[0]) ** 0.5 / 20.0)))
n_iters = max(5, int(round(np.log2(X.shape[0]))))
knn_search_index = NNDescent(
X,
n_neighbors=n_neighbors,
metric=metric,
metric_kwds=metric_kwds,
random_state=random_state,
n_trees=n_trees,
n_iters=n_iters,
max_candidates=60,
low_memory=low_memory,
n_jobs=n_jobs,
verbose=verbose,
)
knn_indices, knn_dists = knn_search_index.neighbor_graph
if verbose:
print(ts(), "Finished Nearest Neighbor Search")
return knn_indices, knn_dists, knn_search_index
@numba.njit(
locals={
"knn_dists": numba.types.float32[:, ::1],
"sigmas": numba.types.float32[::1],
"rhos": numba.types.float32[::1],
"val": numba.types.float32,
},
parallel=True,
fastmath=True,
)
def compute_membership_strengths(
knn_indices, knn_dists, sigmas, rhos, return_dists=False, bipartite=False,
):
"""Construct the membership strength data for the 1-skeleton of each local
fuzzy simplicial set -- this is formed as a sparse matrix where each row is
a local fuzzy simplicial set, with a membership strength for the
1-simplex to each other data point.
Parameters
----------
knn_indices: array of shape (n_samples, n_neighbors)
The indices of the ``n_neighbors`` closest points in the dataset.
knn_dists: array of shape (n_samples, n_neighbors)
The distances to the ``n_neighbors`` closest points in the dataset.
sigmas: array of shape(n_samples)
The normalization factor derived from the metric tensor approximation.
rhos: array of shape(n_samples)
The local connectivity adjustment.
return_dists: bool (optional, default False)
Whether to return the pairwise distance associated with each edge
bipartite: bool (optional, default False)
Does the nearest neighbour set represent a bipartite graph? That is are the
nearest neighbour indices from the same point set as the row indices?
Returns
-------
rows: array of shape (n_samples * n_neighbors)
Row data for the resulting sparse matrix (coo format)
cols: array of shape (n_samples * n_neighbors)
Column data for the resulting sparse matrix (coo format)
vals: array of shape (n_samples * n_neighbors)
Entries for the resulting sparse matrix (coo format)
dists: array of shape (n_samples * n_neighbors)
Distance associated with each entry in the resulting sparse matrix
"""
n_samples = knn_indices.shape[0]
n_neighbors = knn_indices.shape[1]
rows = np.zeros(knn_indices.size, dtype=np.int32)
cols = np.zeros(knn_indices.size, dtype=np.int32)
vals = np.zeros(knn_indices.size, dtype=np.float32)
if return_dists:
dists = np.zeros(knn_indices.size, dtype=np.float32)
else:
dists = None
for i in range(n_samples):
for j in range(n_neighbors):
if knn_indices[i, j] == -1:
continue # We didn't get the full knn for i
# If applied to an adjacency matrix points shouldn't be similar to themselves.
# If applied to an incidence matrix (or bipartite) then the row and column indices are different.
if (bipartite == False) & (knn_indices[i, j] == i):
val = 0.0
elif knn_dists[i, j] - rhos[i] <= 0.0 or sigmas[i] == 0.0:
val = 1.0
else:
val = np.exp(-((knn_dists[i, j] - rhos[i]) / (sigmas[i])))
rows[i * n_neighbors + j] = i
cols[i * n_neighbors + j] = knn_indices[i, j]
vals[i * n_neighbors + j] = val
if return_dists:
dists[i * n_neighbors + j] = knn_dists[i, j]
return rows, cols, vals, dists
def fuzzy_simplicial_set(
X,
n_neighbors,
random_state,
metric,
metric_kwds={},
knn_indices=None,
knn_dists=None,
angular=False,
set_op_mix_ratio=1.0,
local_connectivity=1.0,
apply_set_operations=True,
verbose=False,
return_dists=None,
):
"""Given a set of data X, a neighborhood size, and a measure of distance
compute the fuzzy simplicial set (here represented as a fuzzy graph in
the form of a sparse matrix) associated to the data. This is done by
locally approximating geodesic distance at each point, creating a fuzzy
simplicial set for each such point, and then combining all the local
fuzzy simplicial sets into a global one via a fuzzy union.
Parameters
----------
X: array of shape (n_samples, n_features)
The data to be modelled as a fuzzy simplicial set.
n_neighbors: int
The number of neighbors to use to approximate geodesic distance.
Larger numbers induce more global estimates of the manifold that can
miss finer detail, while smaller values will focus on fine manifold
structure to the detriment of the larger picture.
random_state: numpy RandomState or equivalent
A state capable being used as a numpy random state.
metric: string or function (optional, default 'euclidean')
The metric to use to compute distances in high dimensional space.
If a string is passed it must match a valid predefined metric. If
a general metric is required a function that takes two 1d arrays and
returns a float can be provided. For performance purposes it is
required that this be a numba jit'd function. Valid string metrics
include:
* euclidean (or l2)
* manhattan (or l1)
* cityblock
* braycurtis
* canberra
* chebyshev
* correlation
* cosine
* dice
* hamming
* jaccard
* kulsinski
* ll_dirichlet
* mahalanobis
* matching
* minkowski
* rogerstanimoto
* russellrao
* seuclidean
* sokalmichener
* sokalsneath
* sqeuclidean
* yule
* wminkowski
Metrics that take arguments (such as minkowski, mahalanobis etc.)
can have arguments passed via the metric_kwds dictionary. At this
time care must be taken and dictionary elements must be ordered
appropriately; this will hopefully be fixed in the future.
metric_kwds: dict (optional, default {})
Arguments to pass on to the metric, such as the ``p`` value for
Minkowski distance.
knn_indices: array of shape (n_samples, n_neighbors) (optional)
If the k-nearest neighbors of each point has already been calculated
you can pass them in here to save computation time. This should be
an array with the indices of the k-nearest neighbors as a row for
each data point.
knn_dists: array of shape (n_samples, n_neighbors) (optional)
If the k-nearest neighbors of each point has already been calculated
you can pass them in here to save computation time. This should be
an array with the distances of the k-nearest neighbors as a row for
each data point.
angular: bool (optional, default False)
Whether to use angular/cosine distance for the random projection
forest for seeding NN-descent to determine approximate nearest
neighbors.
set_op_mix_ratio: float (optional, default 1.0)
Interpolate between (fuzzy) union and intersection as the set operation
used to combine local fuzzy simplicial sets to obtain a global fuzzy
simplicial sets. Both fuzzy set operations use the product t-norm.
The value of this parameter should be between 0.0 and 1.0; a value of
1.0 will use a pure fuzzy union, while 0.0 will use a pure fuzzy
intersection.
local_connectivity: int (optional, default 1)
The local connectivity required -- i.e. the number of nearest
neighbors that should be assumed to be connected at a local level.
The higher this value the more connected the manifold becomes
locally. In practice this should be not more than the local intrinsic
dimension of the manifold.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
return_dists: bool or None (optional, default None)
Whether to return the pairwise distance associated with each edge.
Returns
-------
fuzzy_simplicial_set: coo_matrix
A fuzzy simplicial set represented as a sparse matrix. The (i,
j) entry of the matrix represents the membership strength of the
1-simplex between the ith and jth sample points.
"""
if knn_indices is None or knn_dists is None:
knn_indices, knn_dists, _ = nearest_neighbors(
X, n_neighbors, metric, metric_kwds, angular, random_state, verbose=verbose,
)
knn_dists = knn_dists.astype(np.float32)
sigmas, rhos = smooth_knn_dist(
knn_dists, float(n_neighbors), local_connectivity=float(local_connectivity),
)
rows, cols, vals, dists = compute_membership_strengths(
knn_indices, knn_dists, sigmas, rhos, return_dists
)
result = scipy.sparse.coo_matrix(
(vals, (rows, cols)), shape=(X.shape[0], X.shape[0])
)
result.eliminate_zeros()
if apply_set_operations:
transpose = result.transpose()
prod_matrix = result.multiply(transpose)
result = (
set_op_mix_ratio * (result + transpose - prod_matrix)
+ (1.0 - set_op_mix_ratio) * prod_matrix
)
result.eliminate_zeros()
if return_dists is None:
return result, sigmas, rhos
else:
if return_dists:
dmat = scipy.sparse.coo_matrix(
(dists, (rows, cols)), shape=(X.shape[0], X.shape[0])
)
dists = dmat.maximum(dmat.transpose()).todok()
else:
dists = None
return result, sigmas, rhos, dists
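# --- Illustrative sketch (not part of the original module): building a fuzzy simplicial
# set for a small random point cloud. The toy data, sizes and helper name are ours; the
# first call triggers numba/pynndescent JIT compilation and may be slow.
def _demo_fuzzy_simplicial_set():
    rng = check_random_state(42)
    X_toy = rng.normal(size=(50, 4)).astype(np.float32)
    graph, sigmas, rhos = fuzzy_simplicial_set(
        X_toy, n_neighbors=10, random_state=rng, metric="euclidean"
    )
    return graph.shape  # (50, 50) sparse matrix of membership strengths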
@numba.njit()
def fast_intersection(rows, cols, values, target, unknown_dist=1.0, far_dist=5.0):
"""Under the assumption of categorical distance for the intersecting
simplicial set perform a fast intersection.
Parameters
----------
rows: array
An array of the row of each non-zero in the sparse matrix
representation.
cols: array
An array of the column of each non-zero in the sparse matrix
representation.
values: array
An array of the value of each non-zero in the sparse matrix
representation.
target: array of shape (n_samples)
The categorical labels to use in the intersection.
unknown_dist: float (optional, default 1.0)
The distance an unknown label (-1) is assumed to be from any point.
far_dist: float (optional, default 5.0)
The distance between unmatched labels.
Returns
-------
None
"""
for nz in range(rows.shape[0]):
i = rows[nz]
j = cols[nz]
if (target[i] == -1) or (target[j] == -1):
values[nz] *= np.exp(-unknown_dist)
elif target[i] != target[j]:
values[nz] *= np.exp(-far_dist)
return
@numba.jit()
def fast_metric_intersection(
rows, cols, values, discrete_space, metric, metric_args, scale
):
"""Under the assumption of categorical distance for the intersecting
simplicial set perform a fast intersection.
Parameters
----------
rows: array
An array of the row of each non-zero in the sparse matrix
representation.
cols: array
An array of the column of each non-zero in the sparse matrix
representation.
values: array of shape
An array of the values of each non-zero in the sparse matrix
representation.
discrete_space: array of shape (n_samples, n_features)
The vectors of categorical labels to use in the intersection.
metric: numba function
The function used to calculate distance over the target array.
scale: float
A scaling to apply to the metric.
Returns
-------
None
"""
for nz in range(rows.shape[0]):
i = rows[nz]
j = cols[nz]
dist = metric(discrete_space[i], discrete_space[j], *metric_args)
values[nz] *= np.exp(-(scale * dist))
return
@numba.njit()
def reprocess_row(probabilities, k=15, n_iters=32):
target = np.log2(k)
lo = 0.0
hi = NPY_INFINITY
mid = 1.0
for n in range(n_iters):
psum = 0.0
for j in range(probabilities.shape[0]):
psum += pow(probabilities[j], mid)
if np.fabs(psum - target) < SMOOTH_K_TOLERANCE:
break
if psum < target:
hi = mid
mid = (lo + hi) / 2.0
else:
lo = mid
if hi == NPY_INFINITY:
mid *= 2
else:
mid = (lo + hi) / 2.0
return np.power(probabilities, mid)
@numba.njit()
def reset_local_metrics(simplicial_set_indptr, simplicial_set_data):
for i in range(simplicial_set_indptr.shape[0] - 1):
simplicial_set_data[
simplicial_set_indptr[i] : simplicial_set_indptr[i + 1]
] = reprocess_row(
simplicial_set_data[simplicial_set_indptr[i] : simplicial_set_indptr[i + 1]]
)
return
def reset_local_connectivity(simplicial_set, reset_local_metric=False):
"""Reset the local connectivity requirement -- each data sample should
have complete confidence in at least one 1-simplex in the simplicial set.
We can enforce this by locally rescaling confidences, and then remerging the
different local simplicial sets together.
Parameters
----------
simplicial_set: sparse matrix
The simplicial set for which to recalculate with respect to local
connectivity.
Returns
-------
simplicial_set: sparse_matrix
The recalculated simplicial set, now with the local connectivity
assumption restored.
"""
simplicial_set = normalize(simplicial_set, norm="max")
if reset_local_metric:
simplicial_set = simplicial_set.tocsr()
reset_local_metrics(simplicial_set.indptr, simplicial_set.data)
simplicial_set = simplicial_set.tocoo()
transpose = simplicial_set.transpose()
prod_matrix = simplicial_set.multiply(transpose)
simplicial_set = simplicial_set + transpose - prod_matrix
simplicial_set.eliminate_zeros()
return simplicial_set
def discrete_metric_simplicial_set_intersection(
simplicial_set,
discrete_space,
unknown_dist=1.0,
far_dist=5.0,
metric=None,
metric_kws={},
metric_scale=1.0,
):
"""Combine a fuzzy simplicial set with another fuzzy simplicial set
generated from discrete metric data using discrete distances. The target
data is assumed to be categorical label data (a vector of labels),
and this will update the fuzzy simplicial set to respect that label data.
TODO: optional category cardinality based weighting of distance
Parameters
----------
simplicial_set: sparse matrix
The input fuzzy simplicial set.
discrete_space: array of shape (n_samples)
The categorical labels to use in the intersection.
unknown_dist: float (optional, default 1.0)
The distance an unknown label (-1) is assumed to be from any point.
far_dist: float (optional, default 5.0)
The distance between unmatched labels.
metric: str (optional, default None)
If not None, then use this metric to determine the
distance between values.
metric_scale: float (optional, default 1.0)
If using a custom metric scale the distance values by
this value -- this controls the weighting of the
intersection. Larger values weight more toward target.
Returns
-------
simplicial_set: sparse matrix
The resulting intersected fuzzy simplicial set.
"""
simplicial_set = simplicial_set.tocoo()
if metric is not None:
# We presume target is now a 2d array, with each row being a
# vector of target info
if metric in dist.named_distances:
metric_func = dist.named_distances[metric]
else:
raise ValueError("Discrete intersection metric is not recognized")
fast_metric_intersection(
simplicial_set.row,
simplicial_set.col,
simplicial_set.data,
discrete_space,
metric_func,
tuple(metric_kws.values()),
metric_scale,
)
else:
fast_intersection(
simplicial_set.row,
simplicial_set.col,
simplicial_set.data,
discrete_space,
unknown_dist,
far_dist,
)
simplicial_set.eliminate_zeros()
return reset_local_connectivity(simplicial_set)
def general_simplicial_set_intersection(
simplicial_set1, simplicial_set2, weight=0.5, right_complement=False
):
if right_complement:
result = simplicial_set1.tocoo()
else:
result = (simplicial_set1 + simplicial_set2).tocoo()
left = simplicial_set1.tocsr()
right = simplicial_set2.tocsr()
sparse.general_sset_intersection(
left.indptr,
left.indices,
left.data,
right.indptr,
right.indices,
right.data,
result.row,
result.col,
result.data,
mix_weight=weight,
right_complement=right_complement,
)
return result
def general_simplicial_set_union(simplicial_set1, simplicial_set2):
result = (simplicial_set1 + simplicial_set2).tocoo()
left = simplicial_set1.tocsr()
right = simplicial_set2.tocsr()
sparse.general_sset_union(
left.indptr,
left.indices,
left.data,
right.indptr,
right.indices,
right.data,
result.row,
result.col,
result.data,
)
return result
def make_epochs_per_sample(weights, n_epochs):
"""Given a set of weights and number of epochs generate the number of
epochs per sample for each weight.
Parameters
----------
weights: array of shape (n_1_simplices)
The weights of how much we wish to sample each 1-simplex.
n_epochs: int
The total number of epochs we want to train for.
Returns
-------
An array of number of epochs per sample, one for each 1-simplex.
"""
result = -1.0 * np.ones(weights.shape[0], dtype=np.float64)
n_samples = n_epochs * (weights / weights.max())
result[n_samples > 0] = float(n_epochs) / n_samples[n_samples > 0]
return result
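# --- Illustrative sketch (not part of the original module): for edge weights
# [1.0, 0.5, 0.1] and 100 epochs, make_epochs_per_sample returns [1., 2., 10.]:
# the heaviest edge is sampled every epoch, the lightest every 10 epochs
# (epochs between samples = w_max / w_i). Helper name ours.
def _demo_make_epochs_per_sample():
    return make_epochs_per_sample(np.array([1.0, 0.5, 0.1]), 100)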
def simplicial_set_embedding(
data,
graph,
n_components,
initial_alpha,
a,
b,
gamma,
negative_sample_rate,
n_epochs,
init,
random_state,
metric,
metric_kwds,
densmap,
densmap_kwds,
output_dens,
output_metric=dist.named_distances_with_gradients["euclidean"],
output_metric_kwds={},
euclidean_output=True,
parallel=False,
verbose=False,
):
"""Perform a fuzzy simplicial set embedding, using a specified
initialisation method and then minimizing the fuzzy set cross entropy
between the 1-skeletons of the high and low dimensional fuzzy simplicial
sets.
Parameters
----------
data: array of shape (n_samples, n_features)
The source data to be embedded by UMAP.
graph: sparse matrix
The 1-skeleton of the high dimensional fuzzy simplicial set as
represented by a graph for which we require a sparse matrix for the
(weighted) adjacency matrix.
n_components: int
The dimensionality of the euclidean space into which to embed the data.
initial_alpha: float
Initial learning rate for the SGD.
a: float
Parameter of differentiable approximation of right adjoint functor
b: float
Parameter of differentiable approximation of right adjoint functor
gamma: float
Weight to apply to negative samples.
negative_sample_rate: int (optional, default 5)
The number of negative samples to select per positive sample
in the optimization process. Increasing this value will result
in greater repulsive force being applied, greater optimization
cost, but slightly more accuracy.
n_epochs: int (optional, default 0)
The number of training epochs to be used in optimizing the
low dimensional embedding. Larger values result in more accurate
embeddings. If 0 is specified a value will be selected based on
the size of the input dataset (200 for large datasets, 500 for small).
init: string
How to initialize the low dimensional embedding. Options are:
* 'spectral': use a spectral embedding of the fuzzy 1-skeleton
* 'random': assign initial embedding positions at random.
* A numpy array of initial embedding positions.
random_state: numpy RandomState or equivalent
A state capable being used as a numpy random state.
metric: string or callable
The metric used to measure distance in high dimensional space; used if
multiple connected components need to be laid out.
metric_kwds: dict
Key word arguments to be passed to the metric function; used if
multiple connected components need to be laid out.
densmap: bool
Whether to use the density-augmented objective function to optimize
the embedding according to the densMAP algorithm.
densmap_kwds: dict
Key word arguments to be used by the densMAP optimization.
output_dens: bool
Whether to output local radii in the original data and the embedding.
output_metric: function
Function returning the distance between two points in embedding space and
the gradient of the distance wrt the first argument.
output_metric_kwds: dict
Key word arguments to be passed to the output_metric function.
euclidean_output: bool
Whether to use the faster code specialised for euclidean output metrics
parallel: bool (optional, default False)
Whether to run the computation using numba parallel.
Running in parallel is non-deterministic, and is not used
if a random seed has been set, to ensure reproducibility.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
embedding: array of shape (n_samples, n_components)
The optimized embedding of ``graph`` into an ``n_components`` dimensional
euclidean space.
aux_data: dict
Auxiliary output returned with the embedding. When densMAP extension
is turned on, this dictionary includes local radii in the original
data (``rad_orig``) and in the embedding (``rad_emb``).
"""
graph = graph.tocoo()
graph.sum_duplicates()
n_vertices = graph.shape[1]
if n_epochs <= 0:
# For smaller datasets we can use more epochs
if graph.shape[0] <= 10000:
n_epochs = 500
else:
n_epochs = 200
# Use more epochs for densMAP
if densmap:
n_epochs += 200
graph.data[graph.data < (graph.data.max() / float(n_epochs))] = 0.0
graph.eliminate_zeros()
if isinstance(init, str) and init == "random":
embedding = random_state.uniform(
low=-10.0, high=10.0, size=(graph.shape[0], n_components)
).astype(np.float32)
elif isinstance(init, str) and init == "spectral":
# We add a little noise to avoid local minima for optimization to come
initialisation = spectral_layout(
data,
graph,
n_components,
random_state,
metric=metric,
metric_kwds=metric_kwds,
)
expansion = 10.0 / np.abs(initialisation).max()
embedding = (initialisation * expansion).astype(
np.float32
) + random_state.normal(
scale=0.0001, size=[graph.shape[0], n_components]
).astype(
np.float32
)
else:
init_data = np.array(init)
if len(init_data.shape) == 2:
if np.unique(init_data, axis=0).shape[0] < init_data.shape[0]:
tree = KDTree(init_data)
dist, ind = tree.query(init_data, k=2)
nndist = np.mean(dist[:, 1])
embedding = init_data + random_state.normal(
scale=0.001 * nndist, size=init_data.shape
).astype(np.float32)
else:
embedding = init_data
epochs_per_sample = make_epochs_per_sample(graph.data, n_epochs)
head = graph.row
tail = graph.col
weight = graph.data
rng_state = random_state.randint(INT32_MIN, INT32_MAX, 3).astype(np.int64)
aux_data = {}
if densmap or output_dens:
if verbose:
print(ts() + " Computing original densities")
dists = densmap_kwds["graph_dists"]
mu_sum = np.zeros(n_vertices, dtype=np.float32)
ro = np.zeros(n_vertices, dtype=np.float32)
for i in range(len(head)):
j = head[i]
k = tail[i]
D = dists[j, k] * dists[j, k] # match sq-Euclidean used for embedding
mu = graph.data[i]
ro[j] += mu * D
ro[k] += mu * D
mu_sum[j] += mu
mu_sum[k] += mu
epsilon = 1e-8
ro = np.log(epsilon + (ro / mu_sum))
if densmap:
R = (ro - np.mean(ro)) / np.std(ro)
densmap_kwds["mu"] = graph.data
densmap_kwds["mu_sum"] = mu_sum
densmap_kwds["R"] = R
if output_dens:
aux_data["rad_orig"] = ro
embedding = (
10.0
* (embedding - np.min(embedding, 0))
/ (np.max(embedding, 0) - np.min(embedding, 0))
).astype(np.float32, order="C")
if euclidean_output:
embedding = optimize_layout_euclidean(
embedding,
embedding,
head,
tail,
n_epochs,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma,
initial_alpha,
negative_sample_rate,
parallel=parallel,
verbose=verbose,
densmap=densmap,
densmap_kwds=densmap_kwds,
)
else:
embedding = optimize_layout_generic(
embedding,
embedding,
head,
tail,
n_epochs,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma,
initial_alpha,
negative_sample_rate,
output_metric,
tuple(output_metric_kwds.values()),
verbose=verbose,
)
if output_dens:
if verbose:
print(ts() + " Computing embedding densities")
# Compute graph in embedding
(knn_indices, knn_dists, rp_forest,) = nearest_neighbors(
embedding,
densmap_kwds["n_neighbors"],
"euclidean",
{},
False,
random_state,
verbose=verbose,
)
emb_graph, emb_sigmas, emb_rhos, emb_dists = fuzzy_simplicial_set(
embedding,
densmap_kwds["n_neighbors"],
random_state,
"euclidean",
{},
knn_indices,
knn_dists,
verbose=verbose,
return_dists=True,
)
emb_graph = emb_graph.tocoo()
emb_graph.sum_duplicates()
emb_graph.eliminate_zeros()
n_vertices = emb_graph.shape[1]
mu_sum = np.zeros(n_vertices, dtype=np.float32)
re = np.zeros(n_vertices, dtype=np.float32)
head = emb_graph.row
tail = emb_graph.col
for i in range(len(head)):
j = head[i]
k = tail[i]
D = emb_dists[j, k]
mu = emb_graph.data[i]
re[j] += mu * D
re[k] += mu * D
mu_sum[j] += mu
mu_sum[k] += mu
epsilon = 1e-8
re = np.log(epsilon + (re / mu_sum))
aux_data["rad_emb"] = re
return embedding, aux_data
@numba.njit()
def init_transform(indices, weights, embedding):
"""Given indices and weights and an original embeddings
initialize the positions of new points relative to the
indices and weights (of their neighbors in the source data).
Parameters
----------
indices: array of shape (n_new_samples, n_neighbors)
The indices of the neighbors of each new sample
weights: array of shape (n_new_samples, n_neighbors)
The membership strengths of associated 1-simplices
for each of the new samples.
embedding: array of shape (n_samples, dim)
The original embedding of the source data.
Returns
-------
new_embedding: array of shape (n_new_samples, dim)
An initial embedding of the new sample points.
"""
result = np.zeros((indices.shape[0], embedding.shape[1]), dtype=np.float32)
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
for d in range(embedding.shape[1]):
result[i, d] += weights[i, j] * embedding[indices[i, j], d]
return result
def init_graph_transform(graph, embedding):
"""Given a bipartite graph representing the 1-simplices and strengths between the
new points and the original data set along with an embedding of the original points
initialize the positions of new points relative to the strengths (of their neighbors in the source data).
If a point is in our original data set it embeds at the original point's coordinates.
If a point has no neighbours in our original dataset it embeds as the np.nan vector.
Otherwise a point is the weighted average of its neighbours' embedding locations.
Parameters
----------
graph: csr_matrix (n_new_samples, n_samples)
A matrix indicating the 1-simplices and their associated strengths. These strengths should
be values between zero and one and not normalized. One indicating that the new point was identical
to one of our original points.
embedding: array of shape (n_samples, dim)
The original embedding of the source data.
Returns
-------
new_embedding: array of shape (n_new_samples, dim)
An initial embedding of the new sample points.
"""
result = np.zeros((graph.shape[0], embedding.shape[1]), dtype=np.float32)
for row_index in range(graph.shape[0]):
num_neighbours = len(graph[row_index].indices)
if num_neighbours == 0:
result[row_index] = np.nan
continue
for col_index in graph[row_index].indices:
if graph[row_index, col_index] == 1:
result[row_index, :] = embedding[col_index, :]
break
for d in range(embedding.shape[1]):
result[row_index, d] += (
graph[row_index, col_index]
/ num_neighbours
* embedding[col_index, d]
)
return result
@numba.njit()
def init_update(current_init, n_original_samples, indices):
for i in range(n_original_samples, indices.shape[0]):
n = 0
for j in range(indices.shape[1]):
for d in range(current_init.shape[1]):
if indices[i, j] < n_original_samples:
n += 1
current_init[i, d] += current_init[indices[i, j], d]
for d in range(current_init.shape[1]):
current_init[i, d] /= n
return
def find_ab_params(spread, min_dist):
"""Fit a, b params for the differentiable curve used in lower
dimensional fuzzy simplicial complex construction. We want the
smooth curve (from a pre-defined family with simple gradient) that
best matches an offset exponential decay.
"""
def curve(x, a, b):
return 1.0 / (1.0 + a * x ** (2 * b))
xv = np.linspace(0, spread * 3, 300)
yv = np.zeros(xv.shape)
yv[xv < min_dist] = 1.0
yv[xv >= min_dist] = np.exp(-(xv[xv >= min_dist] - min_dist) / spread)
params, covar = curve_fit(curve, xv, yv)
return params[0], params[1]
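# --- Illustrative sketch (not part of the original module): with the library defaults
# spread=1.0 and min_dist=0.1 the fitted curve parameters come out at roughly
# a ~ 1.58 and b ~ 0.90 (approximate values; helper name ours).
def _demo_find_ab_params():
    return find_ab_params(spread=1.0, min_dist=0.1)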
class UMAP(BaseEstimator):
"""Uniform Manifold Approximation and Projection
Finds a low dimensional embedding of the data that approximates
an underlying manifold.
Parameters
----------
n_neighbors: float (optional, default 15)
The size of local neighborhood (in terms of number of neighboring
sample points) used for manifold approximation. Larger values
result in more global views of the manifold, while smaller
values result in more local data being preserved. In general
values should be in the range 2 to 100.
n_components: int (optional, default 2)
The dimension of the space to embed into. This defaults to 2 to
provide easy visualization, but can reasonably be set to any
integer value in the range 2 to 100.
metric: string or function (optional, default 'euclidean')
The metric to use to compute distances in high dimensional space.
If a string is passed it must match a valid predefined metric. If
a general metric is required a function that takes two 1d arrays and
returns a float can be provided. For performance purposes it is
required that this be a numba jit'd function. Valid string metrics
include:
* euclidean
* manhattan
* chebyshev
* minkowski
* canberra
* braycurtis
* mahalanobis
* wminkowski
* seuclidean
* cosine
* correlation
* haversine
* hamming
* jaccard
* dice
* russellrao
* kulsinski
* ll_dirichlet
* hellinger
* rogerstanimoto
* sokalmichener
* sokalsneath
* yule
Metrics that take arguments (such as minkowski, mahalanobis etc.)
can have arguments passed via the metric_kwds dictionary. At this
time care must be taken and dictionary elements must be ordered
appropriately; this will hopefully be fixed in the future.
n_epochs: int (optional, default None)
The number of training epochs to be used in optimizing the
low dimensional embedding. Larger values result in more accurate
embeddings. If None is specified a value will be selected based on
the size of the input dataset (200 for large datasets, 500 for small).
learning_rate: float (optional, default 1.0)
The initial learning rate for the embedding optimization.
init: string (optional, default 'spectral')
How to initialize the low dimensional embedding. Options are:
* 'spectral': use a spectral embedding of the fuzzy 1-skeleton
* 'random': assign initial embedding positions at random.
* A numpy array of initial embedding positions.
min_dist: float (optional, default 0.1)
The effective minimum distance between embedded points. Smaller values
will result in a more clustered/clumped embedding where nearby points
on the manifold are drawn closer together, while larger values will
result on a more even dispersal of points. The value should be set
relative to the ``spread`` value, which determines the scale at which
embedded points will be spread out.
spread: float (optional, default 1.0)
The effective scale of embedded points. In combination with ``min_dist``
this determines how clustered/clumped the embedded points are.
low_memory: bool (optional, default False)
For some datasets the nearest neighbor computation can consume a lot of
memory. If you find that UMAP is failing due to memory constraints
consider setting this option to True. This approach is more
computationally expensive, but avoids excessive memory use.
set_op_mix_ratio: float (optional, default 1.0)
Interpolate between (fuzzy) union and intersection as the set operation
used to combine local fuzzy simplicial sets to obtain a global fuzzy
simplicial sets. Both fuzzy set operations use the product t-norm.
The value of this parameter should be between 0.0 and 1.0; a value of
1.0 will use a pure fuzzy union, while 0.0 will use a pure fuzzy
intersection.
local_connectivity: int (optional, default 1)
The local connectivity required -- i.e. the number of nearest
neighbors that should be assumed to be connected at a local level.
The higher this value the more connected the manifold becomes
locally. In practice this should not be more than the local intrinsic
dimension of the manifold.
repulsion_strength: float (optional, default 1.0)
Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight
being given to negative samples.
negative_sample_rate: int (optional, default 5)
The number of negative samples to select per positive sample
in the optimization process. Increasing this value will result
in greater repulsive force being applied, greater optimization
cost, but slightly more accuracy.
transform_queue_size: float (optional, default 4.0)
For transform operations (embedding new points using a trained model)
this will control how aggressively to search for nearest neighbors.
Larger values will result in slower performance but more accurate
nearest neighbor evaluation.
a: float (optional, default None)
More specific parameters controlling the embedding. If None these
values are set automatically as determined by ``min_dist`` and
``spread``.
b: float (optional, default None)
More specific parameters controlling the embedding. If None these
values are set automatically as determined by ``min_dist`` and
``spread``.
random_state: int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
metric_kwds: dict (optional, default None)
Arguments to pass on to the metric, such as the ``p`` value for
Minkowski distance. If None then no arguments are passed on.
angular_rp_forest: bool (optional, default False)
Whether to use an angular random projection forest to initialise
the approximate nearest neighbor search. This can be faster, but is
mostly only useful for metrics that use an angular style distance such
as cosine, correlation etc. In the case of those metrics angular forests
will be chosen automatically.
target_n_neighbors: int (optional, default -1)
The number of nearest neighbors to use to construct the target simplicial
set. If set to -1 use the ``n_neighbors`` value.
target_metric: string or callable (optional, default 'categorical')
The metric used to measure distance for a target array when using supervised
dimension reduction. By default this is 'categorical', which will measure
distance in terms of whether categories match or are different. Furthermore,
if semi-supervised learning is required, target values of -1 will be treated as
unlabelled under the 'categorical' metric. If the target array takes
continuous values (e.g. for a regression problem) then a metric of 'l1'
or 'l2' is probably more appropriate.
target_metric_kwds: dict (optional, default None)
Keyword arguments to pass to the target metric when performing
supervised dimension reduction. If None then no arguments are passed on.
target_weight: float (optional, default 0.5)
Weighting factor between data topology and target topology. A value of
0.0 weights entirely on data, a value of 1.0 weights entirely on target.
The default of 0.5 balances the weighting equally between data and target.
transform_seed: int (optional, default 42)
Random seed used for the stochastic aspects of the transform operation.
This ensures consistency in transform operations.
verbose: bool (optional, default False)
Controls verbosity of logging.
unique: bool (optional, default False)
Controls whether the rows of your data should be uniqued before being
embedded. If you have more duplicates than you have ``n_neighbors``
you can have identical data points lying in different regions of
your space. This also violates the definition of a metric.
To map from the internal structures back to your data, use the variable
_unique_inverse_.
densmap: bool (optional, default False)
Specifies whether the density-augmented objective of densMAP
should be used for optimization. Turning on this option generates
an embedding where the local densities are encouraged to be correlated
with those in the original space. Parameters below with the prefix 'dens'
further control the behavior of this extension.
dens_lambda: float (optional, default 2.0)
Controls the regularization weight of the density correlation term
in densMAP. Higher values prioritize density preservation over the
UMAP objective, and vice versa for values closer to zero. Setting this
parameter to zero is equivalent to running the original UMAP algorithm.
dens_frac: float (optional, default 0.3)
Controls the fraction of epochs (between 0 and 1) where the
density-augmented objective is used in densMAP. The first
(1 - dens_frac) fraction of epochs optimize the original UMAP objective
before introducing the density correlation term.
dens_var_shift: float (optional, default 0.1)
A small constant added to the variance of local radii in the
embedding when calculating the density correlation objective to
prevent numerical instability from dividing by a small number.
output_dens: bool (optional, default False)
Determines whether the local radii of the final embedding (an inverse
measure of local density) are computed and returned in addition to
the embedding. If set to True, local radii of the original data
are also included in the output for comparison; the output is a tuple
(embedding, original local radii, embedding local radii). This option
can also be used when densmap=False to calculate the densities for
UMAP embeddings.
disconnection_distance: float (optional, default np.inf or maximal value for bounded distances)
Disconnect any vertices of distance greater than or equal to disconnection_distance when approximating the
manifold via our k-nn graph. This is particularly useful in the case that you have a bounded metric. The
UMAP assumption that we have a connected manifold can be problematic when you have points that are maximally
different from all the rest of your data. The connected manifold assumption will make such points have perfect
similarity to a random set of other points. Too many such points will artificially connect your space.
"""
def __init__(
self,
n_neighbors=15,
n_components=2,
metric="euclidean",
metric_kwds=None,
output_metric="euclidean",
output_metric_kwds=None,
n_epochs=None,
learning_rate=1.0,
init="spectral",
min_dist=0.1,
spread=1.0,
low_memory=True,
n_jobs=-1,
set_op_mix_ratio=1.0,
local_connectivity=1.0,
repulsion_strength=1.0,
negative_sample_rate=5,
transform_queue_size=4.0,
a=None,
b=None,
random_state=None,
angular_rp_forest=False,
target_n_neighbors=-1,
target_metric="categorical",
target_metric_kwds=None,
target_weight=0.5,
transform_seed=42,
transform_mode="embedding",
force_approximation_algorithm=False,
verbose=False,
unique=False,
densmap=False,
dens_lambda=2.0,
dens_frac=0.3,
dens_var_shift=0.1,
output_dens=False,
disconnection_distance=None,
):
self.n_neighbors = n_neighbors
self.metric = metric
self.output_metric = output_metric
self.target_metric = target_metric
self.metric_kwds = metric_kwds
self.output_metric_kwds = output_metric_kwds
self.n_epochs = n_epochs
self.init = init
self.n_components = n_components
self.repulsion_strength = repulsion_strength
self.learning_rate = learning_rate
self.spread = spread
self.min_dist = min_dist
self.low_memory = low_memory
self.set_op_mix_ratio = set_op_mix_ratio
self.local_connectivity = local_connectivity
self.negative_sample_rate = negative_sample_rate
self.random_state = random_state
self.angular_rp_forest = angular_rp_forest
self.transform_queue_size = transform_queue_size
self.target_n_neighbors = target_n_neighbors
self.target_metric = target_metric
self.target_metric_kwds = target_metric_kwds
self.target_weight = target_weight
self.transform_seed = transform_seed
self.transform_mode = transform_mode
self.force_approximation_algorithm = force_approximation_algorithm
self.verbose = verbose
self.unique = unique
self.densmap = densmap
self.dens_lambda = dens_lambda if densmap else 0.0
self.dens_frac = dens_frac if densmap else 0.0
self.dens_var_shift = dens_var_shift
self.output_dens = output_dens
self.disconnection_distance = disconnection_distance
self.n_jobs = n_jobs
self.a = a
self.b = b
def _validate_parameters(self):
if self.set_op_mix_ratio < 0.0 or self.set_op_mix_ratio > 1.0:
raise ValueError("set_op_mix_ratio must be between 0.0 and 1.0")
if self.repulsion_strength < 0.0:
raise ValueError("repulsion_strength cannot be negative")
if self.min_dist > self.spread:
raise ValueError("min_dist must be less than or equal to spread")
if self.min_dist < 0.0:
raise ValueError("min_dist cannot be negative")
if not isinstance(self.init, str) and not isinstance(self.init, np.ndarray):
raise ValueError("init must be a string or ndarray")
if isinstance(self.init, str) and self.init not in ("spectral", "random",):
raise ValueError('string init values must be "spectral" or "random"')
if (
isinstance(self.init, np.ndarray)
and self.init.shape[1] != self.n_components
):
raise ValueError("init ndarray must match n_components value")
if not isinstance(self.metric, str) and not callable(self.metric):
raise ValueError("metric must be string or callable")
if self.negative_sample_rate < 0:
raise ValueError("negative sample rate must be positive")
if self._initial_alpha < 0.0:
raise ValueError("learning_rate must be positive")
if self.n_neighbors < 2:
raise ValueError("n_neighbors must be greater than 1")
if self.target_n_neighbors < 2 and self.target_n_neighbors != -1:
raise ValueError("target_n_neighbors must be greater than 1")
if not isinstance(self.n_components, int):
if isinstance(self.n_components, str):
raise ValueError("n_components must be an int")
if self.n_components % 1 != 0:
raise ValueError("n_components must be a whole number")
try:
# this will convert other types of int (eg. numpy int64)
# to Python int
self.n_components = int(self.n_components)
except ValueError:
raise ValueError("n_components must be an int")
if self.n_components < 1:
raise ValueError("n_components must be greater than 0")
if self.n_epochs is not None and (
self.n_epochs <= 10 or not isinstance(self.n_epochs, int)
):
raise ValueError("n_epochs must be a positive integer of at least 10")
if self.metric_kwds is None:
self._metric_kwds = {}
else:
self._metric_kwds = self.metric_kwds
if self.output_metric_kwds is None:
self._output_metric_kwds = {}
else:
self._output_metric_kwds = self.output_metric_kwds
if self.target_metric_kwds is None:
self._target_metric_kwds = {}
else:
self._target_metric_kwds = self.target_metric_kwds
# check sparsity of data upfront to set proper _input_distance_func &
# save repeated checks later on
if scipy.sparse.isspmatrix_csr(self._raw_data):
self._sparse_data = True
else:
self._sparse_data = False
# set input distance metric & inverse_transform distance metric
if callable(self.metric):
in_returns_grad = self._check_custom_metric(
self.metric, self._metric_kwds, self._raw_data
)
if in_returns_grad:
_m = self.metric
@numba.njit(fastmath=True)
def _dist_only(x, y, *kwds):
return _m(x, y, *kwds)[0]
self._input_distance_func = _dist_only
self._inverse_distance_func = self.metric
else:
self._input_distance_func = self.metric
self._inverse_distance_func = None
warn(
"custom distance metric does not return gradient; inverse_transform will be unavailable. "
"To enable using inverse_transform method method, define a distance function that returns "
"a tuple of (distance [float], gradient [np.array])"
)
elif self.metric == "precomputed":
if self.unique:
raise ValueError("unique is poorly defined on a precomputed metric")
warn(
"using precomputed metric; transform will be unavailable for new data and inverse_transform "
"will be unavailable for all data"
)
self._input_distance_func = self.metric
self._inverse_distance_func = None
elif self.metric == "hellinger" and self._raw_data.min() < 0:
raise ValueError("Metric 'hellinger' does not support negative values")
elif self.metric in dist.named_distances:
if self._sparse_data:
if self.metric in sparse.sparse_named_distances:
self._input_distance_func = sparse.sparse_named_distances[
self.metric
]
else:
raise ValueError(
"Metric {} is not supported for sparse data".format(self.metric)
)
else:
self._input_distance_func = dist.named_distances[self.metric]
try:
self._inverse_distance_func = dist.named_distances_with_gradients[
self.metric
]
except KeyError:
warn(
"gradient function is not yet implemented for {} distance metric; "
"inverse_transform will be unavailable".format(self.metric)
)
self._inverse_distance_func = None
else:
raise ValueError("metric is neither callable nor a recognised string")
# set output distance metric
if callable(self.output_metric):
out_returns_grad = self._check_custom_metric(
self.output_metric, self._output_metric_kwds
)
if out_returns_grad:
self._output_distance_func = self.output_metric
else:
raise ValueError(
"custom output_metric must return a tuple of (distance [float], gradient [np.array])"
)
elif self.output_metric == "precomputed":
raise ValueError("output_metric cannnot be 'precomputed'")
elif self.output_metric in dist.named_distances_with_gradients:
self._output_distance_func = dist.named_distances_with_gradients[
self.output_metric
]
elif self.output_metric in dist.named_distances:
raise ValueError(
"gradient function is not yet implemented for {}.".format(
self.output_metric
)
)
else:
raise ValueError(
"output_metric is neither callable nor a recognised string"
)
# set angularity for NN search based on metric
if self.metric in (
"cosine",
"correlation",
"dice",
"jaccard",
"ll_dirichlet",
"hellinger",
):
self.angular_rp_forest = True
if self.n_jobs < -1 or self.n_jobs == 0:
raise ValueError("n_jobs must be a postive integer, or -1 (for all cores)")
if self.dens_lambda < 0.0:
raise ValueError("dens_lambda cannot be negative")
if self.dens_frac < 0.0 or self.dens_frac > 1.0:
raise ValueError("dens_frac must be between 0.0 and 1.0")
if self.dens_var_shift < 0.0:
raise ValueError("dens_var_shift cannot be negative")
self._densmap_kwds = {
"lambda": self.dens_lambda,
"frac": self.dens_frac,
"var_shift": self.dens_var_shift,
"n_neighbors": self.n_neighbors,
}
if self.densmap:
if self.output_metric not in ("euclidean", "l2"):
raise ValueError(
"Non-Euclidean output metric not supported for densMAP."
)
# This will be used to prune all edges of greater than a fixed value from our knn graph.
# We have preset defaults described in DISCONNECTION_DISTANCES for our bounded measures.
# Otherwise a user can pass in their own value.
if self.disconnection_distance is None:
self._disconnection_distance = DISCONNECTION_DISTANCES.get(
self.metric, np.inf
)
elif isinstance(self.disconnection_distance, int) or isinstance(
self.disconnection_distance, float
):
self._disconnection_distance = self.disconnection_distance
else:
raise ValueError("disconnection_distance must either be None or a numeric.")
def _check_custom_metric(self, metric, kwds, data=None):
# quickly check to determine whether user-defined
# self.metric/self.output_metric returns both distance and gradient
if data is not None:
# if checking the high-dimensional distance metric, test directly on
# input data so we don't risk violating any assumptions potentially
# hard-coded in the metric (e.g., bounded; non-negative)
x, y = data[np.random.randint(0, data.shape[0], 2)]
else:
# if checking the manifold distance metric, simulate some data on a
# reasonable interval with output dimensionality
x, y = np.random.uniform(low=-10, high=10, size=(2, self.n_components))
if scipy.sparse.issparse(data):
metric_out = metric(x.indices, x.data, y.indices, y.data, **kwds)
else:
metric_out = metric(x, y, **kwds)
# True if metric returns iterable of length 2, False otherwise
return hasattr(metric_out, "__iter__") and len(metric_out) == 2
def _populate_combined_params(self, *models):
self.n_neighbors = flattened([m.n_neighbors for m in models])
self.metric = flattened([m.metric for m in models])
self.metric_kwds = flattened([m.metric_kwds for m in models])
self.output_metric = flattened([m.output_metric for m in models])
self.n_epochs = flattened(
[m.n_epochs if m.n_epochs is not None else -1 for m in models]
)
if all([x == -1 for x in self.n_epochs]):
self.n_epochs = None
self.init = flattened([m.init for m in models])
self.n_components = flattened([m.n_components for m in models])
self.repulsion_strength = flattened([m.repulsion_strength for m in models])
self.learning_rate = flattened([m.learning_rate for m in models])
self.spread = flattened([m.spread for m in models])
self.min_dist = flattened([m.min_dist for m in models])
self.low_memory = flattened([m.low_memory for m in models])
self.set_op_mix_ratio = flattened([m.set_op_mix_ratio for m in models])
self.local_connectivity = flattened([m.local_connectivity for m in models])
self.negative_sample_rate = flattened([m.negative_sample_rate for m in models])
self.random_state = flattened([m.random_state for m in models])
self.angular_rp_forest = flattened([m.angular_rp_forest for m in models])
self.transform_queue_size = flattened([m.transform_queue_size for m in models])
self.target_n_neighbors = flattened([m.target_n_neighbors for m in models])
self.target_metric = flattened([m.target_metric for m in models])
self.target_metric_kwds = flattened([m.target_metric_kwds for m in models])
self.target_weight = flattened([m.target_weight for m in models])
self.transform_seed = flattened([m.transform_seed for m in models])
self.force_approximation_algorithm = flattened(
[m.force_approximation_algorithm for m in models]
)
self.verbose = flattened([m.verbose for m in models])
self.unique = flattened([m.unique for m in models])
self.densmap = flattened([m.densmap for m in models])
self.dens_lambda = flattened([m.dens_lambda for m in models])
self.dens_frac = flattened([m.dens_frac for m in models])
self.dens_var_shift = flattened([m.dens_var_shift for m in models])
self.output_dens = flattened([m.output_dens for m in models])
self.a = flattened([m.a for m in models])
self.b = flattened([m.b for m in models])
self._a = flattened([m._a for m in models])
self._b = flattened([m._b for m in models])
def __mul__(self, other):
check_is_fitted(
self, attributes=["graph_"], msg="Only fitted UMAP models can be combined"
)
check_is_fitted(
other, attributes=["graph_"], msg="Only fitted UMAP models can be combined"
)
if self.graph_.shape[0] != other.graph_.shape[0]:
raise ValueError("Only models with the equivalent samples can be combined")
result = UMAP()
result._populate_combined_params(self, other)
result.graph_ = general_simplicial_set_intersection(
self.graph_, other.graph_, 0.5
)
result.graph_ = reset_local_connectivity(result.graph_, True)
if scipy.sparse.csgraph.connected_components(result.graph_)[0] > 1:
warn(
"Combined graph is not connected but multi-component layout is unsupported. "
"Falling back to random initialization."
)
init = "random"
else:
init = "spectral"
result.densmap = np.any(result.densmap)
result.output_dens = np.any(result.output_dens)
result._densmap_kwds = {
"lambda": np.max(result.dens_lambda),
"frac": np.max(result.dens_frac),
"var_shift": np.max(result.dens_var_shift),
"n_neighbors": np.max(result.n_neighbors),
}
if result.n_epochs is None:
n_epochs = -1
else:
n_epochs = np.max(result.n_epochs)
result.embedding_, aux_data = simplicial_set_embedding(
None,
result.graph_,
np.min(result.n_components),
np.min(result.learning_rate)
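# Hedged usage sketch (not part of the library source; the simplicial_set_embedding
# call above is truncated in this excerpt). A typical fit against the parameters
# documented in the class docstring, assuming the standard fit_transform method
# defined later in the original file, would look like:
#
#     rng = np.random.RandomState(42)
#     X = rng.normal(size=(200, 16))
#     reducer = UMAP(n_neighbors=10, n_components=2, min_dist=0.1, random_state=42)
#     embedding = reducer.fit_transform(X)   # expected shape: (200, 2)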
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import math
from collections import defaultdict
from typing import Any, List, NamedTuple, Optional, Set
# Third-party imports
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.core.exception import assert_data_error
from gluonts.gluonts_tqdm import tqdm
class ScaleHistogram:
"""
Scale histogram of a timeseries dataset
This counts the number of timeseries whose mean of absolute values is in
the `[base ** i, base ** (i+1)]` range for all possible `i`.
The number of entries with empty target is counted separately.
Parameters
----------
base
Log-width of the histogram's buckets.
bin_counts
empty_target_count
"""
@validated()
def __init__(
self,
base: float = 2.0,
bin_counts: Optional[dict] = None,
empty_target_count: int = 0,
) -> None:
self._base = base
self.bin_counts = defaultdict(
int, {} if bin_counts is None else bin_counts
)
self.empty_target_count = empty_target_count
self.__init_args__ = dict(
base=self._base,
bin_counts=self.bin_counts,
empty_target_count=empty_target_count,
)
def bucket_index(self, target_values):
assert len(target_values) > 0
scale = np.mean(np.abs(target_values))
scale_bin = int(math.log(scale + 1.0, self._base))
return scale_bin
def add(self, target_values):
if len(target_values) > 0:
bucket = self.bucket_index(target_values)
self.bin_counts[bucket] = self.bin_counts[bucket] + 1
else:
self.empty_target_count = self.empty_target_count + 1
def count(self, target):
if len(target) > 0:
return self.bin_counts[self.bucket_index(target)]
else:
return self.empty_target_count
def __len__(self):
return self.empty_target_count + sum(self.bin_counts.values())
def __eq__(self, other):
return (
isinstance(other, ScaleHistogram)
and self.bin_counts == other.bin_counts
and self.empty_target_count == other.empty_target_count
and self._base == other._base
)
def __str__(self):
string_repr = [
'count of scales in {min}-{max}:{count}'.format(
min=self._base ** base_index - 1,
max=self._base ** (base_index + 1) - 1,
count=count,
)
for base_index, count in sorted(
self.bin_counts.items(), key=lambda x: x[0]
)
]
return '\n'.join(string_repr)
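def _scale_histogram_example():
    # Hedged usage sketch, not part of the original module: bucket_index maps the mean
    # absolute value of a series onto a log-scale bucket, so a series with
    # mean(|target|) == 10 lands in bucket int(log2(10 + 1)) == 3 when base=2.
    hist = ScaleHistogram(base=2.0)
    hist.add([5.0, -10.0, 15.0])  # mean absolute value is 10 -> bucket 3
    hist.add([])                  # empty targets are counted separately
    assert hist.bucket_index([5.0, -10.0, 15.0]) == 3
    assert hist.count([5.0, -10.0, 15.0]) == 1
    assert len(hist) == 2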
class DatasetStatistics(NamedTuple):
"""
A NamedTuple to store the statistics of a Dataset.
"""
cats: List[Set[int]]
integer_dataset: bool
max_target: float
mean_abs_target: float
mean_target: float
mean_target_length: float
min_target: float
num_dynamic_feat: int
num_missing_values: int
num_time_observations: int
num_time_series: int
scale_histogram: ScaleHistogram
def __str__(self):
# gets a pretty string representation of all dataset statistics
return "\n".join(
[f"{var_name}: {var}" for var_name, var in self._asdict().items()]
)
def __eq__(self, other):
for x, y in zip(self._asdict().values(), other._asdict().values()):
if isinstance(x, float):
if abs(x - y) > abs(0.0001 * x):
return False
elif x != y:
return False
return True
# TODO: reorganize modules to avoid circular dependency
# TODO: and substitute Any with Dataset
def calculate_dataset_statistics(ts_dataset: Any) -> DatasetStatistics:
"""
Computes the statistics of a given Dataset.
Parameters
----------
ts_dataset
Dataset of which to compute the statistics.
Returns
-------
DatasetStatistics
NamedTuple containing the statistics.
"""
num_time_observations = 0
num_time_series = 0
min_target = 1e20
max_target = -1e20
sum_target = 0.0
sum_abs_target = 0.0
integer_dataset = True
observed_cats: Optional[List[Set[int]]] = None
num_cats: Optional[int] = None
num_dynamic_feat: Optional[int] = None
num_missing_values = 0
scale_histogram = ScaleHistogram()
with tqdm(enumerate(ts_dataset, start=1), total=len(ts_dataset)) as it:
for num_time_series, ts in it:
target = ts['target']
observed_target = target[~np.isnan(target)]
"""
cgl_fairness
Copyright (c) 2022-present NAVER Corp.
MIT license
"""
import numpy as np
import random
from data_handler import SSLDataset
class TabularDataset(SSLDataset):
"""Adult dataset."""
# 1 idx -> sensi
# 2 idx -> label
# 3 idx -> filename or feature (image / tabular)
def __init__(self, dataset, sen_attr_idx, **kwargs):
super(TabularDataset, self).__init__(**kwargs)
self.sen_attr_idx = sen_attr_idx
dataset_train, dataset_test = dataset.split([0.8], shuffle=True, seed=0)
# features, labels = self._balance_test_set(dataset)
self.dataset = dataset_train if (self.split == 'train') or ('group' in self.version) else dataset_test
features = np.delete(self.dataset.features, self.sen_attr_idx, axis=1)
mean, std = self._get_mean_n_std(dataset_train.features)
features = (features - mean) / std
self.groups = np.expand_dims(self.dataset.features[:, self.sen_attr_idx], axis=1)
self.labels = np.squeeze(self.dataset.labels)
# self.features = self.dataset.features
self.features = np.concatenate((self.groups, self.dataset.labels, features), axis=1)
# Note: mean and std are taken from the train dataset (see above)
self.num_data, self.idxs_per_group = self._data_count(self.features, self.num_groups, self.num_classes)
# if semi-supervised learning,
if self.sv_ratio < 1:
# we want the supervision to differ according to the seed
random.seed(self.seed)
self.features, self.num_data, self.idxs_per_group = self.ssl_processing(self.features, self.num_data, self.idxs_per_group, )
if 'group' in self.version:
a, b = self.num_groups, self.num_classes
self.num_groups, self.num_classes = b, a
def get_dim(self):
return self.dataset.features.shape[-1]
def __getitem__(self, idx):
features = self.features[idx]
group = features[0]
label = features[1]
feature = features[2:]
if 'group' in self.version:
return np.float32(feature), 0, label, np.int64(group), (idx, 0)
else:
return np.float32(feature), 0, group, np.int64(label)
# METIS PROJECT 5 - KOJAK
#
# module of all pytorch-related code for Kojak
import audiomod
from pymongo import MongoClient
import pandas as pd
import numpy as np
from sklearn import metrics
import torch
import torch.utils.data as data_utils
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from collections import defaultdict, OrderedDict
from copy import deepcopy
import pickle
import time
import os
import matplotlib.pyplot as plt
# GLOBAL VARIABLES AND OBJECTS
client = MongoClient(
"mongodb://{}:{}@{}/kojak".format(
os.environ['mdbUN'],
os.environ['mdbPW'],
os.environ['mdbIP']
)
)
kdb = client.kojak
rs = torch.manual_seed(42)
# DATA LOADING
class SpectroDataset(data_utils.Dataset):
"""
Spectrogram dataset class
---
INIT PARAMS
dataset_name: name of dataset as labeled in MongoDB (str)
group: 'train' or 'test' or 'val', must be present in records of
dataset_name (str)
scaling: factor by which to scale the spectrogram dimensions, e.g. a
factor of 0.5 would make a spectrogram 1/4 the size (int, float)
dir_in: path to directory in which .wav files reside (str)
transform: transform function or functions if not None
"""
def __init__(
self,
datagroup_df,
scaling=1,
dir_in="../audio/wav_chunked",
transform=None
):
self.sample_frame = datagroup_df
self.scaling = scaling
# this is a dictionary of audio parameters necessary for scaling
# spectrograms during creation
self.audio_params = {
'hl': 256,
'n_fft': 1024,
'n_mels': 512
}
self.dir_in = dir_in
self.transform = transform
def __len__(self):
return self.sample_frame.shape[0]
def __getitem__(self, ix):
"""Creates and returns spectrogram of requested item"""
chunk_id = self.sample_frame.loc[ix, 'chunk_id']
y, sr = audiomod.audio_loader(chunk_id)
sample = audiomod.make_spectro(
y,
sr,
hl = int(self.audio_params['hl'] / self.scaling),
n_fft = int(self.audio_params['n_fft'] / self.scaling),
n_mels = int(self.audio_params['n_mels'] * self.scaling)
)
# add singleton dimension
sample = np.expand_dims(sample, 0)
# normalize on -1 to 1 scale as done in PyTorch tutorial
sample = normalize_spec(sample, low=-1)
# convert to torch float tensor
sample = torch.from_numpy(sample).float()
if self.transform:
sample = self.transform(sample)
return sample, self.sample_frame.loc[ix, 'actual'], chunk_id
# CNN ARCHITECTURES
class CNN_cpcpff(nn.Module):
"""
params: Pass input params as a dictionary where each item is a layer and
each value is a list, following this convention:
Convolutional: c1: [kernel, stride, channels_out]
Max Pooling: p1: [kernel, stride]
Fully Connected: f1: [channels_in, channels_out]
For example:
params = {
'c1': [5,1,10],
'p1': [2,2],
'c2': [5,1,20],
'p2': [2,2],
'f1': [2600,50],
'f2': [50,2]
}
All list values must be integers.
rs: random seed (int)
normal: if True, initialize parameters from a normal distribution with
mean = 0 and std = 1 / sqrt(input_dims * kernel_w * kernel_h) (bool)
"""
def __init__(self, params, rs=23, normal=True):
super(CNN_cpcpff, self).__init__()
self.p = params
self.rs = rs
self.seed_gen = torch.manual_seed(self.rs)
# in channels, out channels, kernel, stride=s
self.conv1 = nn.Conv2d(1,
self.p['c1'][2],
self.p['c1'][0],
stride=self.p['c1'][1])
# 2x2 kernel, stride=2 -- stride defaults to kernel
self.pool1 = nn.MaxPool2d(self.p['p1'][0], self.p['p1'][1])
self.conv2 = nn.Conv2d(self.p['c1'][2],
self.p['c2'][2],
self.p['c2'][0],
stride=self.p['c2'][1])
self.pool2 = nn.MaxPool2d(self.p['p2'][0], self.p['p2'][1])
self.fc1 = nn.Linear(self.p['f1'][0], self.p['f1'][1])
self.fc2 = nn.Linear(self.p['f2'][0], self.p['f2'][1])
if normal:
self.apply(init_norm_auto)
# do I need to clear this?
self.seed_gen = None
def forward(self, x, softmax=False):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = x.view(x.size(0), -1) # need to reshape for fully connected layer
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
# make this an optional?
if softmax:
x = F.softmax(x)
return x
def save_myself(self, fname, dir_out='../data'):
"""
Saves current object as a .pkl file.
---
fname: filename of choice (str)
dir_out: path to save directory (str)
"""
# add timestamp here
fpath = os.path.join(dir_out, fname + '.p')
with open(fpath, 'wb') as pf:
pickle.dump(self, pf)
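def _cnn_cpcpff_example():
    # Hedged usage sketch, not part of the original module. It follows the params
    # convention documented in the CNN_cpcpff docstring; the 64 x 52 input size is an
    # illustrative assumption chosen so the flattened feature map is 20 * 13 * 10 = 2600,
    # matching the 'f1' input width below.
    params = {
        'c1': [5, 1, 10], 'p1': [2, 2],
        'c2': [5, 1, 20], 'p2': [2, 2],
        'f1': [2600, 50], 'f2': [50, 2],
    }
    cnn = CNN_cpcpff(params, rs=23, normal=True)
    x = Variable(torch.zeros(4, 1, 64, 52))  # batch of 4 single-channel spectrograms
    out = cnn(x, softmax=True)               # shape (4, 2), rows sum to 1
    return out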
# CNN DESIGN HELPERS
def reduce_axis(pix_in, kernel_size, stride, drop_last=False):
"""
Calculate output pixels along one axis given input pixels,
filter size, and stride.
---
IN
pix_in: number of pixels along input axis (int)
kernel_size: assuming a square filter, pixels on one side (int)
stride: pixels per step (int)
drop_last: if True, ignore pixels on last step if fewer than
filter_dim (bool)
OUT
pix_out: number of pixels along output axis
"""
pix_out = (pix_in - kernel_size) // stride + 1
if not drop_last:
if (pix_in - kernel_size) % stride > 0:
pix_out += 1
return pix_out
def cnn_pixels_out(dim_in, layers, drop_last=False):
"""
Computes CNN output pixels given input dimensions and layer info.
Assumes a square kernel and drop_last=False for reduce_axis process.
---
IN
dim_in: (C, W, H) format, where each is an integer (tup or list)
layers: ((kernel, stride, filters_out), ...) format, where each is an
int. If a max pooling layer, set filters to 0 (tup or list)
OUT
pixels_out: number of pixels going into FC layer (int)
"""
c = dim_in[0]
w = dim_in[1]
h = dim_in[2]
print("{} x {} x {}".format(c,w,h))
for layer in layers:
if layer[2] != 0:
c = layer[2]
w = reduce_axis(w, layer[0], layer[1], drop_last=drop_last)
h = reduce_axis(h, layer[0], layer[1], drop_last=drop_last)
print("{} x {} x {}".format(c,w,h))
return c * w * h
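def _cnn_pixels_out_example():
    # Hedged sketch, not part of the original module: for a 1 x 64 x 64 input passed
    # through conv(k=5, s=1, 10 filters) -> pool(k=2, s=2) -> conv(k=5, s=1, 20) ->
    # pool(k=2, s=2), reduce_axis gives 64 -> 60 -> 30 -> 26 -> 13 per axis, so the
    # first fully connected layer sees 20 * 13 * 13 = 3380 inputs.
    layers = ((5, 1, 10), (2, 2, 0), (5, 1, 20), (2, 2, 0))
    assert cnn_pixels_out((1, 64, 64), layers) == 3380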
# MODEL UTILITY FUNCTIONS
def normalize_spec(ndarray, low=0, high=1, min_db=-80):
"""
Normalize dB-scale spectrogram from low-high given min dB at creation.
"""
factor = min_db / (high-low)
# might just be able to do ndarray /= -min_db
# would invert the image though
ndarray -= factor
ndarray /= abs(factor)
return ndarray
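def _normalize_spec_example():
    # Hedged sketch, not part of the original module: with the defaults (low=0, high=1,
    # min_db=-80), a dB-scale value of -80 maps to 0.0, -40 to 0.5, and 0 to 1.0.
    spec = np.array([-80.0, -40.0, 0.0])
    assert np.allclose(normalize_spec(spec), [0.0, 0.5, 1.0])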
def init_norm_auto(m):
"""Based on self.reset_parameters() in nn.Linear and nn.Conv2n"""
seed_gen = torch.manual_seed(23)
# print(m)
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
if isinstance(m, nn.Conv2d):
n = m.in_channels
for k in m.kernel_size:
n *= k
if isinstance(m, nn.Linear):
n = m.weight.size(1)
std = 1. / np.sqrt(n)
m.weight.data.normal_(mean=0, std=std)
if m.bias is not None:
m.bias.data.normal_(mean=0, std=std)
# print(m.weight)
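def _init_norm_auto_example():
    # Hedged sketch, not part of the original module: for nn.Linear(100, 10), n is the
    # fan-in (100), so re-initialized weights are drawn with std = 1 / sqrt(100) = 0.1.
    layer = nn.Linear(100, 10)
    init_norm_auto(layer)
    assert abs(float(layer.weight.data.std()) - 0.1) < 0.02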
def fit(cnn,
dataset,
optimizer,
criterion,
num_epochs,
batch_size,
minibatches=None
):
"""
Runs feed-forward and back-prop to train CNN model.
*** ROLL INTO CNN CLASS?
---
IN
cnn: CNN instance
dataset: built SpectroDataset object
optimizer: PyTorch optimizer for back-prop
criterion: PyTorch loss object for loss metric
num_epochs: number of times to cycle through data (int)
batch_size: number of records per batch (int)
minibatches: print loss and time every n minibatches (COMING SOON) (int)
OUT
loss_by_epoch: average loss per epoch (ndarray)
"""
train_loader = data_utils.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=2,
drop_last=True
)
# if this throws errors, it's probably because of switch to array
# loss_by_epoch = np.array([])
loss_by_epoch = []
for epoch in range(num_epochs):
print("Epoch", epoch+1)
running_loss = 0.0
loss_per_batch = np.array([])
before = deepcopy(cnn.state_dict()['conv2.weight'])
then = time.perf_counter()
for i, data in enumerate(train_loader, 1):
sub_then = time.perf_counter()
# separate input data and labels, dump chunk IDs
spectros, labels, _ = data
# wrap in Variable for GD
spectros, labels = Variable(spectros), Variable(labels)
# zero parameter gradients, else accumulate
optimizer.zero_grad()
# forward prop
outputs = cnn(spectros)
# calculate loss
loss = criterion(outputs, labels)
# backprop
loss.backward()
# update weights
optimizer.step()
# verbosity
sub_now = time.perf_counter()
print("\r * {} loss: {:.3f}\tTime: {:.3f} ms"
.format(i, loss.data[0], (sub_now-sub_then)*1000), end='')
loss_per_batch = np.append(loss_per_batch, loss.data[0])
running_loss += loss.data[0]
# # print every n minibatches
# running_loss += loss.data[0]
# if i%minibatches == minibatches:
# print('[%d, %5d] loss: %.3f' % (
# epoch+1, i, running_loss/minibatches))
# running_loss = 0.0
now = time.perf_counter()
after = cnn.state_dict()['conv2.weight']
update = not np.allclose(before.numpy(), after.numpy())
avg_loss = running_loss/i
loss_by_epoch.append(loss_per_batch)
print("\r * Avg loss: {:.3f}\tTime: {:.3f} ms"
.format(running_loss/i, (now-then)*1000))
print(" * Weights updated:", update)
print('\n\aTraining Complete')
return np.vstack(loss_by_epoch)
def predict(cnn, dataset, batch_size=4, res_format='df'):
"""
Predicts values on trained CNN.
*** ROLL INTO CNN CLASS?
---
IN
cnn: trained CNN instance
dataset: built SpectroDataset object
batch_size: number of records per batch
res_format: results format, either 'df' for pandas dataframe or 'dict'
for dictionary (str)
OUT
results: if 'dict', dictionary with chunk ID as key, and a tuple of (actual,
predicted, output_array) as value (dict); if 'df', pandas dataframe
"""
loader = data_utils.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False, # set to False for the test set
num_workers=2
)
results = {}
# could clean this up to load it straight into df instead of dict
for data in loader:
spectros, labels, chunk_ids = data
outputs = cnn(Variable(spectros), softmax=True)
_, pred = torch.max(outputs.data, 1)
for c_id, y, y_hat, out in zip(chunk_ids, labels, pred, outputs.data):
results[c_id] = (y, y_hat, out)
if res_format == 'df':
results = results_to_df(results)
return results
def results_to_df(results):
"""
Converts predict results to Pandas dataframe.
---
IN
results: dictionary generated by results function (dict)
OUT
df: pandas dataframe of results
"""
cols = ['chunk_id', 'actual', 'pred', 'p0', 'p1']
results_trans = OrderedDict.fromkeys(cols)
for k in results_trans.keys():
results_trans[k] = []
for k, v in results.items():
for col, val in zip(cols, [k, v[0], v[1], v[2][0], v[2][1]]):
results_trans[col].append(val)
df = pd.DataFrame(results_trans)
return df
# SCORING, CV, GRID SEARCH
def get_scores(train_df, test_df, verbose=True):
"""
Calculates accuracy, recall, and specificity for train and test
predictions.
### add precision?
---
IN
train_df: predict results df of train set
test_df: predict results df of test set
OUT
scores_dict: scores bundle (dict)
"""
scores_dict = defaultdict(list)
score_types = [
'acc',
# 'pre',
'rec',
'spec'
]
for df in [train_df, test_df]:
df_scores = []
df_scores.append(
metrics.accuracy_score(df.actual, df.pred))
# df_scores.append(
# metrics.precision_score(df.actual, df.pred))
df_scores.append(
metrics.recall_score(df.actual, df.pred))
df_scores.append(
metrics.recall_score(df.actual, df.pred, pos_label=0))
for n, s in zip(score_types, df_scores):
scores_dict[n].append(s)
return scores_dict
def print_scores(scores_dict, title=None):
"""
Print scores in table given scores dictionary as created by get_scores().
---
IN
scores_dict: dictionary of classification scores (dict)
title: title, if given (str)
NO OUT
"""
if title:
print(title)
print("Score\tTrain\tTest")
print("-" * 24)
for score in scores_dict.keys():
print("{}\t{:.3f}\t{:.3f}".format(
score.capitalize(),
scores_dict[score][0],
scores_dict[score][1])
)
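def _scores_example():
    # Hedged sketch, not part of the original module: get_scores expects prediction
    # dataframes with 'actual' and 'pred' columns, as produced by results_to_df.
    train_df = pd.DataFrame({'actual': [1, 1, 0, 0], 'pred': [1, 0, 0, 0]})
    test_df = pd.DataFrame({'actual': [1, 0], 'pred': [1, 1]})
    scores = get_scores(train_df, test_df)
    print_scores(scores, title="Toy example")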
def crossval(
cnn_params,
datagroup_df,
scaling,
optim_partial,
criterion,
num_epochs,
batch_size,
folds=4,
rs=23
):
"""
Performs cross validation on dataset with model, optimizer, criterion, and
hyperparameters as specified.
---
IN
cnn_params: CNN layer parameter dict, as expected by CNN_cpcpff (dict)
datagroup_df: dataframe of datagroup (df)
scaling: degree of scaling to apply to spectros, 0-1 (float)
optim_partial: partial object of optimizer, with all parameters preset
criterion: loss/error criterion object on which to optimize
num_epochs: number of epochs to run per fold (int)
batch_size: number of spectros per batch (int)
folds: number of folds for cross val (int)
rs: random seed for torch random generator (int)
OUT
scores_cv: average scores per fold, in- and out-of-sample (dict)
scores_bundle: in- and out-of-sample scores for each fold (dict)
losses_per_fold: list of avg loss per epoch for each fold (list)
"""
# add folds column to dataset df
df = audiomod.assign_cv_groups(datagroup_df, folds=folds)
scores_bundle = {}
losses_per_fold = []
for i in range(folds):
print("\n*** Fold {} ***".format(i+1))
train_df = (df[df.cv != i]
.filter(['chunk_id', 'actual'])
.reset_index(drop=True))
test_df = (df[df.cv == i]
.filter(['chunk_id', 'actual'])
.reset_index(drop=True))
# sanity check of tt lengths
print("\nTrain set length: {}".format(train_df.shape[0]))
print("Test set length: {}".format(test_df.shape[0]))
# create dataset objects for train and test
train_dataset = SpectroDataset(train_df, scaling=scaling)
test_dataset = SpectroDataset(test_df, scaling=scaling)
# spawn model with specified parameters
cnn = CNN_cpcpff(cnn_params, rs=rs)
print("Random seed: {}\n".format(cnn.rs))
# train model
loss_by_epoch = fit(
cnn,
train_dataset,
optim_partial(cnn.parameters()),
criterion,
num_epochs,
batch_size
)
losses_per_fold.append(loss_by_epoch)
# get in- and out-of-sample predictions
train_res = predict(cnn, train_dataset)
test_res = predict(cnn, test_dataset)
# calculate scores
scores_fold = get_scores(train_res, test_res)
scores_bundle[i] = scores_fold
print("\n", end="")
print_scores(scores_fold)
scores_cv = defaultdict(list)
# cycle through fold, score, and train/test to average all scores
for score_type in scores_fold.keys():
for ix in range(2):
score_sum = 0
for fold in range(folds):
score_sum += scores_bundle[fold][score_type][ix]
scores_cv[score_type].append(score_sum / folds)
print("\n", end="")
print_scores(scores_cv, "CV Average Scores")
return scores_cv, scores_bundle, losses_per_fold
# OTHER UTILITIES
def tensor_stats(tensor):
"""
Prints basic stats for any torch tensor.
---
IN
tensor: any torch tensor
NO OUT
"""
print("Min:", tensor.min())
print("Max:", tensor.max())
print("Mean:", tensor.mean())
print("Std:", tensor.std())
print("Shape:", tensor.size())
def plot_loss(
lbe,
model_name,
plot_values='all',
line_style='b-',
save=False,
fname=None
):
"""
Plots the loss by epoch.
---
IN
lbe: list of avg loss at the end of each epoch (list)
model_name: name of model (str)
plot_values: if 'all', plots loss for each batch; if 'epoch', plots
average loss per epoch (str)
NO OUT
"""
msg = "plot_values must be 'all' or 'epoch'"
assert plot_values == 'all' or plot_values == 'epoch', msg
if isinstance(lbe, list):
lbe = np.array([np.array(l) for l in lbe])
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 10:56:58 2020
@author: Amirh
"""
from ..utils._base_feature_selector import BaseFeatureSelector
from ..functions import ff
import numpy as np
import time
import itertools
from pathos.multiprocessing import ProcessingPool as Pool
from sklearn.base import clone
import random
class PlusLMinusR(BaseFeatureSelector):
def __init__(self, model, n_f, weight, scoring, l=3, r=2, cv=None,
verbose= True, random_state=None, n_jobs = 1,**kwargs):
"""
Plus-L Minus-R is a sequential algorithm that iteratively adds L
features to and removes R features from the selection, using sequential
forward selection (SFS) and sequential backward selection (SBS). The
algorithm therefore combines forward and backward sequential selection. If L
is bigger than R, the algorithm starts with SFS and adds L features, then
removes R features using SBS. If R is bigger than L, the algorithm starts
with SBS and removes R features, then adds L features using SFS.
Parameters
----------
model : class
Instantiated Sklearn regression or classification estimator.
n_f : int
Number of features needed to be extracted.
weight : int
Maximization or minimization objective, for maximization use +1
and for minimization use -1.
scoring : callable
Callable sklearn score or customized score function that takes in y_pred and y_true
l : int, optional
Number of features to add using sequential forward selection (SFS).
The default is 3.
r : int, optional
Number of features to remove using sequential backward selection (SBS).
The default is 2.
cv : class, optional
Instantiated sklearn cross-validation class. The default is None.
random_state : int, optional
Determines the random state for random number generation,
cross-validation, and classifier/regression model setting.
The default is None.
n_jobs : int, optional
Number of cores to use for multiprocessing. The default is 1, no
multiprocessing is used.
Returns
-------
Instantiated optimization model class.
"""
super().__init__(scoring=scoring, n_f = n_f,**kwargs)
# =============================================================================
# checking the input arguments
# =============================================================================
if n_f != int(n_f):
raise ValueError("n_f should be an integer")
if l != int(l):
raise ValueError("l should be an integer")
if r != int(r):
raise ValueError("r should be an integer")
if weight != int(weight):
raise ValueError("wieght should be an integer either -1 or +1")
try:
self.fitness = ff
except:
raise ImportError("Cannot find/import ff.py defined in the same directory")
self.model = model
self.n_f = n_f
self.cv = cv
self.weight = weight
self.scoring = scoring
self.l = l
self.r = r
self.verbose = verbose
self.random_state = random_state
self.n_jobs = n_jobs
random.seed(self.random_state)
np.random.seed(self.random_state)
try:
self.cv.random_state = self.random_state
self.cv.shuffle = True
except:
pass
try:
self.model.random_state = self.random_state
except:
pass
def fit(self,x,y,decor=None,scale=False, test_size = None,**kwargs):
"""
Fit the model to input data X and target values Y, with extra optional
arguments for preprocessing data.
Parameters
----------
x : ndarray or sparse matrix of shape (n_samples, n_features)
Input data.
y : ndarray of shape (n_samples,)
Target values.
decor : tuple or float, optional
Decorrelation parameter. If a number in the range [0,1] is given,
the input features with a correlation factor above that value are
removed. If a tuple of (string, float [0,1]) is given, the input
features are ranked by the feature importance method indicated by
the string, then decorrelated, and the ones with higher importance
are retained. The string can be one of the following:
"variance"
"f_test"
"mutual_info"
"chi2"
"pearson"
The default is None.
scale : bool, optional
Whether to scale the input data or not (centering and scaling).
The default is False.
test_size : float, optional
Represents the portion of input data to be included for test split.
Needs to be between 0 and 1. The default is None.
Attributes
----------
best_fits: Vector of shape (n_iter,)
Fitness values of each iteration.
best_sol: Vector of shape (n_f,)
Indices of selected features after preprocessing the input data.
best_sol_acc: float
Optimized fitness value for the identified solution.
model_best: Class
Regression/classification estimator given to the optimization
algorithm. At this stage model_best = model
Returns
-------
Vector of shape (n_f,)
Indices of selected features of the original input data (prior to
preprocessing). If no decor is given, this equals best_sol.
Vector of shape (n_iter,)
Fitness values of each iteration.
"""
self.load(x,y,decor=decor,scale=scale)
if test_size is not None:
self.train_test_split(test_size, random_state=self.random_state,**kwargs)
a = time.time()
# if x_train is defined
if hasattr(self, 'x_train') and hasattr(self, 'x_test') and hasattr(self, 'y_train') and hasattr(self, 'y_test'):
self._test = True
else:
self._test = False
self.best_fits = self._optimize()
self.best_sol = self.sel_features
self.best_sol_acc = self.sel_features_acc
self.best_features = self.best_sol
self.model_best = clone(self.model)
if self.verbose:
print("Optimization completed in {:.2f} seconds".format(time.time() - a))
print("Best feature set is {}, {:.4f}".format(np.array(self.x_cols[self.sel_features]), self.sel_features_acc))
return self.x_cols[self.sel_features], self.best_fits
def _pool_fitness_calc_single(self,indices):
if self._test:
models = list(self.model for i in indices)
cv_models = list(self.cv for i in indices)
xs = list(self.x_train[:,i].reshape(-1,1) for i in indices)
ys = list(self.y_train for i in indices)
xtests = list(self.x_test[:,i].reshape(-1,1) for i in indices)
ytests = list(self.y_test for i in indices)
scorers = list(self.scorer for i in indices)
if self.n_jobs == 1:
fitness_values = np.array(list(map(self.fitness.calculate_fitness,models,
cv_models,scorers,
xs,ys,xtests,ytests)))[:,0]
elif self.n_jobs > 1 :
with Pool(self.n_jobs) as pool:
fitness_values = np.array(pool.map(self.fitness.calculate_fitness,models,
cv_models,scorers,
xs,ys,xtests,ytests))[:,0]
elif self.n_jobs == -1 :
with Pool() as pool:
fitness_values = np.array(pool.map(self.fitness.calculate_fitness,models,
cv_models,scorers,
xs,ys,xtests,ytests))[:,0]
else:
models = list(self.model for i in indices)
cv_models = list(self.cv for i in indices)
xs = list(self.x[:,i].reshape(-1,1) for i in indices)
ys = list(self.y for i in indices)
scorers = list(self.scorer for i in indices)
if self.n_jobs == 1:
fitness_values = np.array(list(map(self.fitness.calculate_fitness,models,
cv_models,scorers,
xs,ys)))[:,0]
elif self.n_jobs > 1 :
with Pool(self.n_jobs) as pool:
fitness_values = np.array(pool.map(self.fitness.calculate_fitness,models,
cv_models,scorers,
xs,ys))[:,0]
elif self.n_jobs == -1 :
with Pool() as pool:
fitness_values = np.array(pool.map(self.fitness.calculate_fitness,models,
cv_models,scorers,
xs,ys))[:,0]
return fitness_values
def _pool_fitness_calc_multiple(self,indices):
if self._test:
models = list(self.model for i in indices)
cv_models = list(self.cv for i in indices)
xs = list(self.x_train[:,i] for i in indices)
ys = list(self.y_train for i in indices)
xtests = list(self.x_test[:,i] for i in indices)
ytests = list(self.y_test for i in indices)
scorers = list(self.scorer for i in indices)
if self.n_jobs == 1:
fitness_values = np.array(list(map(self.fitness.calculate_fitness,
models, cv_models,scorers,
xs, ys,
xtests, ytests)))[:,0]
elif self.n_jobs > 1 :
with Pool(self.n_jobs) as pool:
fitness_values = np.array(pool.map(self.fitness.calculate_fitness,
models, cv_models,scorers,
xs, ys,
xtests, ytests))[:,0]
elif self.n_jobs == -1:
with Pool() as pool:
fitness_values = np.array(pool.map(self.fitness.calculate_fitness,
models, cv_models,scorers,
xs, ys,
xtests, ytests))[:,0]
else:
models = list(self.model for i in indices)
cv_models = list(self.cv for i in indices)
xs = list(self.x[:,i] for i in indices)
ys = list(self.y for i in indices)
scorers = list(self.scorer for i in indices)
if self.n_jobs == 1:
fitness_values = np.array(list(map(self.fitness.calculate_fitness,
models, cv_models,scorers,
xs, ys)))[:,0]
elif self.n_jobs > 1:
with Pool(self.n_jobs) as pool:
fitness_values = np.array(pool.map(self.fitness.calculate_fitness,
models, cv_models,scorers,
xs, ys))[:,0]
elif self.n_jobs == -1:
with Pool() as pool:
fitness_values = np.array(pool.map(self.fitness.calculate_fitness,
models, cv_models,scorers,
xs, ys))[:,0]
return fitness_values
def _optimize(self):
"""
optimize when train and test sets are available
"""
if self.l == self.r:
raise ValueError('L and R cannot be equal.')
elif self.l > self.r:
"""
starting with an empty list and then alternately running SFS and SBS
"""
self.sel_features = []
sel_features = []
features_available = list(range(self.x.shape[1]))
self.best_fits = []
while len(self.sel_features) != self.n_f:
# running Sequential Forward Selection L times
# finding the first best feature
new_indices = [sel_features + [j] for j in features_available]
if len(self.sel_features) == 0:
fitness_values = self._pool_fitness_calc_single(features_available)
else:
fitness_values = self._pool_fitness_calc_multiple(new_indices)
for i in range(self.l):
if self.weight == 1:
attr_index = np.argmax(fitness_values)
elif self.weight == -1:
attr_index = np.argmin(fitness_values)
# appending the feature to the selected feature list
#sel_features.extend([z for z in new_indices[attr_index] if z not in sel_features])
sel_features = new_indices[attr_index]
# deleting the best found feature from the feature indices set
#features_available = [z for z in features_available if z not in new_indices[attr_index]]
features_available.remove(sel_features[-1])
# if last iteration, don't recalculate and go to SBS
if i != self.l-1:
new_indices = [sel_features + [j] for j in features_available]
if len(new_indices[0]) != 1:
fitness_values = self._pool_fitness_calc_multiple(new_indices)
else:
fitness_values = self._pool_fitness_calc_single(new_indices)
# running Sequential Backward selection R times
# getting every combination of features available
new_indices = list(itertools.combinations(sel_features,len(sel_features)-1))
if len(new_indices[0]) != 1:
fitness_values = self._pool_fitness_calc_multiple(new_indices)
else:
fitness_values = self._pool_fitness_calc_single(new_indices)
for i in range(self.r):
if self.weight == 1:
attr_index = np.argmax(fitness_values)
elif self.weight == -1:
attr_index = np.argmin(fitness_values)
# returning the removed features to the available feature list so they can be selected again later
features_available.extend([z for z in sel_features if z not in new_indices[attr_index]])
# updating sel_features
sel_features = list(new_indices[attr_index])
# if last iteration, don't recalculate and go to SFS
if i != self.r-1:
# recalculating fitness of every combination of remaining feature sets
new_indices = list(itertools.combinations(sel_features,len(sel_features)-1))
if len(new_indices[0]) != 1:
fitness_values = self._pool_fitness_calc_multiple(new_indices)
else:
fitness_values = self._pool_fitness_calc_single(new_indices)
self.sel_features = sel_features
self.sel_features_acc = fitness_values[attr_index]
if self.verbose:
print("Features: {} Fitness_score: {:.4} ".format(self.sel_features,self.sel_features_acc))
self.best_fits.append(self.sel_features_acc)
if len(self.sel_features) == self.l:
self.sel_features = self.sel_features[:self.n_f]
elif self.l < self.r:
"""
starting with a full list and then alternately running SBS and SFS
"""
self.sel_features = list(range(self.x.shape[1]))
sel_features = list(range(self.x.shape[1]))
features_available = []
self.best_fits = []
while len(self.sel_features) != self.n_f:
# running Sequential Backward Selection R times
# removing one feature and calculating fitness values using combination
new_indices = list(itertools.combinations(self.sel_features,len(self.sel_features)-1))
fitness_values = self._pool_fitness_calc_multiple(new_indices)
for i in range(self.r):
# the worst feature is the one whose removal causes the smallest drop in accuracy
if self.weight == 1:
attr_index = np.argmax(fitness_values)
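# Hedged usage sketch (not part of the class; _optimize above is truncated in this
# excerpt). With l=3 and r=2 the selector alternates three SFS additions with two SBS
# removals until n_f features remain, e.g.:
#
#     from sklearn.linear_model import LogisticRegression
#     from sklearn.metrics import accuracy_score
#
#     selector = PlusLMinusR(LogisticRegression(), n_f=5, weight=1,
#                            scoring=accuracy_score, l=3, r=2,
#                            random_state=0, n_jobs=1)
#     selected_cols, fitness_history = selector.fit(X, y, scale=True, test_size=0.3)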
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from logger import *
from models.deeplabv3plus import Deeplab_v3plus
from cityscapes import CityScapes
from configs import config_factory
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.distributed as dist
import os
import sys
import os.path as osp
import logging
import time
import numpy as np
from tqdm import tqdm
import argparse
def parse_args():
parse = argparse.ArgumentParser()
parse.add_argument(
'--local_rank',
dest = 'local_rank',
type = int,
default = -1,
)
return parse.parse_args()
class MscEval(object):
def __init__(self, cfg, *args, **kwargs):
self.cfg = cfg
self.distributed = dist.is_initialized()
## dataloader
dsval = CityScapes(cfg, mode='val')
sampler = None
if self.distributed:
sampler = torch.utils.data.distributed.DistributedSampler(dsval)
self.dl = DataLoader(dsval,
batch_size = cfg.eval_batchsize,
sampler = sampler,
shuffle = False,
num_workers = cfg.eval_n_workers,
drop_last = False)
def __call__(self, net):
## evaluate
hist_size = (self.cfg.n_classes, self.cfg.n_classes)
hist = np.zeros(hist_size, dtype=np.float32)
if dist.is_initialized() and dist.get_rank()!=0:
diter = enumerate(self.dl)
else:
diter = enumerate(tqdm(self.dl))
for i, (imgs, label) in diter:
N, _, H, W = label.shape
probs = torch.zeros((N, self.cfg.n_classes, H, W))
probs.requires_grad = False
for sc in self.cfg.eval_scales:
new_hw = [int(H*sc), int(W*sc)]
with torch.no_grad():
im = F.interpolate(imgs, new_hw, mode='bilinear', align_corners=True)
im = im.cuda()
out = net(im)
out = F.interpolate(out, (H, W), mode='bilinear', align_corners=True)
prob = F.softmax(out, 1)
probs += prob.cpu()
if self.cfg.eval_flip:
out = net(torch.flip(im, dims=(3,)))
out = torch.flip(out, dims=(3,))
out = F.interpolate(out, (H, W), mode='bilinear',
align_corners=True)
prob = F.softmax(out, 1)
probs += prob.cpu()
del out, prob
probs = probs.data.numpy()
preds = np.argmax(probs, axis=1)
hist_once = self.compute_hist(preds, label.data.numpy().squeeze(1))
hist = hist + hist_once
if self.distributed:
hist = torch.tensor(hist).cuda()
dist.all_reduce(hist, dist.ReduceOp.SUM)
hist = hist.cpu().numpy().astype(np.float32)
IOUs = np.diag(hist)
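# Hedged note (the evaluator above is truncated in this excerpt): per-class IoU is
# conventionally derived from a confusion histogram as diag / (row_sum + col_sum - diag).
# A minimal sketch of that computation, independent of this class:
#
#     hist = np.array([[50.0, 5.0], [10.0, 35.0]])
#     ious = np.diag(hist) / (hist.sum(axis=0) + hist.sum(axis=1) - np.diag(hist))
#     # ious == [50/65, 35/50]; the mean of this vector is the usual mIoU score.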
"""
This library collects a bunch of Optimizers inspired by the paper
The older optimizers are stored in Optimizer.py. Those classes are equipped with a `step_simple` function taking in
scores and codes to generate the next batch of codes.
"""
# from matplotlib import use as use_backend
# use_backend("Agg")
import matplotlib.pylab as plt
# plt.ioff()
#
import os
import time
import sys
# import utils
import numpy as np
from numpy.linalg import norm
from numpy.random import randn
from numpy import sqrt, zeros, abs, floor, log, log2, eye, exp
from geometry_utils import ExpMap, VecTransport, radial_proj, orthogonalize, renormalize
orig_stdout = sys.stdout
#%% Classic Optimizers as Reference
class CholeskyCMAES:
""" Note this is a variant of CMAES Cholesky suitable for high dimensional optimization"""
def __init__(self, space_dimen, population_size=None, init_sigma=3.0, init_code=None, Aupdate_freq=10,
maximize=True, random_seed=None, optim_params={}):
N = space_dimen
self.space_dimen = space_dimen
# Overall control parameter
self.maximize = maximize # if the program is to maximize or to minimize
# Strategy parameter setting: Selection
if population_size is None:
self.lambda_ = int(4 + floor(3 * log2(N))) # population size, offspring number
# the relation between dimension and population size.
else:
self.lambda_ = population_size # use custom specified population size
mu = self.lambda_ / 2 # number of parents/points for recombination
# Select half the population size as parents
weights = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
self.mu = int(floor(mu))
self.weights = weights / sum(weights) # normalize recombination weights array
mueff = self.weights.sum() ** 2 / sum(self.weights ** 2) # variance-effectiveness of sum w_i x_i
self.weights.shape = (1, -1) # Add the 1st dim 1 to the weights mat
self.mueff = mueff # add to class variable
        self.sigma = init_sigma  # initial step size (defaults to 3.0 in this signature)
print("Space dimension: %d, Population size: %d, Select size:%d, Optimization Parameters:\nInitial sigma: %.3f"
% (self.space_dimen, self.lambda_, self.mu, self.sigma))
        # Strategy parameter setting: Adaptation
        self.cc = 4 / (N + 4)  # by default 0.0009756
self.cs = sqrt(mueff) / (sqrt(mueff) + sqrt(N)) # 0.0499
self.c1 = 2 / (N + sqrt(2)) ** 2 # 1.1912701410022985e-07
if "cc" in optim_params.keys(): # if there is outside value for these parameter, overwrite them
self.cc = optim_params["cc"]
if "cs" in optim_params.keys():
self.cs = optim_params["cs"]
if "c1" in optim_params.keys():
self.c1 = optim_params["c1"]
self.damps = 1 + self.cs + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) # damping for sigma usually close to 1
print("cc=%.3f, cs=%.3f, c1=%.3f damps=%.3f" % (self.cc, self.cs, self.c1, self.damps))
if init_code is not None:
self.init_x = np.asarray(init_code)
self.init_x.shape = (1, N)
else:
self.init_x = None # FIXED Nov. 1st
self.xmean = zeros((1, N))
self.xold = zeros((1, N))
# Initialize dynamic (internal) strategy parameters and constants
self.pc = zeros((1, N))
self.ps = zeros((1, N)) # evolution paths for C and sigma
        self.A = eye(N, N)  # the covariance matrix is represented by the factor A, with A @ A.T = C
self.Ainv = eye(N, N)
self.eigeneval = 0 # track update of B and D
self.counteval = 0
if Aupdate_freq is None:
self.update_crit = self.lambda_ / self.c1 / N / 10
else:
self.update_crit = Aupdate_freq * self.lambda_
self.chiN = sqrt(N) * (1 - 1 / (4 * N) + 1 / (21 * N ** 2))
# expectation of ||N(0,I)|| == norm(randn(N,1)) in 1/N expansion formula
self._istep = 0
def step_simple(self, scores, codes):
""" Taking scores and codes to return new codes, without generating images
Used in cases when the images are better handled in outer objects like Experiment object
"""
# Note it's important to decide which variable is to be saved in the `Optimizer` object
# Note to confirm with other code, this part is transposed.
# set short name for everything to simplify equations
N = self.space_dimen
lambda_, mu, mueff, chiN = self.lambda_, self.mu, self.mueff, self.chiN
cc, cs, c1, damps = self.cc, self.cs, self.c1, self.damps
sigma, A, Ainv, ps, pc, = self.sigma, self.A, self.Ainv, self.ps, self.pc,
# Sort by fitness and compute weighted mean into xmean
if self.maximize is False:
            code_sort_index = np.argsort(scores)  # ascending order for minimization; the minus sign below flips it for maximization
else:
code_sort_index = np.argsort(-scores)
# scores = scores[code_sort_index] # Ascending order. minimization
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
if self.init_x is None:
select_n = len(code_sort_index[0:mu])
temp_weight = self.weights[:, :select_n] / np.sum(self.weights[:, :select_n]) # in case the codes is not enough
self.xmean = temp_weight @ codes[code_sort_index[0:mu], :]
else:
self.xmean = self.init_x
else:
self.xold = self.xmean
self.xmean = self.weights @ codes[code_sort_index[0:mu], :] # Weighted recombination, new mean value
# Cumulation statistics through steps: Update evolution paths
randzw = self.weights @ self.randz[code_sort_index[0:mu], :]
ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * randzw
pc = (1 - cc) * pc + sqrt(cc * (2 - cc) * mueff) * randzw @ A
# Adapt step size sigma
sigma = sigma * exp((cs / damps) * (norm(ps) / chiN - 1))
# self.sigma = self.sigma * exp((self.cs / self.damps) * (norm(ps) / self.chiN - 1))
print("sigma: %.2f" % sigma)
# Update A and Ainv with search path
if self.counteval - self.eigeneval > self.update_crit: # to achieve O(N ^ 2) do decomposition less frequently
self.eigeneval = self.counteval
t1 = time.time()
v = pc @ Ainv
normv = v @ v.T
# Directly update the A Ainv instead of C itself
A = sqrt(1 - c1) * A + sqrt(1 - c1) / normv * (
sqrt(1 + normv * c1 / (1 - c1)) - 1) * v.T @ pc # FIXME, dimension error, # FIXED aug.13th
Ainv = 1 / sqrt(1 - c1) * Ainv - 1 / sqrt(1 - c1) / normv * (
1 - 1 / sqrt(1 + normv * c1 / (1 - c1))) * Ainv @ v.T @ v
t2 = time.time()
print("A, Ainv update! Time cost: %.2f s" % (t2 - t1))
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.lambda_, N))
self.randz = randn(self.lambda_, N) # save the random number for generating the code.
for k in range(self.lambda_):
new_samples[k:k + 1, :] = self.xmean + sigma * (self.randz[k, :] @ A) # m + sig * Normal(0,C)
            # A compact way to generate multivariate Gaussian samples:
            # stretch the Gaussian hypersphere with D and transform the
            # ellipsoid with the B matrix (a linear transform between coordinate systems)
self.counteval += 1
self.sigma, self.A, self.Ainv, self.ps, self.pc = sigma, A, Ainv, ps, pc,
self._istep += 1
return new_samples
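# --- Hypothetical usage sketch (not part of the original code) ---
# Illustrates the ask/tell protocol of `step_simple`: the optimizer returns a batch of codes,
# an external scorer evaluates them, and the scores are fed back on the next call.
# The toy quadratic objective below is only a stand-in for the real scoring function.
def _demo_cholesky_cmaes(n_dim=50, n_gen=10):
    optim = CholeskyCMAES(n_dim, population_size=20, init_sigma=2.0, init_code=np.zeros((1, n_dim)))
    codes = np.zeros((20, n_dim))  # dummy first batch; only its score ranking is used at step 0
    for _ in range(n_gen):
        scores = -np.sum(codes ** 2, axis=1)  # toy objective, maximized at the origin
        codes = optim.step_simple(scores, codes)
    return codes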
#%% Optimizers that use pre-computed Hessian information
class HessCMAES:
""" Note this is a variant of CMAES Cholesky suitable for high dimensional optimization"""
def __init__(self, space_dimen, population_size=None, cutoff=None, init_sigma=3.0, init_code=None, Aupdate_freq=10, maximize=True, random_seed=None, optim_params={}):
if cutoff is None: cutoff = space_dimen
N = cutoff
self.code_len = space_dimen
self.space_dimen = cutoff # Overall control parameter
self.maximize = maximize # if the program is to maximize or to minimize
# Strategy parameter setting: Selection
if population_size is None:
self.lambda_ = int(4 + floor(3 * log2(N))) # population size, offspring number
# the relation between dimension and population size.
else:
self.lambda_ = population_size # use custom specified population size
mu = self.lambda_ / 2 # number of parents/points for recombination
# Select half the population size as parents
weights = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
self.mu = int(floor(mu))
self.weights = weights / sum(weights) # normalize recombination weights array
mueff = self.weights.sum() ** 2 / sum(self.weights ** 2) # variance-effectiveness of sum w_i x_i
self.weights.shape = (1, -1) # Add the 1st dim 1 to the weights mat
self.mueff = mueff # add to class variable
        self.sigma = init_sigma  # initial step size (defaults to 3.0 in this signature)
print("Space dimension: %d, Population size: %d, Select size:%d, Optimization Parameters:\nInitial sigma: %.3f"
% (self.space_dimen, self.lambda_, self.mu, self.sigma))
        # Strategy parameter setting: Adaptation
        self.cc = 4 / (N + 4)  # by default 0.0009756
self.cs = sqrt(mueff) / (sqrt(mueff) + sqrt(N)) # 0.0499
self.c1 = 2 / (N + sqrt(2)) ** 2 # 1.1912701410022985e-07
if "cc" in optim_params.keys(): # if there is outside value for these parameter, overwrite them
self.cc = optim_params["cc"]
if "cs" in optim_params.keys():
self.cs = optim_params["cs"]
if "c1" in optim_params.keys():
self.c1 = optim_params["c1"]
self.damps = 1 + self.cs + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) # damping for sigma usually close to 1
print("cc=%.3f, cs=%.3f, c1=%.3f damps=%.3f" % (self.cc, self.cs, self.c1, self.damps))
if init_code is not None:
self.init_x = np.asarray(init_code).reshape(1,-1)
# if self.init_x.shape[1] == space_dimen:
# self.projection = True
# elif self.init_x.shape[1] == cutoff:
# self.projection = False
# else:
# raise ValueError
else:
self.init_x = None # FIXED Nov. 1st
self.xmean = zeros((1, N))
self.xold = zeros((1, N))
# Initialize dynamic (internal) strategy parameters and constants
self.pc = zeros((1, space_dimen))
self.ps = zeros((1, N)) # evolution paths for C and sigma
        self.A = eye(N, space_dimen, )  # the covariance matrix is represented by the factor A, with A @ A.T = C
self.Ainv = eye(space_dimen, N, )
self.eigeneval = 0 # track update of B and D
self.counteval = 0
if Aupdate_freq is None:
self.update_crit = self.lambda_ / self.c1 / N / 10
else:
self.update_crit = Aupdate_freq * self.lambda_
self.chiN = sqrt(N) * (1 - 1 / (4 * N) + 1 / (21 * N ** 2))
# expectation of ||N(0,I)|| == norm(randn(N,1)) in 1/N expansion formula
self._istep = 0
def set_Hessian(self, eigvals, eigvects, cutoff=None, expon=1/2.5):
cutoff = self.space_dimen
self.eigvals = eigvals[:cutoff]
self.eigvects = eigvects[:, :cutoff]
self.scaling = self.eigvals ** (-expon)
self.A = self.scaling[:,np.newaxis] * self.eigvects.T # cutoff by spacedimen
self.Ainv = (1 / self.scaling[np.newaxis,:]) * self.eigvects # spacedimen by cutoff
# if self.projection:
# self.init_x = self.init_x @ self.Ainv
def step_simple(self, scores, codes):
""" Taking scores and codes to return new codes, without generating images
Used in cases when the images are better handled in outer objects like Experiment object
"""
# Note it's important to decide which variable is to be saved in the `Optimizer` object
# Note to confirm with other code, this part is transposed.
# set short name for everything to simplify equations
N = self.space_dimen
lambda_, mu, mueff, chiN = self.lambda_, self.mu, self.mueff, self.chiN
cc, cs, c1, damps = self.cc, self.cs, self.c1, self.damps
sigma, A, Ainv, ps, pc, = self.sigma, self.A, self.Ainv, self.ps, self.pc,
# Sort by fitness and compute weighted mean into xmean
if self.maximize is False:
            code_sort_index = np.argsort(scores)  # ascending order for minimization; the minus sign below flips it for maximization
else:
code_sort_index = np.argsort(-scores)
# scores = scores[code_sort_index] # Ascending order. minimization
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
if self.init_x is None:
select_n = len(code_sort_index[0:mu])
temp_weight = self.weights[:, :select_n] / np.sum(self.weights[:, :select_n]) # in case the codes is not enough
self.xmean = temp_weight @ codes[code_sort_index[0:mu], :]
else:
self.xmean = self.init_x
else:
self.xold = self.xmean
self.xmean = self.weights @ codes[code_sort_index[0:mu], :] # Weighted recombination, new mean value
# Cumulation statistics through steps: Update evolution paths
randzw = self.weights @ self.randz[code_sort_index[0:mu], :]
ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * randzw
pc = (1 - cc) * pc + sqrt(cc * (2 - cc) * mueff) * randzw @ A
# Adapt step size sigma
sigma = sigma * exp((cs / damps) * (norm(ps) / chiN - 1))
# self.sigma = self.sigma * exp((self.cs / self.damps) * (norm(ps) / self.chiN - 1))
print("sigma: %.2f" % sigma)
# Update A and Ainv with search path
if self.counteval - self.eigeneval > self.update_crit: # to achieve O(N ^ 2) do decomposition less frequently
self.eigeneval = self.counteval
t1 = time.time()
v = pc @ Ainv # (1, spacedimen) * (spacedimen, N) -> (1,N)
normv = v @ v.T
# Directly update the A Ainv instead of C itself
A = sqrt(1 - c1) * A + sqrt(1 - c1) / normv * (
sqrt(1 + normv * c1 / (1 - c1)) - 1) * v.T @ pc # FIXME, dimension error
Ainv = 1 / sqrt(1 - c1) * Ainv - 1 / sqrt(1 - c1) / normv * (
1 - 1 / sqrt(1 + normv * c1 / (1 - c1))) * Ainv @ v.T @ v
t2 = time.time()
print("A, Ainv update! Time cost: %.2f s" % (t2 - t1))
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.lambda_, N))
self.randz = randn(self.lambda_, N) # save the random number for generating the code.
new_samples = self.xmean + sigma * self.randz @ A
self.counteval += self.lambda_
        # A compact way to generate multivariate Gaussian samples:
        # stretch the Gaussian hypersphere with D and transform the
        # ellipsoid with the B matrix (a linear transform between coordinate systems)
self.sigma, self.A, self.Ainv, self.ps, self.pc = sigma, A, Ainv, ps, pc,
self._istep += 1
return new_samples
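# --- Hypothetical usage sketch (not part of the original code) ---
# HessCMAES searches in a `cutoff`-dimensional subspace spanned by the leading Hessian eigenvectors.
# The eigendecomposition below is fabricated from a random orthonormal basis purely for illustration;
# in practice it would come from a precomputed Hessian of the objective.
def _demo_hess_cmaes(space_dimen=256, cutoff=64):
    rand_basis, _ = np.linalg.qr(randn(space_dimen, space_dimen))  # random orthonormal columns (stand-in eigenvectors)
    eigvals = np.linspace(10.0, 0.1, space_dimen)                  # made-up positive spectrum, descending
    optim = HessCMAES(space_dimen, population_size=20, cutoff=cutoff, init_sigma=2.0,
                      init_code=np.zeros((1, space_dimen)))
    optim.set_Hessian(eigvals, rand_basis)  # keeps the top `cutoff` eigen-pairs internally
    codes = np.zeros((20, space_dimen))
    scores = -np.sum(codes ** 2, axis=1)    # toy objective
    codes = optim.step_simple(scores, codes)  # returns a (20, space_dimen) batch of full-space codes
    return codes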
#%% New Optimizers from the paper.
class HessAware_ADAM:
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, nu=0.9, maximize=True):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of estimating gradient
self.nu = nu # update rate for D
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.D = np.ones((1, self.dimen)) # running average of gradient square
self.Hdiag = np.ones((1, self.dimen)) # Diagonal of estimated Hessian
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
def step_simple(self, scores, codes):
        ''' Assume the 1st row of codes is the new starting point xnew '''
# set short name for everything to simplify equations
N = self.dimen
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
else:
# self.xcur = self.xnew # should be same as following
self.xcur = codes[0:1, :]
self.weights = (scores - scores[0]) / self.mu
HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
if self.maximize is True:
self.xnew = self.xcur + self.lr * HAgrad # add - operator it will do maximization.
else:
self.xnew = self.xcur - self.lr * HAgrad
self.D = self.nu * self.D + (1 - self.nu) * HAgrad ** 2 # running average of gradient square # Missing square before
self.Hdiag = self.D / (1 - self.nu ** self._istep) # Diagonal of estimated Hessian
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.B + 1, N))
self.innerU = randn(self.B, N) # save the random number for generating the code.
self.outerV = self.innerU / sqrt(self.Hdiag) # H^{-1/2}U
new_samples[0:1, :] = self.xnew
new_samples[1: , :] = self.xnew + self.mu * self.outerV # m + sig * Normal(0,C)
self._istep += 1
return new_samples
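# --- Hypothetical usage sketch (not part of the original code) ---
# step_simple expects the scores/codes of the previous batch, where row 0 is the base point and
# rows 1..B are the perturbed samples it generated. A toy quadratic stands in for the scorer.
def _demo_hessaware_adam(n_dim=30, n_gen=5):
    optim = HessAware_ADAM(n_dim, population_size=10, lr=0.5, mu=1.0)
    codes = np.zeros((11, n_dim))  # row 0 = initial base point, rows 1..10 = (initially zero) samples
    for _ in range(n_gen):
        scores = -np.sum(codes ** 2, axis=1)      # toy objective, maximized at the origin
        codes = optim.step_simple(scores, codes)  # returns B + 1 = 11 new codes
    return codes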
#%%
class HessAware_Gauss:
"""Gaussian Sampling method for estimating Hessian"""
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, Lambda=0.9, Hupdate_freq=5, maximize=True):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of the Gaussian distribution to estimate gradient
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
def step_hessian(self, scores):
'''Currently only use part of the samples to estimate hessian, maybe need more '''
fbasis = scores[0]
fpos = scores[-2*self.HB:-self.HB]
fneg = scores[-self.HB:]
weights = abs((fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda) )
def step_simple(self, scores, codes):
        ''' Assume the 1st row of codes is the new starting point xnew '''
# set short name for everything to simplify equations
N = self.dimen
if self.hess_comp: # if this flag is True then more samples have been added to the trial
self.step_hessian(scores)
# you should only get images for gradient estimation, get rid of the Hessian samples, or make use of it to estimate gradient
codes = codes[:self.B+1, :]
scores = scores[:self.B+1]
self.hess_comp = False
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
else:
# self.xcur = self.xnew # should be same as following line
self.xcur = codes[0:1, :]
self.weights = (scores - scores[0]) / self.mu
# estimate gradient from the codes and scores
HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
print("Estimated Gradient Norm %f"%np.linalg.norm(HAgrad))
if self.maximize is True:
self.xnew = self.xcur + self.lr * HAgrad # add - operator it will do maximization.
else:
self.xnew = self.xcur - self.lr * HAgrad
# Generate new sample by sampling from Gaussian distribution
new_samples = zeros((self.B + 1, N))
self.innerU = randn(self.B, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + ((self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
new_samples[0:1, :] = self.xnew
new_samples[1: , :] = self.xnew + self.mu * self.outerV # m + sig * Normal(0,C)
if self._istep % self.Hupdate_freq == 0:
# add more samples to next batch for hessian computation
self.hess_comp = True
self.HinnerU = randn(self.HB, N)
H_pos_samples = self.xnew + self.mu * self.HinnerU
H_neg_samples = self.xnew - self.mu * self.HinnerU
new_samples = np.concatenate((new_samples, H_pos_samples, H_neg_samples), axis=0)
self._istep += 1
return new_samples
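# --- Hypothetical usage sketch (not part of the original code) ---
# Every `Hupdate_freq` generations step_simple appends 2*HB extra probe samples for Hessian
# estimation, so the caller simply scores *all* returned rows; the next call separates the
# gradient samples from the Hessian probes on its own. A toy quadratic scorer is used again.
def _demo_hessaware_gauss(n_dim=30, n_gen=6):
    optim = HessAware_Gauss(n_dim, population_size=10, lr=0.5, mu=1.0, Lambda=0.9, Hupdate_freq=3)
    codes = np.zeros((11, n_dim))  # row 0 = base point, rows 1..10 = samples
    for _ in range(n_gen):
        scores = -np.sum(codes ** 2, axis=1)      # one score per returned row, whatever the row count
        codes = optim.step_simple(scores, codes)  # 11 rows, or 11 + 2*HB rows on Hessian generations
    return codes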
def rankweight(lambda_, mu=None):
""" Rank weight inspired by CMA-ES code
mu is the cut off number, how many samples will be kept while `lambda_ - mu` will be ignore
"""
if mu is None:
mu = lambda_ / 2 # number of parents/points for recombination
        # By default, select half the population size as parents
weights = zeros(int(lambda_))
mu_int = int(floor(mu))
weights[:mu_int] = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
weights = weights / sum(weights)
return weights
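# --- Illustrative example (not part of the original code) ---
# With lambda_ = 6 and mu = 3, only the top three ranks receive weight and the weights sum to 1:
#   rankweight(6, mu=3)  ~=  [0.64, 0.28, 0.08, 0.0, 0.0, 0.0]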
# Major Classes.
class HessAware_Gauss_Spherical:
"""Gaussian Sampling method for estimating Hessian"""
    def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, Lambda=0.9, Hupdate_freq=5,
                 sphere_norm=300, maximize=True, rankweight=False, select_cutoff=None):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of the Gaussian distribution to estimate gradient
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr = lr # learning rate (step size) of moving along gradient
self.sphere_norm = sphere_norm
self.tang_codes = zeros((self.B, self.dimen))
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros(
(self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
self.rankweight = rankweight # Switch between using raw score as weight VS use rank weight as score
print(
"Spereical Space dimension: %d, Population size: %d, Optimization Parameters:\n Exploration: %.3f\n Learning rate: %.3f"
% (self.dimen, self.B, self.mu, self.lr))
if self.rankweight:
if select_cutoff is None:
self.select_cutoff = int(population_size / 2)
else:
self.select_cutoff = select_cutoff
print("Using rank weight, selection size: %d\n" % self.select_cutoff)
def step_hessian(self, scores):
        '''Estimate the Hessian from the extra paired probe samples (same scheme as HessAware_Gauss.step_hessian).'''
fbasis = scores[0]
fpos = scores[-2 * self.HB:-self.HB]
fneg = scores[-self.HB:]
weights = abs(
(fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda))
def step_simple(self, scores, codes):
        ''' Assume the 1st row of codes is the new starting point xnew '''
# set short name for everything to simplify equations
N = self.dimen
if self.hess_comp: # if this flag is True then more samples have been added to the trial
self.step_hessian(scores)
# you should only get images for gradient estimation, get rid of the Hessian samples, or make use of it to estimate gradient
codes = codes[:self.B + 1, :]
scores = scores[:self.B + 1]
self.hess_comp = False
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
print('First generation\n')
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
# No reweighting as there should be a single code
else:
# self.xcur = self.xnew # should be same as following line
self.xcur = codes[0:1, :]
if self.rankweight is False: # use the score difference as weight
                # the B normalizer goes here: a larger cohort of codes gives more gradient estimates
                self.weights = (scores[1:] - scores[0]) / self.B  # / self.mu
else: # use a function of rank as weight, not really gradient.
if self.maximize is False: # note for weighted recombination, the maximization flag is here.
code_rank = np.argsort(np.argsort(scores[1:])) # add - operator it will do maximization.
else:
code_rank = np.argsort(np.argsort(-scores[1:]))
# Consider do we need to consider the basis code and score here? Or no?
# Note the weights here are internally normalized s.t. sum up to 1, no need to normalize more.
self.weights = rankweight(len(scores) - 1, mu=self.select_cutoff)[
code_rank] # map the rank to the corresponding weight of recombination
# estimate gradient from the codes and scores
# HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
HAgrad = self.weights[np.newaxis, :] @ self.tang_codes
print("Estimated Gradient Norm %f" % np.linalg.norm(HAgrad))
if self.rankweight is False:
if self.maximize is True:
self.xnew = ExpMap(self.xcur, self.lr * HAgrad) # add - operator it will do maximization.
else:
self.xnew = ExpMap(self.xcur, - self.lr * HAgrad)
else:
self.xnew = ExpMap(self.xcur, self.lr * HAgrad)
# vtan_new = VecTransport(self.xcur, self.xnew, vtan_old)
# uni_vtan_old = vtan_old / np.linalg.norm(vtan_old);
# uni_vtan_new = vtan_new / np.linalg.norm(vtan_new); # uniform the tangent vector
# Generate new sample by sampling from Gaussian distribution
self.tang_codes = zeros((self.B, N)) # Tangent vectors of exploration
new_samples = zeros((self.B + 1, N))
self.innerU = randn(self.B, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + (
(self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
new_samples[0:1, :] = self.xnew
self.tang_codes[:, :] = self.mu * self.outerV # m + sig * Normal(0,C)
new_samples[1:, ] = ExpMap(self.xnew, self.tang_codes)
if (self._istep + 1) % self.Hupdate_freq == 0:
# add more samples to next batch for hessian computation
self.hess_comp = True
self.HinnerU = randn(self.HB, N)
H_pos_samples = self.xnew + self.mu * self.HinnerU
H_neg_samples = self.xnew - self.mu * self.HinnerU
new_samples = np.concatenate((new_samples, H_pos_samples, H_neg_samples), axis=0)
self._istep += 1
self._curr_samples = new_samples / norm(new_samples, axis=1)[:, np.newaxis] * self.sphere_norm
return self._curr_samples
class HessAware_Gauss_Cylind:
""" Cylindrical Evolution, Both angular and radial. """
def __init__(self, space_dimen, population_size=40, population_kept=None, lr_norm=0.5, mu_norm=5, lr_sph=2,
mu_sph=0.005,
Lambda=1, Hupdate_freq=201, max_norm=300, maximize=True, rankweight=False):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr_norm = lr_norm # learning rate (step size) of moving along gradient
self.mu_norm = mu_norm # scale of the Gaussian distribution to estimate gradient
self.lr_sph = lr_sph
self.mu_sph = mu_sph
self.sphere_flag = True # initialize the whole system as linear?
self.max_norm = max_norm
self.tang_codes = zeros((self.B, self.dimen))
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros(
(self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xcur = np.zeros((1, self.dimen)) # current base point
self.xnew = np.zeros((1, self.dimen)) # new base point
self.fcur = 0 # f(xcur)
self.fnew = 0 # f(xnew)
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
self.rankweight = rankweight # Switch between using raw score as weight VS use rank weight as score
print("Spereical Space dimension: %d, Population size: %d, Optimization Parameters:\n"
"Norm Exploration Range %.3f Learning rate: %.3f\n Angular Exploration Range:%.3f Learning Rate: %.3f"
% (self.dimen, self.B, self.mu_norm, self.lr_norm, self.mu_sph, self.lr_sph))
if rankweight:
self.BKeep = population_kept if population_kept is not None else int(self.B // 2)
print("Using rank based weights. Keep population size: %d" % (self.BKeep))
def step_hessian(self, scores):
''' Currently not implemented in Spherical Version. '''
raise NotImplementedError
# fbasis = scores[0]
# fpos = scores[-2 * self.HB:-self.HB]
# fneg = scores[-self.HB:]
# weights = abs(
# (fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
# C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# # H = C^TC + Lambda * I
# self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
# self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
# print("Hessian Samples Spectrum", self.HessD)
# print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda))
def step_simple(self, scores, codes):
        ''' Assume the 1st row of codes is the new starting point xnew '''
# set short name for everything to simplify equations
N = self.dimen
if self.hess_comp: # if this flag is True then more samples have been added to the trial
raise NotImplementedError
self.step_hessian(scores)
# you should only get images for gradient estimation, get rid of the Hessian samples, or make use of it to estimate gradient
codes = codes[:self.B + 1, :]
scores = scores[:self.B + 1]
self.hess_comp = False
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
print('First generation\n')
self.xcur = codes[0:1, :]
self.xnew = codes[0:1, :]
# No reweighting as there should be a single code
else:
# self.xcur = self.xnew # should be same as following line
self.xcur = codes[0:1, :]
if self.rankweight is False: # use the score difference as weight
                # the B normalizer goes here: a larger cohort of codes gives more gradient estimates
                self.weights = (scores[:] - scores[0]) / self.B  # / self.mu
else: # use a function of rank as weight, not really gradient.
if self.maximize is False: # note for weighted recombination, the maximization flag is here.
code_rank = np.argsort(np.argsort(scores[:])) # add - operator it will do maximization.
else:
code_rank = np.argsort(np.argsort(-scores[:]))
# Consider do we need to consider the basis code and score here? Or no?
# Note the weights here are internally normalized s.t. sum up to 1, no need to normalize more.
self.weights = rankweight(len(scores), mu=self.BKeep)[code_rank]
# map the rank to the corresponding weight of recombination
# estimate gradient from the codes and scores
# HAgrad = self.weights[1:] @ (codes[1:] - self.xcur) / self.B # it doesn't matter if it includes the 0 row!
tang_codes_aug = np.concatenate((np.zeros((1, self.tang_codes.shape[1])), self.tang_codes), axis=0)
HAgrad = self.weights[np.newaxis,
:] @ tang_codes_aug # self.tang_codes # Changed to take the current location into account.
            normgrad = self.weights[np.newaxis, 1:] @ (self.code_norms - norm(self.xcur))  # recombine the norms to get the radial (norm) gradient
print("Estimated Angular Gradient Norm %f" % norm(HAgrad))
print("Estimated Radial Gradient Norm %f" % normgrad)
mov_sign = -1 if (not self.maximize) and (not self.rankweight) else 1
normnew = np.minimum(self.max_norm, norm(
self.xcur) + mov_sign * self.lr_norm * normgrad) # use the new norm to normalize ynew
self.xnew = ExpMap(self.xcur, mov_sign * self.lr_sph * HAgrad) # add - operator it will do maximization.
self.xnew = renormalize(self.xnew, normnew)
# Generate new sample by sampling from Gaussian distribution
self.innerU = randn(self.B, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + (
(self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
self.tang_codes = self.mu_sph * self.outerV # m + sig * Normal(0,C)
self.tang_codes = orthogonalize(self.xnew, self.tang_codes) # Tangent vectors of exploration
new_norms = norm(self.xnew) + self.mu_norm * randn(self.B)
new_norms = np.minimum(self.max_norm, new_norms)
new_samples = zeros((self.B + 1, N))
new_samples[0:1, :] = self.xnew
new_samples[1:, ] = ExpMap(self.xnew, self.tang_codes)
new_samples[1:, ] = renormalize(new_samples[1:, ], new_norms)
print("norm of new samples", norm(new_samples, axis=1))
self.code_norms = new_norms # doesn't include the norm of the basis vector.
if (self._istep + 1) % self.Hupdate_freq == 0:
# add more samples to next batch for hessian computation
self.hess_comp = True
self.HinnerU = randn(self.HB, N)
            H_pos_samples = self.xnew + self.mu_sph * self.HinnerU  # note: this class defines no `self.mu`; mu_sph is used as the probe scale
            H_neg_samples = self.xnew - self.mu_sph * self.HinnerU
new_samples = np.concatenate((new_samples, H_pos_samples, H_neg_samples), axis=0)
self._istep += 1
return new_samples
#%
class HessEstim_Gauss:
"""Code to generate samples and estimate Hessian from it"""
def __init__(self, space_dimen):
self.dimen = space_dimen
self.HB = 0
self.std = 2
def GaussSampling(self, xmean, batch=100, std=2):
xmean = xmean.reshape(1, -1)
self.std = std
self.HB = batch
self.HinnerU = randn(self.HB, self.dimen) # / sqrt(self.dimen) # make it unit var along the code vector dimension
H_pos_samples = xmean + self.std * self.HinnerU
H_neg_samples = xmean - self.std * self.HinnerU
new_samples = np.concatenate((xmean, H_pos_samples, H_neg_samples), axis=0)
return new_samples
def HessEstim(self, scores):
fbasis = scores[0]
fpos = scores[-2 * self.HB:-self.HB]
fneg = scores[-self.HB:]
weights = abs(
(fpos + fneg - 2 * fbasis) / 2 / self.std ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
# self.HessV.shape = (HB, HB); self.HessD.shape = (HB,), self.HessUC.shape = (HB, dimen)
# self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f" % ((self.HessD ** 2).sum()))
return self.HessV, self.HessD, self.HessUC
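# --- Hypothetical usage sketch (not part of the original code) ---
# The estimator is used in two phases: GaussSampling() proposes paired probes around a point,
# an external scorer evaluates them (a toy quadratic below), and HessEstim() recovers the SVD
# factors of the estimated Hessian from those scores.
def _demo_hess_estim(n_dim=40, batch=50):
    estimator = HessEstim_Gauss(n_dim)
    xmean = np.zeros(n_dim)
    samples = estimator.GaussSampling(xmean, batch=batch, std=2.0)  # (1 + 2*batch, n_dim)
    scores = -np.sum(samples ** 2, axis=1)                          # toy objective
    HessV, HessD, HessUC = estimator.HessEstim(scores)
    return HessD  # estimated spectrum (singular values of the weighted probe matrix)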
#%
class HessAware_Gauss_DC:
"""Gaussian Sampling method for estimating Hessian"""
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, Lambda=0.9, Hupdate_freq=5,
maximize=True, max_norm=300, rankweight=False, nat_grad=False):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of the Gaussian distribution to estimate gradient
assert Lambda > 0
self.Lambda = Lambda # diagonal regularizer for Hessian matrix
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}, equals self.innerU @ H^{-1/2}
self.xnew = np.zeros((1, self.dimen)) # new base point
self.xscore = 0
self.Hupdate_freq = int(Hupdate_freq) # Update Hessian (add additional samples every how many generations)
self.HB = population_size # Batch size of samples to estimate Hessian, can be different from self.B
self.HinnerU = np.zeros((self.HB, self.dimen)) # sample deviation vectors for Hessian construction
# SVD of the weighted HinnerU for Hessian construction
self.HessUC = np.zeros((self.HB, self.dimen)) # Basis vector for the linear subspace defined by the samples
self.HessD = np.zeros(self.HB) # diagonal values of the Lambda matrix
self.HessV = np.zeros((self.HB, self.HB)) # seems not used....
self.HUDiag = np.zeros(self.HB)
self.hess_comp = False
self._istep = 0 # step counter
self.maximize = maximize # maximize / minimize the function
self.code_stored = np.array([]).reshape((0, self.dimen))
self.score_stored = np.array([])
self.N_in_samp = 0
self.max_norm = max_norm
self.nat_grad = nat_grad # use the natural gradient definition, or normal gradient.
self.rankweight = rankweight
def new_generation(self, init_score, init_code):
self.xscore = init_score
self.score_stored = np.array([])
self.xnew = init_code
self.code_stored = np.array([]).reshape((0, self.dimen))
self.N_in_samp = 0
def compute_hess(self, scores, Lambda_Frac=100):
'''Currently only use part of the samples to estimate hessian, maybe need more '''
fbasis = self.xscore
fpos = scores[:self.HB]
fneg = scores[-self.HB:]
weights = abs((fpos + fneg - 2 * fbasis) / 2 / self.mu ** 2 / self.HB) # use abs to enforce positive definiteness
C = sqrt(weights[:, np.newaxis]) * self.HinnerU # or the sqrt may not work.
# H = C^TC + Lambda * I
self.HessV, self.HessD, self.HessUC = np.linalg.svd(C, full_matrices=False)
self.Lambda = (self.HessD ** 2).sum() / Lambda_Frac
self.HUDiag = 1 / sqrt(self.HessD ** 2 + self.Lambda) - 1 / sqrt(self.Lambda)
print("Hessian Samples Spectrum", self.HessD)
print("Hessian Samples Full Power:%f \nLambda:%f" % ((self.HessD ** 2).sum(), self.Lambda) )
def compute_grad(self, scores):
# add the new scores to storage
self.score_stored = np.concatenate((self.score_stored, scores), axis=0) if self.score_stored.size else scores
if self.rankweight is False: # use the score difference as weight
            # the B normalizer goes here: a larger cohort of codes gives more gradient estimates
            self.weights = (self.score_stored - self.xscore) / self.score_stored.size  # / self.mu
# assert(self.N_in_samp == self.score_stored.size)
else: # use a function of rank as weight, not really gradient.
# Note descent check **could be** built into ranking weight?
# If not better just don't give weights to that sample
if self.maximize is False: # note for weighted recombination, the maximization flag is here.
code_rank = np.argsort(np.argsort( self.score_stored)) # add - operator it will do maximization.
else:
code_rank = np.argsort(np.argsort(-self.score_stored))
# Consider do we need to consider the basis code and score here? Or no?
# Note the weights here are internally normalized s.t. sum up to 1, no need to normalize more.
self.weights = rankweight(len(self.score_stored), mu=20)[code_rank] # map the rank to the corresponding weight of recombination
# only keep the top 20 codes and recombine them.
if self.nat_grad: # if or not using the Hessian to rescale the codes
hagrad = self.weights @ (self.code_stored - self.xnew) # /self.mu
else:
Hdcode = self.Lambda * (self.code_stored - self.xnew) + (
((self.code_stored - self.xnew) @ self.HessUC.T) * self.HessD **2) @ self.HessUC
hagrad = self.weights @ Hdcode # /self.mu
print("Gradient Norm %.2f" % (np.linalg.norm(hagrad)))
# if self.rankweight is False:
# if self.maximize:
# ynew = radial_proj(self.xnew + self.lr * hagrad, max_norm=self.max_norm)
# else:
# ynew = radial_proj(self.xnew - self.lr * hagrad, max_norm=self.max_norm)
# else: # if using rankweight, then the maximization if performed in the recombination step.
# ynew = radial_proj(self.xnew + self.lr * hagrad, max_norm=self.max_norm)
mov_sign = -1 if (not self.maximize) and (not self.rankweight) else 1
ynew = radial_proj(self.xnew + mov_sign * self.lr * hagrad, max_norm=self.max_norm)
return ynew
def generate_sample(self, samp_num=None, hess_comp=False):
        ''' Generate new samples around self.xnew (plus paired Hessian probe samples when hess_comp is True). '''
N = self.dimen
# Generate new sample by sampling from Gaussian distribution
if hess_comp:
# self.hess_comp = True
self.HinnerU = randn(self.HB, N)
H_pos_samples = self.xnew + self.mu * self.HinnerU
H_neg_samples = self.xnew - self.mu * self.HinnerU
new_samples = np.concatenate((H_pos_samples, H_neg_samples), axis=0)
# new_samples = radial_proj(new_samples, self.max_norm)
else:
new_samples = zeros((samp_num, N))
self.innerU = randn(samp_num, N) # Isotropic gaussian distributions
self.outerV = self.innerU / sqrt(self.Lambda) + (
(self.innerU @ self.HessUC.T) * self.HUDiag) @ self.HessUC # H^{-1/2}U
# new_samples[0:1, :] = self.xnew
new_samples[:, :] = self.xnew + self.mu * self.outerV # m + sig * Normal(0,C) self.mu *
new_samples = radial_proj(new_samples, self.max_norm)
self.code_stored = np.concatenate((self.code_stored, new_samples), axis=0) if self.code_stored.size else new_samples
self.N_in_samp += samp_num
return new_samples
#%
class HessAware_ADAM_DC:
"""Gaussian Sampling method for estimating Hessian"""
def __init__(self, space_dimen, population_size=40, lr=0.1, mu=1, nu=0.9, maximize=True, max_norm=300):
self.dimen = space_dimen # dimension of input space
self.B = population_size # population batch size
self.mu = mu # scale of estimating gradient
self.nu = nu # update rate for D
self.lr = lr # learning rate (step size) of moving along gradient
self.grad = np.zeros((1, self.dimen)) # estimated gradient
self.D = np.ones((1, self.dimen)) # running average of gradient square
self.Hdiag = np.ones((1, self.dimen)) # Diagonal of estimated Hessian
self.innerU = np.zeros((self.B, self.dimen)) # inner random vectors with covariance matrix Id
self.outerV = np.zeros((self.B, self.dimen)) # outer random vectors with covariance matrix H^{-1}
self.xnew =
|
np.zeros((1, self.dimen))
|
numpy.zeros
|
#!/usr/bin/python3
'''
Abstract:
    This is a program for plotting the probability distribution of labels.
Usage:
plot_prob_dist.py [AI dir]
Editor and Practicer:
Jacob975
##################################
# Python3 #
# This code is made in python3 #
##################################
20180730
####################################
update log
20180730 version alpha 1:
1. The code works
20191016 version alpha 2:
1. Assign star as blue, YSO as red.
'''
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import tensorflow as tf
import numpy as np
import time
from sys import argv
import os
import itertools
from colour import Color
from sed_test_cnn import bias_variable, weight_variable
from convert_lib import ensemble_mjy_to_mag
import convert_lib
# Assign RGB color to represent stars, galaxies, and YSOs.
def assign_color(color_code):
bgr_color_code = np.transpose([ color_code[:,2],
color_code[:,1],
color_code[:,0]
])
sgys_color = [Color(rgb = tuple(bgr_color_code[i])).hex_l for i in range(len(bgr_color_code))]
sgys_color = np.asarray(sgys_color)
return sgys_color
def plot_prob(arti_mag, sgys_color, sort_order):
# Print the color for each IR3 slice
print ("IR3")
for i, IR3 in enumerate(IR3_arti_mag):
if i%10 != 0:
continue
if i%20 == 0:
plt.close()
fig = plt.figure(
figsize = (8,8)
)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(
xs = arti_mag[np.where(arti_mag[:,0] == IR3[0]), 0],
ys = arti_mag[np.where(arti_mag[:,0] == IR3[0]), 1],
zs = arti_mag[np.where(arti_mag[:,0] == IR3[0]), 2],
zdir='z',
s=20,
c = sgys_color[np.where(arti_mag[:,0] == IR3[0])],
depthshade=False)
plt.tick_params(
top='off',
bottom='off',
left='off',
right='off',
labelleft='off',
labelbottom='off',
)
ax.set_xlim(
|
np.amin(IR3_arti_mag[:,0])
|
numpy.amin
|
import os
import unittest
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.keras.backend as K
from tensorflow import keras
import EggNet
from EggNet.Reader import MnistDataDownloader, MnistDataReader, DataSetType
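# --- Assumed helper (not part of the original snippet) ---
# The tests below call `indices(...)`, whose definition is not included here; a minimal
# implementation returning the positions where the predicate holds is assumed:
def indices(a, func):
    return [i for (i, val) in enumerate(a) if func(val)]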
class MnistConvTestCase(unittest.TestCase):
def test_blur(self):
k = EggNet.make_gauss_kernel()
cl = EggNet.Conv2dLayer(in_channels=1, out_channels=1, kernel_size=5)
loader = MnistDataDownloader("../../test/MNIST/")
path_img, path_lbl = loader.get_path(DataSetType.TRAIN)
reader = MnistDataReader(path_img, path_lbl)
for lbl, img in reader.get_next(4):
img = img.astype(np.float) / 255.0
img = np.reshape(img, newshape=[-1, 28, 28, 1])
k = np.reshape(k, newshape=[k.shape[0], k.shape[1], 1, 1])
cl.kernel = k
img_out = cl(img)
# Check the dimensions
self.assertEqual(img_out.shape, (4, 28, 28, 1))
# Uncomment to see the image
img_out = np.reshape(img_out, newshape=[1, 4 * 28, 28, 1])
img_out = np.squeeze(img_out)
plt.imshow(img_out, cmap='gray', vmin=0.0, vmax=1.0)
plt.show()
break
def test_tensorflow_parameter_0(self):
r1 = EggNet.ReshapeLayer(newshape=[-1, 28, 28, 1])
cn1 = EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3, activation='relu') # [? 28 28 16]
checkpoint_path = "test/training_1/cp.ckpt"
checkpoint_dir = os.path.abspath(os.path.dirname(checkpoint_path))
os.path.join(checkpoint_dir, "model_config.json")
if not os.path.exists(checkpoint_dir):
raise RuntimeError("There is no trained model data!")
# Reload the model from the 2 files we saved
with open(os.path.join(checkpoint_dir, "model_config.json")) as json_file:
json_config = json_file.read()
model = keras.models.model_from_json(json_config)
model.load_weights(os.path.join(checkpoint_dir, "weights.h5"))
# Print a summary
Ws = model.get_weights()
# See Keras Documentation
# https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
#
        # Default shape of Conv2D kernel weights: (kernel_height, kernel_width, in_channels, out_channels)
        # Default shape of Dense weights: (input_dim, units)
self.assertEqual(cn1.kernel.shape, Ws[0].shape)
self.assertEqual(cn1.b.shape, Ws[1].shape)
# Assign values
cn1.kernel = Ws[0]
cn1.b = Ws[1]
layers = [r1, cn1, ]
interesting_layers = [1] # don't care about reshape layers
n = EggNet.Network(layers)
loader = MnistDataDownloader("../../test/MNIST/")
path_img, path_lbl = loader.get_path(DataSetType.TRAIN)
reader = MnistDataReader(path_img, path_lbl)
for lbls, imgs in reader.get_next(10):
imgs = imgs.astype(np.float) / 255.0
imgs_r = np.reshape(imgs, newshape=[-1, 28, 28, 1])
# Check the tensorflow model
y_keras = model.predict(imgs)
# Keras Model Debug
inp = model.input # input placeholder
outputs = [layer.output for layer in model.layers] # all layer outputs
outputs = [outputs[i] for i in (1, 2, 3, 4, 6, 8)] # remove dropout, reshape
functors = [K.function([inp], [out]) for out in outputs] # evaluation functions
layer_outs = [func([imgs, 1.]) for func in functors]
# print(layer_outs)
# Check the results of the own made NN
y, zs = n.forward_intermediate(imgs_r)
zs = [zs[i] for i in interesting_layers] # remove reshape layers
eps = 0.1
index = 0
for l_keras_out, l_out in zip(layer_outs, zs):
err = np.abs((l_keras_out - l_out).flatten())
# print(l_keras_out - l_out)
# err_image = 1.0 * (np.abs(l_keras_out - l_out) > eps)
# # err_image = np.reshape(err_image[], newshape=(1, -1, 28, 1))
# err_image = np.squeeze(err_image[0, :, :, 0])
# plt.imshow(err_image, vmin=0.0, vmax=1.0, cmap='gray')
# plt.show()
right_indices = indices(err < eps, lambda b: b)
false_indices = indices(err > eps, lambda b: b)
wrong_values = err[false_indices]
# print(wrong_values)
if not np.all(right_indices):
print("error in layer ", index)
index += 1
lbls_pred_keras = y_keras.argmax(axis=1)
lbls_pred = y.argmax(axis=1)
print("Original: ", lbls.reshape(-1))
print("Keras: ", lbls_pred_keras.reshape(-1))
print("Our Model: ", lbls_pred.reshape(-1))
break
# img_out = np.reshape(imgs, newshape=[1, -1, 28, 1])
# img_out = np.squeeze(img_out)
# plt.imshow(img_out, cmap='gray', vmin=0.0, vmax=1.0)
# plt.show()
def test_tensorflow_parameter(self):
r1 = EggNet.ReshapeLayer(newshape=[-1, 28, 28, 1])
cn1 = EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3, activation='relu') # [? 28 28 16]
mp1 = EggNet.MaxPool2dLayer(size=2) # [? 14 14 16]
cn2 = EggNet.Conv2dLayer(in_channels=16, out_channels=32, kernel_size=3, activation='relu') # [? 14 14 32]
mp2 = EggNet.MaxPool2dLayer(size=2) # [? 7 7 32]
r2 = EggNet.ReshapeLayer(newshape=[-1, 32 * 7 * 7])
fc1 = EggNet.FullyConnectedLayer(input_size=32 * 7 * 7, output_size=64, activation='relu')
fc2 = EggNet.FullyConnectedLayer(input_size=64, output_size=10, activation='softmax')
checkpoint_path = "test/training_1/cp.ckpt"
checkpoint_dir = os.path.abspath(os.path.dirname(checkpoint_path))
os.path.join(checkpoint_dir, "model_config.json")
if not os.path.exists(checkpoint_dir):
raise RuntimeError("There is no trained model data!")
# Reload the model from the 2 files we saved
with open(os.path.join(checkpoint_dir, "model_config.json")) as json_file:
json_config = json_file.read()
model = keras.models.model_from_json(json_config)
model.load_weights(os.path.join(checkpoint_dir, "weights.h5"))
# Print a summary
Ws = model.get_weights()
# K0 = Ws[0]
# plt.imshow(K0[:,:,1,1], vmin=0.0, vmax=1.0, cmap='gray')
# plt.show()
# See Keras Documentation
# https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
#
        # Default shape of Conv2D kernel weights: (kernel_height, kernel_width, in_channels, out_channels)
        # Default shape of Dense weights: (input_dim, units)
self.assertEqual(cn1.kernel.shape, Ws[0].shape)
self.assertEqual(cn1.b.shape, Ws[1].shape)
self.assertEqual(cn2.kernel.shape, Ws[2].shape)
self.assertEqual(cn2.b.shape, Ws[3].shape)
self.assertEqual(fc1.W.shape, Ws[4].shape)
self.assertEqual(fc1.b.shape, Ws[5].shape)
self.assertEqual(fc2.W.shape, Ws[6].shape)
self.assertEqual(fc2.b.shape, Ws[7].shape)
# Assign values
cn1.kernel = Ws[0]
cn1.b = Ws[1]
cn2.kernel = Ws[2]
cn2.b = Ws[3]
fc1.W = Ws[4]
fc1.b = Ws[5]
fc2.W = Ws[6]
fc2.b = Ws[7]
layers = [r1, cn1, mp1, cn2, mp2, r2, fc1, fc2]
interesting_layers = [1, 2, 3, 4, 6, 7] # don't care about reshape layers
net = EggNet.Network(layers)
loader = MnistDataDownloader("../../test/MNIST/")
path_img, path_lbl = loader.get_path(DataSetType.TRAIN)
reader = MnistDataReader(path_img, path_lbl)
for lbls, imgs in reader.get_next(20):
imgs = imgs.astype(np.float) / 255.0
imgs_r =
|
np.reshape(imgs, newshape=[-1, 28, 28, 1])
|
numpy.reshape
|
import logging
import numpy as np
import os
import tempfile
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from functools import lru_cache
import torch
from fvcore.common.file_io import PathManager
from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.evaluation.evaluator import DatasetEvaluator
ids_to_names = {}
ids_to_names[0] = 'hand'
ids_to_names[1] = 'no_contact'
ids_to_names[2] = 'self_contact'
ids_to_names[3] = 'other_person_contact'
ids_to_names[4] = 'object_contact'
class PascalVOCContactHandsEvaluator(DatasetEvaluator):
"""
Evaluate Pascal VOC AP.
It contains a synchronization, therefore has to be called from all ranks.
Note that this is a rewrite of the official Matlab API.
The results should be similar, but not identical to the one produced by
the official API.
"""
def __init__(self, dataset_name):
"""
Args:
dataset_name (str): name of the dataset, e.g., "voc_2007_test"
"""
self._dataset_name = dataset_name
meta = MetadataCatalog.get(dataset_name)
self._anno_file_template = os.path.join(meta.dirname, "Annotations", "{}.xml")
self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
self._class_names = meta.thing_classes
assert meta.year in [2007, 2012], meta.year
self._is_2007 = meta.year == 2007
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
def reset(self):
self._predictions = defaultdict(list) # class name -> list of prediction strings
def process(self, inputs, outputs):
for input, output in zip(inputs, outputs):
image_id = input["image_id"]
instances = output["instances"].to(self._cpu_device)
boxes = instances.pred_boxes.tensor.numpy()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
cats = instances.pred_cats.numpy()
for box, score, _cls, cat in zip(boxes, scores, classes, cats):
xmin, ymin, xmax, ymax = box
nc, sc, pc, oc = cat
#print(nc, sc, pc, oc)
# The inverse of data loading logic in `datasets/pascal_voc.py`
xmin += 1
ymin += 1
self._predictions[_cls].append(
f"{image_id} {score:.10f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f} {nc:.10f} {sc:.10f} {pc:.10f} {oc:.10f}"
)
def evaluate(self):
"""
Returns:
dict: has a key "segm", whose value is a dict of "AP", "AP50", and "AP75".
"""
all_predictions = comm.gather(self._predictions, dst=0)
if not comm.is_main_process():
return
predictions = defaultdict(list)
for predictions_per_rank in all_predictions:
for clsid, lines in predictions_per_rank.items():
predictions[clsid].extend(lines)
del all_predictions
self._logger.info(
"Evaluating {} using {} metric. "
"Note that results do not use the official Matlab API.".format(
self._dataset_name, 2007 if self._is_2007 else 2012
)
)
with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
res_file_template = os.path.join(dirname, "{}.txt")
aps = defaultdict(list) # iou -> ap per class
for cls_id, cls_name in enumerate(self._class_names):
lines = predictions.get(cls_id, [""])
with open(res_file_template.format(cls_name), "w") as f:
f.write("\n".join(lines))
thresh = 50
APs = voc_eval(
res_file_template,
self._anno_file_template,
self._image_set_path,
cls_name,
ovthresh=thresh / 100.0,
use_07_metric=self._is_2007,
)
ret = OrderedDict()
ret["APs"] = {"hand": APs["hand"]["ap"],
"No Contact": APs["no_contact"]["ap"],
"Self Contact": APs["self_contact"]["ap"],
"Other Person Contact": APs["other_person_contact"]["ap"],
"Object Contact": APs["object_contact"]["ap"],
"Mean Contact AP": APs["mAP_contact"]
}
return ret
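    # --- Hypothetical usage sketch (not part of the original code) ---
    # Follows the standard detectron2 evaluation pattern. The dataset name is a placeholder and the
    # dataset is assumed to be registered with `dirname`, `split`, `year` and `thing_classes`
    # metadata, since __init__ reads those fields from MetadataCatalog.
def _demo_evaluate_contact_hands(cfg, model, dataset_name="contact_hands_test"):
    from detectron2.data import build_detection_test_loader
    from detectron2.evaluation import inference_on_dataset
    evaluator = PascalVOCContactHandsEvaluator(dataset_name)
    val_loader = build_detection_test_loader(cfg, dataset_name)
    return inference_on_dataset(model, val_loader, evaluator)  # -> {"APs": {...}} on the main rank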
##############################################################################
#
# Below code is modified from
# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
@lru_cache(maxsize=None)
def parse_rec(filename):
"""Parse a PASCAL VOC xml file."""
with PathManager.open(filename) as f:
tree = ET.parse(f)
objects = []
for obj in tree.findall("object"):
obj_struct = {}
obj_struct["name"] = obj.find("name").text
obj_struct["pose"] = obj.find("pose").text
obj_struct["truncated"] = int(obj.find("truncated").text)
obj_struct["difficult"] = int(obj.find("difficult").text)
bbox = obj.find("bndbox")
obj_struct["bbox"] = [
int(bbox.find("xmin").text),
int(bbox.find("ymin").text),
int(bbox.find("xmax").text),
int(bbox.find("ymax").text),
]
contact_state = obj.find("contact_state").text
contact_state = contact_state.split(',')[0:4]
cats = [float(c) for c in contact_state]
obj_struct["contact_state"] = cats
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
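# --- Illustrative example (not part of the original code) ---
# A tiny precision/recall curve worked through the standard (non-07) interpolation:
#   rec  = np.array([0.0, 0.5, 1.0])
#   prec = np.array([1.0, 1.0, 0.5])
#   voc_ap(rec, prec, use_07_metric=False)   # -> 0.5*1.0 + 0.5*0.5 = 0.75
# The VOC-07 11-point variant gives 8.5/11 ~= 0.77 for the same curve.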
def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
"""
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
APs = {}
mAP_contact = 0
for cat_idx in range(5):
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# first load gt
# read list of images
with PathManager.open(imagesetfile, "r") as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# load annots
recs = {}
for imagename in imagenames:
recs[imagename] = parse_rec(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj["name"] == classname]
bbox = np.array([x["bbox"] for x in R])
difficult = np.array([x["difficult"] for x in R]).astype(np.bool)
gt_cats = np.array([x["contact_state"] for x in R])
if cat_idx > 0:
# Process gt boxes to remove ones marked with unsure contact states
bbox_orig = np.array([x["bbox"] for x in R])
keepmask = gt_cats[:, cat_idx-1] < 2
bbox = bbox_orig[keepmask]
gt_cats = gt_cats[keepmask]
difficult = difficult[keepmask]
unsure_bbox = bbox_orig[~keepmask]
# Select gt boxes with contact state 'cat_idx-1'.
bbox = bbox[gt_cats[:, cat_idx-1] == 1, :]
difficult = difficult[gt_cats[:, cat_idx-1] == 1]
det = [False] * bbox.shape[0]
npos = npos + bbox.shape[0]
if cat_idx > 0:
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det, "unsure_bbox": unsure_bbox}
else:
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
# read dets
detfile = detpath.format(classname)
with open(detfile, "r") as f:
lines = f.readlines()
splitlines = [x.strip().split(" ") for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:6]] for x in splitlines]).reshape(-1, 4)
det_cats = np.array([[float(z) for z in x[6:]] for x in splitlines]).reshape(-1, 4)
if cat_idx > 0:
# Multiply contact score with detection score for joint detection and contact
confidence = confidence * det_cats[:, cat_idx-1]
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# Process detections which overlaps with unsure boxes
if cat_idx > 0:
nd = len(image_ids)
indicator = []
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
unsure_bbox = R["unsure_bbox"].astype(float)
if unsure_bbox.shape[0] > 0:
# compute overlaps
# intersection
ixmin = np.maximum(unsure_bbox[:, 0], bb[0])
iymin = np.maximum(unsure_bbox[:, 1], bb[1])
ixmax = np.minimum(unsure_bbox[:, 2], bb[2])
iymax = np.minimum(unsure_bbox[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (unsure_bbox[:, 2] - unsure_bbox[:, 0] + 1.0) * (unsure_bbox[:, 3] - unsure_bbox[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
num_unsure_bbox = len(overlaps)
keepmask_det = np.sum(overlaps==0.0) == num_unsure_bbox
indicator.append(keepmask_det)
else:
indicator.append(True)
BB = BB[indicator, :]
image_ids = [image_ids[i] for i in range(len(image_ids)) if indicator[i]]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax =
|
np.minimum(BBGT[:, 2], bb[2])
|
numpy.minimum
|
import unittest
import numpy as np
from desc.grid import LinearGrid
from desc.basis import polyder_vec, polyval_vec, powers, jacobi, fourier
from desc.basis import PowerSeries, DoubleFourierSeries, FourierZernikeBasis
class TestBasis(unittest.TestCase):
"""Tests Basis classes"""
def test_polyder(self):
"""Tests polyder_vec function
"""
p0 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]])
p1 = polyder_vec(p0, 1)
p2 = polyder_vec(p0, 2)
correct_p1 = np.array([[0, 2, 0], [0, 0, 1], [0, 0, 0], [0, 2, 1]])
correct_p2 = np.array([[0, 0, 2], [0, 0, 0], [0, 0, 0], [0, 0, 2]])
np.testing.assert_allclose(p1, correct_p1, atol=1e-8)
np.testing.assert_allclose(p2, correct_p2, atol=1e-8)
def test_polyval(self):
"""Tests polyval_vec function
"""
p = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]])
x = np.linspace(0, 1, 11)
correct_vals = np.array([x**2, x,
|
np.ones_like(x)
|
numpy.ones_like
|
import argparse
import decoding_data
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas
import plots
parser = argparse.ArgumentParser()
parser.add_argument("decoder_group_dir", type=str, help="path to decoder group directory")
parser.add_argument('--include-conditional', dest='include_conditional', action='store_true')
parser.set_defaults(include_conditional=False)
parser.add_argument('--include-output', dest='include_output', action='store_true')
parser.set_defaults(include_output=False)
args = parser.parse_args()
decoder_group_dir = args.decoder_group_dir
include_conditional = args.include_conditional
include_output = args.include_output
out_dir = decoder_group_dir
regression_stats_path = decoding_data.get_group_regression_stats_path(decoder_group_dir)
regression_data_path = decoding_data.get_group_regression_data_path(decoder_group_dir)
df = pandas.read_csv(regression_stats_path)
df_individual = pandas.read_csv(regression_data_path)
network_ids = df_individual["network_id"].unique()
n_networks = len(network_ids)
key_label_dict = {
"hidden_state": "state (t)",
"hidden_gates_tplus1": "gates (t+1)",
"hidden_all": "state (t)\nand\ngates (t+1)",
"output": "output (t)",
"io_confidence": "optimal confidence (t)",
"io_evidence": "optimal log-odds (t)",
"io_lr_tplus1": "optimal learning rate (t+1)",
"io_confidence_0": r"$(1 | 0)$",
"io_confidence_1": r"$(1 | 1)$",
"io_confidence_xt": r"$(1 | x_{t})$",
"io_confidence_1minusxt": r"$(1 | \overline{x_{t}})$",
"io_evidence_0": r"$(1 | 0)$",
"io_evidence_1": r"$(1 | 1)$",
"io_evidence_xt": r"$(1 | x_{t})$",
"io_evidence_1minusxt": r"$(1 | \overline{x_{t}})$",
"io_lr_tplus1_0": r"$(1 | 0)$",
"io_lr_tplus1_1": r"$(1 | 1)$",
"io_lr_tplus1_xt": r"$(1 | x_{t})$",
"io_lr_tplus1_1minusxt": r"$(1 | \overline{x_{t}})$",
}
plots.configure_plot_style()
color = "#6bbceb"
predictor_keys = decoding_data.get_confidence_predictor_keys(decoder_group_dir)
if include_output:
predictor_keys.insert(0, "output")
n_groups = len(predictor_keys)
is_markov = "io_confidence_1" in df["outcome"].unique()
if is_markov:
# Markov case
outcome_formats = ["{:}_0", "{:}_1"]
if include_conditional:
outcome_formats += ["{:}_xt", "{:}_1minusxt"]
outcome_kinds = ["io_confidence", "io_evidence", "io_lr_tplus1"]
outcome_keys_by_kind = [ [ s.format(kind) for s in outcome_formats] for kind in outcome_kinds ]
for i_kind, kind in enumerate(outcome_kinds):
outcome_keys = outcome_keys_by_kind[i_kind]
n_outcomes = len(outcome_keys)
figsize = (6.4, 4.8 * n_groups * n_outcomes / 3.)
ypos = np.arange(n_outcomes)
# fig = plt.figure(figsize=figsize)
fig, axes = plt.subplots(nrows=n_groups, sharex=True,
# figsize=figsize
)
plt.subplots_adjust(hspace=0.05)
plt.xlabel(r"median variance explained ($r^2$)")
plt.xlim(0, 100.2)
plt.xticks(range(0, 101, 10))
axes[-1].xaxis.set_major_formatter(plots.get_formatter_percent())
plt.suptitle(key_label_dict[kind].capitalize())
for i_predictor, predictor in enumerate(predictor_keys):
r2_medians = np.empty((n_outcomes))
r2_errors = np.empty((2, n_outcomes))
r2_values =
|
np.empty((n_outcomes, n_networks))
|
numpy.empty
|
# A system that uses a genetic algorithm to generate a target sentence
import random
import matplotlib.pyplot as plt
import numpy as np
import string
import seaborn as sns
sns.set()
def fitness_function(individual, target_sentence='Hello World'):
"""
computes the score of the individual based on its performance
approaching the target sentence.
"""
assert len(target_sentence) == len(individual)
score = np.sum([
individual[i] == target_sentence[i]
for i in range(len(target_sentence))
])
return score
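# Editor's sketch (not part of the original file): the score is simply the number of
# positions at which the candidate matches the target, so an exact copy scores len(target).
def _fitness_example():
    assert fitness_function(list("Hello World")) == 11
    assert fitness_function(list("Hxllo Wxrld")) == 9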
# Discrete
class GeneticAlgorithm:
def __init__(self,
fitness_function,
num_attributes=2,
population_size=100,
crossover_prob=.75,
mutation_prob=.05):
self.fitness_function = fitness_function
self.num_attributes = num_attributes
self.population_size = population_size
self.crossover_prob = crossover_prob
self.mutation_prob = mutation_prob
self.population = None
self.population_avg_score = 0
self.fitness_scores = None
self.fittest_individuals = None
def initialize_population(self):
"""
init a population of individuals
args:
num_attributes: length of each individual (attributes)
population_size: number of individuals
returns:
population_size lists of n length each.
"""
attributes = []
for attribute in range(self.num_attributes):
attributes.append(
np.random.choice(
list(string.punctuation + string.ascii_letters +
string.whitespace),
size=self.population_size))
self.population = np.array(attributes).T
def compute_fitness_score(self):
"""
computing the fitness score of the population.
args:
individual: numpy array representing the chromosomes of the parent.
returns:
population_size lists of n length each.
"""
scores = np.array([
self.fitness_function(individual) for individual in self.population
])
self.fitness_scores = scores
def roulette_wheel_selection(self):
"""
Select the fittest individuals based on their fitness scores.
each individual is associated with its index in the input array.
---
Args:
fitness_scores: numpy array of fitness score of each individual
Returns:
parents: np array of two individuals chosen from the population.
"""
sum_scores = np.sum(np.abs(self.fitness_scores))
selection_prob = np.abs(self.fitness_scores) / sum_scores
parents = random.choices(self.population, weights=selection_prob, k=2)
return parents
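    # Editor's note (illustrative): selection is fitness-proportionate; for example, with
    # fitness_scores = [1, 3] the two individuals are drawn with probabilities 0.25 and 0.75.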
def run(self):
def cross_over(parents):
"""
produces a new individual by combining the genetic information of both parents.
args:
individual_1: numpy array representing the chromosomes of the first parent.
individual_2: numpy array representing the chromosomes of the second parent.
returns:
child: newly created individual by cross over of the two parents.
"""
if np.random.uniform() <= self.crossover_prob:
parent_1, parent_2 = parents
crossover_point = np.random.choice(
range(1, self.num_attributes))
child = np.concatenate(
(parent_1[:crossover_point], parent_2[crossover_point:]))
return child
else:
return random.choices(parents)[0]
def mutate(individual):
"""
produces a new individual by mutating the original one.
args:
individual: numpy array representing the chromosomes of the parent.
returns:
new: newly mutated individual.
"""
new_individual = []
for attribute in individual:
if np.random.uniform() <= self.mutation_prob:
new_individual.append(random.choice(string.ascii_letters))
else:
new_individual.append(attribute)
return new_individual
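        # Editor's note: with the default mutation_prob of 0.05, each character of a child is
        # independently replaced by a random ASCII letter roughly 5% of the time.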
new_population = []
# reproduce the new population
for _ in range(self.population_size):
parents = self.roulette_wheel_selection()
child = cross_over(parents)
child = mutate(child)
new_population.append(child)
self.population =
|
np.array(new_population)
|
numpy.array
|
"""Test suite for linear trust-region subsolvers."""
import math
from collections import namedtuple
import numpy as np
import pytest
from estimagic.optimization.linear_subsolvers import improve_geomtery_trsbox_linear
from estimagic.optimization.linear_subsolvers import minimize_trsbox_linear
from numpy.testing import assert_array_almost_equal as aaae
@pytest.mark.parametrize(
"model_gradient, lower_bounds, upper_bounds, delta, expected",
[
(
np.array([1.0, 0.0, 1.0]),
-np.ones(3),
np.ones(3),
2.0,
np.array([-1.0, 0.0, -1.0]),
),
(
np.array([0.00028774, 0.00763968, 0.01217268]),
-np.ones(3),
np.ones(3),
9.5367431640625e-05,
np.array([-1.90902854e-06, -5.06859218e-05, -8.07603861e-05]),
),
(
np.array([0.00028774, 0.00763968, 0.01217268]),
np.array([0, -1, -1]),
np.ones(3),
0.1,
np.array([0.0, -5.31586927e-02, -8.47003742e-02]),
),
(
np.arange(5) * 0.1,
-np.ones(5),
np.ones(5),
0.1,
np.array([0.0, -0.01825742, -0.03651484, -0.05477226, -0.07302967]),
),
(
np.arange(4, -1, -1) * 0.1,
-np.ones(5),
np.ones(5),
0.1,
np.array([-0.07302967, -0.05477226, -0.03651484, -0.01825742, 0]),
),
(
np.arange(5) * 0.1,
np.array([-1, -1, 0, -1, -1]),
np.array([1, 1, 0.2, 0.2, 1]),
0.1,
np.array([0.0, -1.96116135e-02, 0.0, -5.88348405e-02, -7.84464541e-02]),
),
(
|
np.arange(4, -1, -1)
|
numpy.arange
|
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
from active_learning_cfd.cfd_regressor import load_regression_history_repetitions
from active_learning_cfd.error_measures import calculate_error, mean_relative_error
from matplotlib import rc
from cycler import cycler
rc("text", usetex=True)
plt.style.use("tableau-colorblind10")
colors = plt.rcParams["axes.prop_cycle"]
linestyle_cycler = colors + cycler(
"linestyle", ["--", "-", ":", "-.", "-", "-", "-", "-", "-", "-"]
)
rc("axes", prop_cycle=linestyle_cycler)
case_name = "orifice"
reference_filename = "reference_solution.csv"
figsize = [6.4, 2.5]
reference_solution = np.genfromtxt(reference_filename, delimiter=",")
X_true = reference_solution[:, 0:-1]
y_true = reference_solution[:, -1]
strategy_list = (
["gp_52_greedyio", "GP (Matern 5/2)"],
["gp_rbf_greedyio", "GP (RBF)"],
["gp_cubic_greedyio", "GP (Cubic)"],
["lin_greedyio", "Linear"],
["rfr_greedyio", "Random forest"],
["svr_greedyio", "Support vector"],
["nn_greedyio", "Multilayer perceptron"],
)
plt.figure(figsize=figsize)
plt.xlabel("Samples")
plt.ylabel(r"$\epsilon$ [\%]")
plt.ylim([0, 80])
for name, label in strategy_list:
regression_history_all = load_regression_history_repetitions(
"regression_" + name, case_name
)
n_samples_list = regression_history_all[0][0].keys()
error_samples = sorted(n_samples_list)
error_avgs = np.zeros(len(n_samples_list))
error_min = np.zeros(len(n_samples_list))
error_max = np.zeros(len(n_samples_list))
error_repetitions = np.zeros(len(n_samples_list))
for i, n_samples in enumerate(error_samples):
error_list = []
for regression_history, features_range in regression_history_all:
error = calculate_error(
X_true,
y_true,
regression_history[n_samples],
features_range,
error_measure=mean_relative_error,
)
error_list.append(error)
error_avgs[i] = np.average(error_list)
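        # 95% Student-t confidence interval of the mean error across repetitions (editor's note)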
error_min[i], error_max[i] = sp.stats.t.interval(
0.95,
len(error_list) - 1,
loc=np.mean(error_list),
scale=sp.stats.sem(error_list),
)
error_repetitions[i] = len(error_list)
plt.plot(error_samples, error_avgs * 100, label="\\small " + label)
plt.fill_between(error_samples, error_min * 100, error_max * 100, alpha=0.5)
plt.xlim([0, 60])
plt.text(
0.9,
0.9,
"Case 2",
horizontalalignment="left",
verticalalignment="top",
transform=plt.gca().transAxes,
)
plt.tight_layout()
plt.savefig(case_name + "_regressions.png", dpi=400)
sampling_list = (
["gp_52_std", "Variational"],
["gp_52_greedyi", "Greedy I"],
["gp_52_greedyo", "Greedy O"],
["gp_52_greedyio", "Greedy I/O"],
["gp_52_rdm", "Random"],
)
plt.figure(figsize=figsize)
plt.xlabel("Samples")
plt.ylabel(r"$\epsilon$ [\%]")
for name, label in sampling_list:
regression_history_all = load_regression_history_repetitions(
"regression_" + name, case_name
)
n_samples_list = regression_history_all[0][0].keys()
error_samples = sorted(n_samples_list)
error_avgs = np.zeros(len(n_samples_list))
error_min = np.zeros(len(n_samples_list))
error_max = np.zeros(len(n_samples_list))
error_repetitions = np.zeros(len(n_samples_list))
for i, n_samples in enumerate(error_samples):
error_list = []
for regression_history, features_range in regression_history_all:
error = calculate_error(
X_true,
y_true,
regression_history[n_samples],
features_range,
error_measure=mean_relative_error,
)
error_list.append(error)
error_avgs[i] =
|
np.average(error_list)
|
numpy.average
|
"""
Copyright 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy
import numpy as np
from itertools import combinations_with_replacement
from sigpy.signomials import Signomial, relative_coeff_vector
__NUMERIC_TYPES__ = (int, float, np.int_, np.float_)
def sage_dual(s, level=0, additional_cons=None):
"""
:param s: a Signomial object.
:param level: a nonnegative integer
:param additional_cons: a list of CVXPY Constraint objects over the variables in s.c
(unless you are working with SAGE polynomials, there likely won't be any of these).
:return: a CVXPY Problem object representing the dual formulation for s_{SAGE}^{(level)}
In the discussion that follows, let s satisfy s.alpha[0,:] == np.zeros((1,n)).
When level == 0, the returned CVXPY problem has the following explicit form:
min (s.c).T * v
s.t. v[0] == 1
v[i] * ln(v[i] / v[j]) <= (s.alpha[i,:] - s.alpha[j,:]) * mu[i] for i \in N0, j \in Nc0, j != i.
mu[i] \in R^{s.n} for i \in N0
v \in R^{s.m}_{+}
where N = { i : s.c[i] < 0}, N0 = union(N, {0}), Nc = { i : s.c[i] >= 0}, and Nc0 = union(Nc, {0}).
When level > 0, the form of the optimization problem is harder to state explicitly. At a high level, the resultant
CVXPY problem is the same as above, with the following modifications:
(1) we introduce a multiplier signomial
t_mul = Signomial(s.alpha, np.ones(s.m)) ** level,
(2) as well as a constant signomial
t_cst = Signomial(s.alpha, [1, 0, ..., 0]).
(3) Then "s" is replaced by
s_mod == s * t_mul,
(4) and "v[0] == 1" is replaced by
a * v == 1,
where vector "a" is an appropriate permutation of (t_mul * t_cst).c, and finally
(5) the index sets N0 and Nc0 are replaced by
N_I = union(N, I) and Nc_I = union(Nc, I)
for
I = { i | a[i] != 0 }.
"""
# Signomial definitions (for the objective).
s_mod = Signomial(s.alpha_c)
t_mul = Signomial(s.alpha, np.ones(s.m)) ** level
lagrangian = (s_mod - cvxpy.Variable(name='gamma')) * t_mul
s_mod = s_mod * t_mul
# C_SAGE^STAR (v must belong to the set defined by these constraints).
v = cvxpy.Variable(shape=(lagrangian.m, 1), name='v')
constraints = relative_c_sage_star(lagrangian, v)
# Equality constraint (for the Lagrangian to be bounded).
a = relative_coeff_vector(t_mul, lagrangian.alpha)
a = a.reshape(a.size, 1)
constraints.append(a.T * v == 1)
# Objective definition and problem creation.
obj_vec = relative_coeff_vector(s_mod, lagrangian.alpha)
obj = cvxpy.Minimize(obj_vec * v)
if additional_cons is not None:
constraints += additional_cons
prob = cvxpy.Problem(obj, constraints)
# Add fields that we can access later.
prob.s_mod = s_mod
prob.s = s
prob.level = level
return prob
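# Editor's sketch (not part of the original file): typical use of sage_dual, assuming "s"
# is an existing Signomial. The returned object is an ordinary CVXPY Problem, so it is
# solved with any installed solver that handles the exponential cone (e.g. ECOS or SCS).
def _sage_dual_example(s):
    prob = sage_dual(s, level=0)
    prob.solve()
    return prob.value  # optimal value of the dual SAGE relaxation s_{SAGE}^{(level)}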
def relative_c_sage_star(s, v):
"""
Given the Signomial s and a CVXPY variable "v", return a list of CVXPY Constraint objects such
that v is a conic dual variable to the constraint "s.c \in C_{SAGE}(s.alpha)".
:param s: a Signomial object
:param v: a CVXPY Variable with v.size == s.m.
:return a list of CVXPY Constraint objects.
Remark 1: The CVXPY function kl_div operates in a way that differs from the relative entropy function as described
in the literature on SAGE relaxations. Refer to the CVXPY documentation if our usage seems odd.
Remark 2: This implementation is vectorized to minimize the length of the list "constraints". By doing this we
significantly speed up the process of CVXPY converting our problem to its internal standard form.
"""
alpha, c = s.alpha_c_arrays()
if s.m <= 2:
return [v >= 0]
non_constants = [i for i, c_i in enumerate(c) if not isinstance(c_i, __NUMERIC_TYPES__)]
N_I = [i for i, c_i in enumerate(c) if (i in non_constants) or c_i < 0]
Nc_I = [i for i, c_i in enumerate(c) if (i in non_constants) or c_i > 0]
# variable definitions
mu = cvxpy.Variable(shape=(len(N_I), s.n), name=('mu_' + str(v.id)))
# constraints
constraints = []
for i, ii in enumerate(N_I):
# i = the index used for "mu", ii = index used for alpha and v
j_neq_ii = [j for j in Nc_I if j != ii]
expr1 = v[ii] * np.ones((len(j_neq_ii), 1))
expr2 = cvxpy.kl_div(expr1, v[j_neq_ii]) + expr1 - v[j_neq_ii]
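        # Editor's note: cvxpy.kl_div(x, y) == x*log(x/y) - x + y, so expr2 reduces to
        # v[ii]*log(v[ii]/v[j]), the relative-entropy term described in sage_dual's docstring.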
expr3 = (alpha[ii, :] - alpha[j_neq_ii, :]) * mu[i, :].T
constraints.append(expr2 <= cvxpy.reshape(expr3, (len(j_neq_ii), 1)))
constraints.append(v[list(set(N_I + Nc_I))] >= 0)
return constraints
def sage_primal(s, level=0, special_multiplier=None, additional_cons=None):
"""
:param s: a Signomial object.
:param level: a nonnegative integer
:param special_multiplier: an optional parameter, applicable when level > 0. Must be a nonzero
SAGE function.
:param additional_cons: a list of CVXPY Constraint objects over the variables in s.c
(unless you are working with SAGE polynomials, there likely won't be any of these).
:return: a CVXPY Problem object representing the primal formulation for s_{SAGE}^{(level)}
Unlike the sage_dual, this formulation can be stated in full generality without too much trouble.
    We define a multiplier signomial "t" as either the standard multiplier (Signomial(s.alpha, np.ones(s.m))),
or a user-provided multiplier. We then return a CVXPY Problem representing
max gamma
s.t. s_mod.c \in C_{SAGE}(s_mod.alpha)
where s_mod := (t ** level) * (s - gamma).
Our implementation of Signomial objects allows CVXPY variables in the coefficient vector c. As a result, the
mapping "gamma \to s_mod.c" is an affine function that takes in a CVXPY Variable and returns a CVXPY Expression.
This makes it very simple to represent "s_mod.c \in C_{SAGE}(s_mod.alpha)" via CVXPY Constraints. The work defining
the necessary CVXPY variables and constructing the CVXPY constraints is handled by the function "c_sage."
"""
if special_multiplier is None:
t = Signomial(s.alpha, np.ones(s.m))
else:
# noinspection PyTypeChecker
if np.all(special_multiplier.c == 0):
raise RuntimeError('The multiplier must be a nonzero signomial.')
# test if SAGE
prob = sage_feasibility(special_multiplier)
if prob.solve() < 0:
raise RuntimeError('The multiplier must be a SAGE function.')
t = special_multiplier
gamma = cvxpy.Variable(name='gamma')
s_mod = (s - gamma) * (t ** level)
s_mod.remove_terms_with_zero_as_coefficient()
constraints = relative_c_sage(s_mod)
obj = cvxpy.Maximize(gamma)
if additional_cons is not None:
constraints += additional_cons
prob = cvxpy.Problem(obj, constraints)
# Add fields that we can access later.
prob.s_mod = s_mod
prob.s = s
prob.level = level
return prob
def relative_c_sage(s):
"""
Given a signomial "s", return a list of CVXPY Constraint objects over CVXPY Variables c_vars and nu_vars
such that s is SAGE iff c_vars and nu_vars satisfy every constraint in this list.
:param s: a Signomial object (likely with the property that s.c is a CVXPY Expression).
:return: constraints - a list of CVXPY Constraint objects.
"""
if s.m <= 2:
return [cvxpy.vstack(s.c.tolist()) >= 0]
alpha, c = s.alpha_c_arrays()
non_constants = [i for i, c_i in enumerate(c) if not isinstance(c_i, __NUMERIC_TYPES__)]
N_I = [i for i, c_i in enumerate(c) if (i in non_constants) or (c_i < 0)]
c_vars = dict()
nu_vars = dict()
constraints = []
for i in N_I:
c_i, nu_i, constrs_i = relative_c_age(s, i)
c_vars[i] = c_i
nu_vars[i] = nu_i
constraints += constrs_i
# Now the constraints that the c_vars sum to c.
vec_expr = sum(c_vars.values())
c = cvxpy.vstack(c.tolist())
constraints.append(vec_expr == c)
return constraints
def relative_c_age(s, i):
constraints = list()
idx_set = np.arange(s.m) != i
# variable definitions
c_var = cvxpy.Variable(shape=(s.m, 1), name='c^{(' + str(i) + '})_' + str(s))
nu_var = cvxpy.Variable(shape=(s.m-1, 1), name='nu^{(' + str(i) + '})_' + str(s), nonneg=True)
# variable non-negativity constraints
constraints.append(c_var[idx_set] >= 0)
# main constraints
constraints.append(
(s.alpha[idx_set, :] - s.alpha[i, :]).T * nu_var == np.zeros(shape=(s.n, 1))) # convex cover constraint
kl_expr1 = cvxpy.kl_div(nu_var, np.exp(1) * c_var[idx_set])
kl_expr2 = nu_var -
|
np.exp(1)
|
numpy.exp
|
"""RESQML grid module handling IJK cartesian grids."""
# note: only IJK Grid format supported at present
# see also rq_import.py
# Nexus is a registered trademark of the Halliburton Company
import logging
log = logging.getLogger(__name__)
import numpy as np
import resqpy.grid_surface as rqgs
import resqpy.olio.grid_functions as gf
import resqpy.olio.uuid as bu
import resqpy.olio.write_hdf5 as rwh5
import resqpy.olio.xml_et as rqet
import resqpy.crs as rqc
from resqpy.olio.base import BaseResqpy
from ._transmissibility import transmissibility, half_cell_transmissibility
from ._extract_functions import extract_grid_parent, extract_extent_kji, extract_grid_is_right_handed, \
extract_k_direction_is_down, extract_geometry_time_index, extract_crs_uuid, extract_k_gaps, \
extract_pillar_shape, extract_has_split_coordinate_lines, extract_children, extract_stratigraphy, \
extract_inactive_mask, extract_property_collection, set_k_direction_from_points
from ._write_hdf5_from_caches import _write_hdf5_from_caches
from ._write_nexus_corp import write_nexus_corp
from ._defined_geometry import pillar_geometry_is_defined, cell_geometry_is_defined, geometry_defined_for_all_cells, \
set_geometry_is_defined, geometry_defined_for_all_pillars, cell_geometry_is_defined_ref, \
pillar_geometry_is_defined_ref
from ._faults import find_faults, fault_throws, fault_throws_per_edge_per_column
from ._face_functions import clear_face_sets, make_face_sets_from_pillar_lists, make_face_set_from_dataframe, \
set_face_set_gcs_list_from_dict, is_split_column_face, split_column_faces, face_centre, face_centres_kji_01
from ._points_functions import point_areally, point, points_ref, point_raw, unsplit_points_ref, corner_points, \
invalidate_corner_points, interpolated_points, x_section_corner_points, split_x_section_points, \
unsplit_x_section_points, uncache_points, horizon_points, split_horizon_points, \
centre_point_list, interpolated_point, split_gap_x_section_points, \
centre_point, z_corner_point_depths, coordinate_line_end_points, set_cached_points_from_property, \
find_cell_for_point_xy, split_horizons_points
from ._create_grid_xml import _create_grid_xml
from ._pillars import create_column_pillar_mapping, pillar_foursome, pillar_distances_sqr, nearest_pillar, nearest_rod
from ._cell_properties import thickness, volume, pinched_out, cell_inactive, interface_length, interface_vector, \
interface_lengths_kji, interface_vectors_kji, poly_line_for_cell
from ._connection_sets import fault_connection_set, pinchout_connection_set, k_gap_connection_set
from ._xyz import xyz_box, xyz_box_centre, bounding_box, composite_bounding_box, z_inc_down, \
check_top_and_base_cell_edge_directions, local_to_global_crs, global_to_local_crs
from ._pixel_maps import pixel_maps, pixel_map_for_split_horizon_points
import warnings
class Grid(BaseResqpy):
"""Class for RESQML Grid (extent and geometry) within RESQML model object."""
resqml_type = 'IjkGridRepresentation'
@property
def nk_plus_k_gaps(self):
"""Returns the number of layers including any K gaps."""
if self.nk is None:
return None
if self.k_gaps is None:
return self.nk
return self.nk + self.k_gaps
def __init__(self,
parent_model,
uuid = None,
find_properties = True,
geometry_required = True,
title = None,
originator = None,
extra_metadata = {}):
"""Create a Grid object and optionally populate from xml tree.
arguments:
parent_model (model.Model object): the model which this grid is part of
uuid (uuid.UUID, optional): if present, the new grid object is populated from the RESQML object
find_properties (boolean, default True): if True and uuid is present, a
grid property collection is instantiated as an attribute, holding properties for which
this grid is the supporting representation
geometry_required (boolean, default True): if True and no geometry node exists in the xml,
an assertion error is raised; ignored if uuid is None
title (str, optional): citation title for new grid; ignored if loading from xml
originator (str, optional): name of person creating the grid; defaults to login id;
ignored if loading from xml
extra_metadata (dict, optional): dictionary of extra metadata items to add to the grid;
ignored if loading from xml
returns:
a newly created Grid object
notes:
only IJK grids are handled at the moment (the resqml standard also defines 5 other varieties)
:meta common:
"""
# note: currently only handles IJK grids
self.parent_grid_uuid = None #: parent grid when this is a local grid
self.parent_window = None #: FineCoarse cell index mapping info between self and parent grid
self.is_refinement = None #: True indicates self is a refinement wrt. parent; False means coarsening
self.local_grid_uuid_list = None #: LGR & LGC children list
self.grid_representation = None #: flavour of grid, currently 'IjkGrid' or 'IjkBlockGrid'; not much used
self.geometry_root = None #: xml node at root of geometry sub-tree
self.extent_kji = None #: size of grid: (nk, nj, ni)
self.ni = self.nj = self.nk = None #: duplicated extent information as individual integers
self.crs_uuid = None #: uuid of the coordinate reference system used by the grid's geometry
self.crs = None #: Crs object
self.points_cached = None #: numpy array of raw points data; loaded on demand
# Following are only relevant to structured grid varieties
self.grid_is_right_handed = None #: boolean indicating ijk handedness
self.k_direction_is_down = None #: boolean indicating dominant direction of k increase
self.pillar_shape = None #: string: often 'curved' is used, even for straight pillars
self.has_split_coordinate_lines = None #: boolean; affects dimensionality of points array
self.split_pillars_count = None #: int
self.k_gaps = None #: int; number of k gaps, or None
self.k_gap_after_array = None #: 1D numpy bool array of extent nk-1, or None
self.k_raw_index_array = None #: 1D numpy int array of extent nk, or None
self.geometry_defined_for_all_pillars_cached = None
self.geometry_defined_for_all_cells_cached = None
self.xyz_box_cached = None #: numpy array of shape (2, 3) being (min max, x y z)
self.property_collection = None #: GridPropertyCollection object
self.inactive = None #: numpy bool array: inactive cell mask (not native resqml - derived from active property)
self.all_inactive = None #: numpy bool indicating whether all cells are inactive
self.active_property_uuid = None #: uuid of property holding active cell boolean array (used to populate inactive)
self.pinchout = None #: numpy bool array: pinchout mask, only set on demand (not native resqml)
self.grid_skin = None #: outer skin of grid as a GridSkin object, computed and cached on demand
self.stratigraphic_column_rank_uuid = None #: optional reference for interpreting stratigraphic units
self.stratigraphic_units = None #: optional array of unit indices (one per layer or K gap)
self.time_index = None #: optional time index for dynamic geometry
self.time_series_uuid = None #: optional time series for dynamic geometry
super().__init__(model = parent_model,
uuid = uuid,
title = title,
originator = originator,
extra_metadata = extra_metadata)
if not self.title:
self.title = 'ROOT'
if uuid is not None:
if geometry_required:
assert self.geometry_root is not None, 'grid geometry not present in xml'
if find_properties:
self.extract_property_collection()
def _load_from_xml(self):
# Extract simple attributes from xml and set as attributes in this resqpy object
grid_root = self.root
assert grid_root is not None
self.grid_representation = 'IjkGrid' # this attribute not much used
self.extract_extent_kji()
self.nk = self.extent_kji[0] # for convenience available as individual attribs as well as np triplet
self.nj = self.extent_kji[1]
self.ni = self.extent_kji[2]
self.geometry_root = rqet.find_tag(grid_root, 'Geometry')
if self.geometry_root is None:
self.geometry_defined_for_all_pillars_cached = True
self.geometry_defined_for_all_cells_cached = True
self.pillar_shape = 'straight'
self.has_split_coordinate_lines = False
self.k_direction_is_down = True # arbitrary, as 'down' is rather meaningless without a crs
if self.extra_metadata is not None:
crs_uuid = self.extra_metadata.get('crs uuid')
if crs_uuid is not None:
self.set_crs(crs_uuid)
else:
self.extract_crs_uuid()
self.set_crs()
self.extract_has_split_coordinate_lines()
self.extract_grid_is_right_handed()
pillar_geometry_is_defined(self) # note: if there is no geometry at all, resqpy sets this True
cell_geometry_is_defined(self) # note: if there is no geometry at all, resqpy sets this True
self.extract_pillar_shape()
self.extract_k_direction_is_down()
self.extract_geometry_time_index()
self.extract_k_gaps()
if self.geometry_root is None:
assert not self.k_gaps, 'K gaps present in grid without geometry'
self.extract_parent()
self.extract_children()
# self.create_column_pillar_mapping() # mapping now created on demand in other methods
self.extract_inactive_mask()
self.extract_stratigraphy()
def set_modified(self, update_xml = False, update_hdf5 = False):
"""Assigns a new uuid to this grid; also calls set_modified() for parent model.
arguments:
update_xml (boolean, default False): if True, the uuid is modified in the xml tree
for the grid part
update_hdf5: (boolean, default False): if True, the uuid in the hdf5 internal path names
for the datasets (arrays) for the grid are updated
returns:
the new uuid for this grid object
notes:
a resqml object should be thought of as immutable; therefore when modifying an object,
        it is preferable to assign it a new unique identifier which this method does for a grid;
the hdf5 internal path names held in xml are only updated if both update_xml and update_hdf5
are True;
if the grid object has been created using the Model.copy_part() method, it is not
necessary to call this function as a new uuid will already have been assigned;
NB: relationships are not updated by this function, including the relationship to the
hdf5 external part
"""
old_uuid = self.uuid
self.uuid = bu.new_uuid()
if old_uuid is not None:
log.info('changing uuid for grid from: ' + str(old_uuid) + ' to: ' + str(self.uuid))
else:
log.info('setting new uuid for grid: ' + str(self.uuid))
if update_xml:
rqet.patch_uuid_in_part_root(self.root, self.uuid)
self.model.add_part('obj_IjkGridRepresentation', self.uuid, self.root)
self.model.remove_part(rqet.part_name_for_object('obj_IjkGridRepresentation', old_uuid))
if update_hdf5:
hdf5_uuid_list = self.model.h5_uuid_list(self.root)
for ext_uuid in hdf5_uuid_list:
hdf5_file = self.model.h5_access(ext_uuid, mode = 'r+')
rwh5.change_uuid(hdf5_file, old_uuid, self.uuid)
if update_xml and update_hdf5:
self.model.change_uuid_in_hdf5_references(self.root, old_uuid, self.uuid)
self.model.set_modified()
return self.uuid
def cell_count(self, active_only = False, non_pinched_out_only = False, geometry_defined_only = False):
"""Returns number of cells in grid; optionally limited by active, non-pinched-out, or having geometry.
arguments:
active_only (boolean, default False): if True, the count of active cells is returned
non_pinched_out_only (boolean, default False): if True, the count of cells with vertical
thickness greater than 0.001 (units are crs vertical units) is returned
geometry_defined_only (boolean, default False): if True, the count of cells which have a
defined geometry is returned (a zero thickness cell may still have a defined geometry)
returns:
integer being the number of cells in the grid
"""
# todo: elsewhere: setting of active array from boolean array or zero pore volume
if not (active_only or non_pinched_out_only or geometry_defined_only):
return np.prod(self.extent_kji)
if non_pinched_out_only:
self.pinched_out(cache_pinchout_array = True)
return self.pinchout.size - np.count_nonzero(self.pinchout)
if active_only:
if self.all_inactive:
return 0
if self.inactive is not None:
return self.inactive.size - np.count_nonzero(self.inactive)
else:
geometry_defined_only = True
if geometry_defined_only:
if geometry_defined_for_all_cells(self, cache_array = True):
return np.prod(self.extent_kji)
return np.count_nonzero(self.array_cell_geometry_is_defined)
return None
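    # Editor's note (illustrative): cell_count() with no flags is simply nk * nj * ni, while
    # cell_count(active_only = True) discounts the cells flagged in the inactive mask.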
def natural_cell_index(self, cell_kji0):
"""Returns a single integer for the cell, being the index into a flattened array."""
return (cell_kji0[0] * self.nj + cell_kji0[1]) * self.ni + cell_kji0[2]
def natural_cell_indices(self, cell_kji0s):
"""Returns a numpy integer array with a value for each of the cells, being the index into a flattened array.
argument:
cell_kji0s: numpy integer array of shape (..., 3) being a list of cell indices in kji0 protocol
returns:
numpy integer array of shape (...,) being the equivalent natural cell indices (for a flattened array of cells)
"""
return (cell_kji0s[..., 0] * self.nj + cell_kji0s[..., 1]) * self.ni + cell_kji0s[..., 2]
def denaturalized_cell_index(self, c0):
"""Returns a 3 element cell_kji0 index (as a tuple) for the cell with given natural index."""
k0, ji0 = divmod(c0, self.nj * self.ni)
j0, i0 = divmod(ji0, self.ni)
return (k0, j0, i0)
def denaturalized_cell_indices(self, c0s):
"""Returns an integer array holding kji0 indices for the cells with given natural indices.
argument:
c0s: numpy integer array of shape (..., 3) being natural cell indices (for a flattened array)
returns:
numpy integer array of shape (..., 3) being the equivalent kji0 protocol cell indices
"""
k0s, ji0s = divmod(c0s, self.nj * self.ni)
j0s, i0s = divmod(ji0s, self.ni)
return np.stack((k0s, j0s, i0s), axis = -1)
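    # Editor's note (illustrative): with extent (nk, nj, ni) = (2, 3, 4),
    # natural_cell_index((1, 2, 3)) == (1 * 3 + 2) * 4 + 3 == 23 and
    # denaturalized_cell_index(23) == (1, 2, 3).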
def resolve_geometry_child(self, tag, child_node = None):
"""If xml child node is None, looks for tag amongst children of geometry root.
arguments:
tag (string): the tag of the geometry child node of interest
child_node (optional): the already resolved xml root of the child, or None
returns:
xml node of child of geometry node for this grid, which matches tag
note:
if child_node argument is not None, it is simply returned;
if child_node is None, the geometry node for this grid is scanned for a child with matching tag
"""
if child_node is not None:
return child_node
return rqet.find_tag(self.geometry_root, tag)
def _set_k_raw_index_array(self):
"""Sets the layering raw index array based on the k gap after boolean array."""
if self.k_gap_after_array is None:
self.k_raw_index_array = None
return
self.k_raw_index_array = np.empty((self.nk,), dtype = int)
gap_count = 0
for k in range(self.nk):
self.k_raw_index_array[k] = k + gap_count
if k < self.nk - 1 and self.k_gap_after_array[k]:
gap_count += 1
assert gap_count == self.k_gaps, 'inconsistency in k gap data'
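    # Editor's note (illustrative): with nk = 3 and k_gap_after_array = [True, False]
    # (a single K gap after layer 0), the resulting k_raw_index_array is [0, 2, 3].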
def set_parent(self, parent_grid_uuid, self_is_refinement, parent_window):
"""Set relationship with respect to a parent grid.
arguments:
parent_grid_uuid (uuid.UUID): the uuid of the parent grid
self_is_refinement (boolean): if True, this grid is a refinement of the subset of the parent grid;
if False, this grid is a coarsening
parent_window: (olio.fine_coarse.FineCoarse object): slice mapping information in K, j & I axes; note
that self_is_refinement determines which of the 2 grids is fine and which is coarse
"""
if self.parent_grid_uuid is not None:
log.warning('overwriting parent grid information')
self.parent_grid_uuid = parent_grid_uuid
if parent_grid_uuid is None:
self.parent_window = None
self.is_refinement = None
else:
parent_window.assert_valid()
self.parent_window = parent_window
self.is_refinement = self_is_refinement
def actual_pillar_shape(self, patch_metadata = False, tolerance = 0.001):
"""Returns actual shape of pillars.
arguments:
patch_metadata (boolean, default False): if True, the actual shape replaces whatever was in the metadata
tolerance (float, default 0.001): a length value (in units of grid xy units) used as a Manhattan distance
limit in the xy plane when considering whether a point lies 'on' a straight line
returns:
string: 'vertical', 'straight' or 'curved'
note:
setting patch_metadata True will affect the attribute in this Grid object; however, it will not be
preserved unless the create_xml() method is called, followed at some point with model.store_epc()
"""
pillar_shape = gf.actual_pillar_shape(self.points_ref(masked = False), tolerance = tolerance)
if patch_metadata:
self.pillar_shape = pillar_shape
return pillar_shape
def cache_all_geometry_arrays(self):
"""Loads from hdf5 into memory all the arrays defining the grid geometry.
returns:
None
notes:
call this method if much grid geometry processing is coming up, to save having to worry about
individual caching arguments to many other methods;
this method does not create a column to pillar mapping which will often also be needed;
the arrays are cached as direct attributes to this grid object;
the names, shapes and types of the attributes are:
array_cell_geometry_is_defined (nk, nj, ni) bool
array_pillar_geometry_is_defined (nj + 1, ni + 1) bool
points_cached (nk + 1, nj + 1, ni + 1, 3) or (nk + 1, np, 3) float (np = number of primary pillars)
split_pillar_indices_cached (nps) int (nps = number of primary pillars that are split)
cols_for_split_pillars (npxc) int (npxc = number of column corners using extra pillars due to splitting)
cols_for_split_pillars_cl (npx) int (npx = number of extra pillars due to splitting)
the last 3 are only present when the grid has one or more split pillars;
the split pillar data includes the use of a 'jagged' array (effectively an array of lists represented as
a linear array and a 'cumulative length' index array)
:meta common:
"""
# todo: recheck the description of split pillar arrays given in the doc string
cell_geometry_is_defined(self, cache_array = True)
pillar_geometry_is_defined(self, cache_array = True)
self.point(cache_array = True)
if self.has_split_coordinate_lines:
split_root = None
if not hasattr(self, 'split_pillar_indices_cached'):
split_root = self.resolve_geometry_child('SplitCoordinateLines')
# assert(rqet.node_type(split_root) == 'ColumnLayerSplitCoordinateLines')
pillar_indices_root = rqet.find_tag(split_root, 'PillarIndices')
h5_key_pair = self.model.h5_uuid_and_path_for_node(pillar_indices_root)
self.model.h5_array_element(h5_key_pair,
index = None,
cache_array = True,
object = self,
array_attribute = 'split_pillar_indices_cached',
dtype = 'int')
if not hasattr(self, 'cols_for_split_pillars'):
if split_root is None:
split_root = self.resolve_geometry_child('SplitCoordinateLines')
cpscl_root = rqet.find_tag(split_root, 'ColumnsPerSplitCoordinateLine')
cpscl_elements_root = rqet.find_tag(cpscl_root, 'Elements')
h5_key_pair = self.model.h5_uuid_and_path_for_node(cpscl_elements_root)
self.model.h5_array_element(h5_key_pair,
index = None,
cache_array = True,
object = self,
array_attribute = 'cols_for_split_pillars',
dtype = 'int')
cpscl_cum_length_root = rqet.find_tag(cpscl_root, 'CumulativeLength')
h5_key_pair = self.model.h5_uuid_and_path_for_node(cpscl_cum_length_root)
self.model.h5_array_element(h5_key_pair,
index = None,
cache_array = True,
object = self,
array_attribute = 'cols_for_split_pillars_cl',
dtype = 'int')
def column_is_inactive(self, col_ji0):
"""Returns True if all the cells in the specified column are inactive.
arguments:
col_ji0 (int pair): the (j0, i0) column indices
returns:
boolean: True if all the cells in the column are inactive; False if at least one cell is active
"""
self.extract_inactive_mask()
if self.inactive is None:
return False # no inactive mask indicates all cells are active
return
|
np.all(self.inactive[:, col_ji0[0], col_ji0[1]])
|
numpy.all
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pure Python helper methods for :mod:`bezier.triangle`.
.. |eacute| unicode:: U+000E9 .. LATIN SMALL LETTER E WITH ACUTE
:trim:
"""
import functools
import operator
import numpy as np
from bezier.hazmat import curve_helpers
from bezier.hazmat import helpers as _py_helpers
from bezier.hazmat import intersection_helpers
_MAX_POLY_SUBDIVISIONS = 5
_SIGN = np.sign # pylint: disable=no-member
_FLOAT64 = np.float64 # pylint: disable=no-member
_SAME_CURVATURE = "Tangent curves have same curvature."
_WRONG_CURVE = "Start and end node not defined on same curve"
CLASSIFICATION_T = intersection_helpers.IntersectionClassification
# NOTE: The ``SUBDIVIDE`` matrices are public since used in
# the ``triangle`` module.
LINEAR_SUBDIVIDE_A = (
np.asfortranarray([[2, 1, 1], [0, 1, 0], [0, 0, 1]], dtype=_FLOAT64) / 2.0
)
LINEAR_SUBDIVIDE_B = (
np.asfortranarray([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=_FLOAT64) / 2.0
)
LINEAR_SUBDIVIDE_C = (
np.asfortranarray([[1, 0, 0], [1, 2, 1], [0, 0, 1]], dtype=_FLOAT64) / 2.0
)
LINEAR_SUBDIVIDE_D = (
np.asfortranarray([[1, 0, 0], [0, 1, 0], [1, 1, 2]], dtype=_FLOAT64) / 2.0
)
QUADRATIC_SUBDIVIDE_A = (
np.asfortranarray(
[
[4, 2, 1, 2, 1, 1],
[0, 2, 2, 0, 1, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 2, 1, 2],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
],
dtype=_FLOAT64,
)
/ 4.0
)
QUADRATIC_SUBDIVIDE_B = (
np.asfortranarray(
[
[0, 0, 1, 0, 1, 1],
[0, 1, 0, 1, 1, 2],
[1, 0, 0, 1, 0, 1],
[0, 1, 2, 1, 1, 0],
[2, 1, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0],
],
dtype=_FLOAT64,
)
/ 4.0
)
QUADRATIC_SUBDIVIDE_C = (
np.asfortranarray(
[
[1, 0, 0, 0, 0, 0],
[2, 2, 0, 1, 0, 0],
[1, 2, 4, 1, 2, 1],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 2, 2],
[0, 0, 0, 0, 0, 1],
],
dtype=_FLOAT64,
)
/ 4.0
)
QUADRATIC_SUBDIVIDE_D = (
np.asfortranarray(
[
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[2, 1, 0, 2, 0, 0],
[0, 1, 2, 0, 2, 0],
[1, 1, 1, 2, 2, 4],
],
dtype=_FLOAT64,
)
/ 4.0
)
CUBIC_SUBDIVIDE_A = (
np.asfortranarray(
[
[8, 4, 2, 1, 4, 2, 1, 2, 1, 1],
[0, 4, 4, 3, 0, 2, 2, 0, 1, 0],
[0, 0, 2, 3, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4, 2, 1, 4, 2, 3],
[0, 0, 0, 0, 0, 2, 2, 0, 2, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 2, 1, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
],
dtype=_FLOAT64,
)
/ 8.0
)
CUBIC_SUBDIVIDE_B = (
np.asfortranarray(
[
[0, 0, 0, 1, 0, 0, 1, 0, 1, 1],
[0, 0, 1, 0, 0, 1, 1, 1, 2, 3],
[0, 1, 0, 0, 1, 1, 0, 2, 1, 3],
[1, 0, 0, 0, 1, 0, 0, 1, 0, 1],
[0, 0, 1, 3, 0, 1, 2, 1, 1, 0],
[0, 2, 2, 0, 2, 2, 2, 2, 2, 0],
[3, 1, 0, 0, 2, 1, 0, 1, 1, 0],
[0, 1, 2, 3, 1, 1, 1, 0, 0, 0],
[3, 2, 1, 0, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
],
dtype=_FLOAT64,
)
/ 8.0
)
CUBIC_SUBDIVIDE_C = (
np.asfortranarray(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 2, 0, 0, 1, 0, 0, 0, 0, 0],
[3, 4, 4, 0, 2, 2, 0, 1, 0, 0],
[1, 2, 4, 8, 1, 2, 4, 1, 2, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, 2, 0, 2, 0, 0],
[0, 0, 0, 0, 1, 2, 4, 2, 4, 3],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 2, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
],
dtype=_FLOAT64,
)
/ 8.0
)
CUBIC_SUBDIVIDE_D = (
np.asfortranarray(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[3, 1, 0, 0, 2, 0, 0, 0, 0, 0],
[0, 2, 2, 0, 0, 2, 0, 0, 0, 0],
[0, 0, 1, 3, 0, 0, 2, 0, 0, 0],
[3, 2, 1, 0, 4, 2, 0, 4, 0, 0],
[0, 1, 2, 3, 0, 2, 4, 0, 4, 0],
[1, 1, 1, 1, 2, 2, 2, 4, 4, 8],
],
dtype=_FLOAT64,
)
/ 8.0
)
QUARTIC_SUBDIVIDE_A = (
np.asfortranarray(
[
[16, 8, 4, 2, 1, 8, 4, 2, 1, 4, 2, 1, 2, 1, 1],
[0, 8, 8, 6, 4, 0, 4, 4, 3, 0, 2, 2, 0, 1, 0],
[0, 0, 4, 6, 6, 0, 0, 2, 3, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 2, 4, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 8, 4, 2, 1, 8, 4, 2, 6, 3, 4],
[0, 0, 0, 0, 0, 0, 4, 4, 3, 0, 4, 4, 0, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 2, 3, 0, 0, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 2, 1, 6, 3, 6],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 4],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
],
dtype=_FLOAT64,
)
/ 16.0
)
QUARTIC_SUBDIVIDE_B = (
np.asfortranarray(
[
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1],
[0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 2, 1, 3, 4],
[0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 2, 1, 3, 3, 6],
[0, 1, 0, 0, 0, 1, 1, 0, 0, 2, 1, 0, 3, 1, 4],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 4, 0, 0, 1, 3, 0, 1, 2, 1, 1, 0],
[0, 0, 2, 3, 0, 0, 2, 3, 3, 2, 3, 4, 3, 3, 0],
[0, 3, 2, 0, 0, 3, 3, 2, 0, 4, 3, 2, 3, 3, 0],
[4, 1, 0, 0, 0, 3, 1, 0, 0, 2, 1, 0, 1, 1, 0],
[0, 0, 1, 3, 6, 0, 1, 2, 3, 1, 1, 1, 0, 0, 0],
[0, 3, 4, 3, 0, 3, 3, 3, 3, 2, 2, 2, 0, 0, 0],
[6, 3, 1, 0, 0, 3, 2, 1, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 2, 3, 4, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[4, 3, 2, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=_FLOAT64,
)
/ 16.0
)
QUARTIC_SUBDIVIDE_C = (
np.asfortranarray(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[6, 6, 4, 0, 0, 3, 2, 0, 0, 1, 0, 0, 0, 0, 0],
[4, 6, 8, 8, 0, 3, 4, 4, 0, 2, 2, 0, 1, 0, 0],
[1, 2, 4, 8, 16, 1, 2, 4, 8, 1, 2, 4, 1, 2, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 3, 2, 0, 0, 2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 3, 4, 4, 0, 4, 4, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 1, 2, 4, 8, 2, 4, 8, 3, 6, 4],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 4, 3, 6, 6],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 4],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
],
dtype=_FLOAT64,
)
/ 16.0
)
QUARTIC_SUBDIVIDE_D = (
np.asfortranarray(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 3, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 4, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0],
[6, 3, 1, 0, 0, 6, 2, 0, 0, 4, 0, 0, 0, 0, 0],
[0, 3, 4, 3, 0, 0, 4, 4, 0, 0, 4, 0, 0, 0, 0],
[0, 0, 1, 3, 6, 0, 0, 2, 6, 0, 0, 4, 0, 0, 0],
[4, 3, 2, 1, 0, 6, 4, 2, 0, 8, 4, 0, 8, 0, 0],
[0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 8, 0, 8, 0],
[1, 1, 1, 1, 1, 2, 2, 2, 2, 4, 4, 4, 8, 8, 16],
],
dtype=_FLOAT64,
)
/ 16.0
)
_WEIGHTS_SUBDIVIDE0 = np.asfortranarray([1.0, 0.0, 0.0])
_WEIGHTS_SUBDIVIDE1 = np.asfortranarray([0.5, 0.5, 0.0])
_WEIGHTS_SUBDIVIDE2 =
|
np.asfortranarray([0.5, 0.0, 0.5])
|
numpy.asfortranarray
|
import torch
import torch.nn as nn
import numpy as np
import argparse
import soundfile as sf
import os
from analyzer import Tanhize, read_whole_features, pw2wav
from torch.autograd import Variable
from datetime import datetime
parser = argparse.ArgumentParser(description='Voice Convert.py')
parser.add_argument('--corpus_name', default='vcc2016', help='Corpus name')
parser.add_argument('--src', default='SF1', help='source speaker [SF1 - SM2]')
parser.add_argument('--trg', default='TM3', help='target speaker [SF1 - TM3]')
parser.add_argument('--output_dir', default='./logdir', help='root of output dir')
parser.add_argument('--model_name', default='./model/vawgan.pt', help='load ./model/[vawgan.pt]')
parser.add_argument('--file_pattern', default='./dataset/vcc2016/bin/Testing Set/{}/*.bin', help='file pattern')
parser.add_argument('--speaker_list', default='./etc/speakers.tsv', help='Speaker list (one speaker per line)')
args = parser.parse_args()
def nh_to_nchw(x):
return x.reshape(-1,1,513,1)
def convert_f0(f0, src, trg):
print(f0)
print(np.fromfile(os.path.join('./etc', '{}.npf'.format(src)), np.float32))
print(np.fromfile(os.path.join('./etc', '{}.npf'.format(trg)), np.float32))
mu_s, std_s = np.fromfile(os.path.join('./etc', '{}.npf'.format(src)), np.float32)
mu_t, std_t = np.fromfile(os.path.join('./etc', '{}.npf'.format(trg)), np.float32)
print('//')
lf0 = np.where(f0 > 1., np.log(f0), f0)
print(lf0)
print('//')
lf0 =
|
np.where(lf0 > 1., (lf0 - mu_s)/std_s * std_t + mu_t, lf0)
|
numpy.where
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
to_list = ak._v2.operations.convert.to_list
"""includes test_0117, test_0110, test_0042, test_0127,
test_0198, test_0446, test_0585, test_0590, test_0612,
test_0724, test_0866, test_0973
"""
def test_flatten_ListOffsetArray():
array = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array)
) == [
1.1,
2.2,
3.3,
4.4,
5.5,
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[1:])
) == [4.4, 5.5]
array = ak._v2.highlevel.Array(
[[[0.0, 1.1, 2.2], [], [3.3, 4.4]], [], [[5.5]], [[], [6.6, 7.7, 8.8, 9.9]]]
).layout
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array)
) == [
[0.0, 1.1, 2.2],
[],
[3.3, 4.4],
[5.5],
[],
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[1:])
) == [
[5.5],
[],
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[:, 1:])
) == [
[],
[3.3, 4.4],
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=2)
) == [
[0.0, 1.1, 2.2, 3.3, 4.4],
[],
[5.5],
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[1:], axis=2)
) == [
[],
[5.5],
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[:, 1:], axis=2)
) == [
[3.3, 4.4],
[],
[],
[6.6, 7.7, 8.8, 9.9],
]
array = ak._v2.highlevel.Array(
np.arange(2 * 3 * 5 * 7).reshape(2, 3, 5, 7).tolist()
).layout
assert (
ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=1)
)
== np.arange(2 * 3 * 5 * 7).reshape(2 * 3, 5, 7).tolist()
)
assert (
ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=2)
)
== np.arange(2 * 3 * 5 * 7).reshape(2, 3 * 5, 7).tolist()
)
assert (
ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=3)
)
== np.arange(2 * 3 * 5 * 7).reshape(2, 3, 5 * 7).tolist()
)
array = ak._v2.highlevel.Array(
ak._v2.operations.convert.from_iter(
np.arange(2 * 3 * 5 * 7).reshape(2, 3, 5, 7).tolist(), highlevel=False
)
).layout
assert (
ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=1)
)
== np.arange(2 * 3 * 5 * 7).reshape(2 * 3, 5, 7).tolist()
)
assert (
ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=2)
)
== np.arange(2 * 3 * 5 * 7).reshape(2, 3 * 5, 7).tolist()
)
assert (
ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=3)
)
== np.arange(2 * 3 * 5 * 7).reshape(2, 3, 5 * 7).tolist()
)
array = ak._v2.highlevel.Array(np.arange(2 * 3 * 5 * 7).reshape(2, 3, 5, 7)).layout
assert (
ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=1)
)
== np.arange(2 * 3 * 5 * 7).reshape(2 * 3, 5, 7).tolist()
)
assert (
ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=2)
)
== np.arange(2 * 3 * 5 * 7).reshape(2, 3 * 5, 7).tolist()
)
assert (
ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=3)
)
== np.arange(2 * 3 * 5 * 7).reshape(2, 3, 5 * 7).tolist()
)
def test_flatten_IndexedArray():
array = ak._v2.highlevel.Array(
[[1.1, 2.2, None, 3.3], None, [], None, [4.4, 5.5], None]
).layout
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array)
) == [
1.1,
2.2,
None,
3.3,
4.4,
5.5,
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[1:])
) == [4.4, 5.5]
array = ak._v2.highlevel.Array(
[
[[0.0, 1.1, 2.2], None, None, [3.3, 4.4]],
[],
[[5.5]],
[[], [6.6, 7.7, 8.8, 9.9]],
]
).layout
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=2)
) == [
[0.0, 1.1, 2.2, 3.3, 4.4],
[],
[5.5],
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[1:], axis=2)
) == [
[],
[5.5],
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[:, 1:], axis=2)
) == [
[3.3, 4.4],
[],
[],
[6.6, 7.7, 8.8, 9.9],
]
array = ak._v2.highlevel.Array(
[
[[0.0, 1.1, 2.2], [3.3, 4.4]],
[],
[[5.5]],
None,
None,
[[], [6.6, 7.7, 8.8, 9.9]],
]
).layout
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=2)
) == [
[0.0, 1.1, 2.2, 3.3, 4.4],
[],
[5.5],
None,
None,
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[1:], axis=2)
) == [
[],
[5.5],
None,
None,
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[:, 1:], axis=2)
) == [
[3.3, 4.4],
[],
[],
None,
None,
[6.6, 7.7, 8.8, 9.9],
]
array = ak._v2.highlevel.Array(
[
[[0.0, 1.1, None, 2.2], None, [], None, [3.3, 4.4]],
None,
[],
[[5.5]],
None,
[[], [6.6, None, 7.7, 8.8, 9.9], None],
]
).layout
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array)
) == [
[0.0, 1.1, None, 2.2],
None,
[],
None,
[3.3, 4.4],
[5.5],
[],
[6.6, None, 7.7, 8.8, 9.9],
None,
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=2)
) == [
[0.0, 1.1, None, 2.2, 3.3, 4.4],
None,
[],
[5.5],
None,
[6.6, None, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[1:], axis=2)
) == [
None,
[],
[5.5],
None,
[6.6, None, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[:, 1:], axis=2)
) == [
[3.3, 4.4],
None,
[],
[],
None,
[6.6, None, 7.7, 8.8, 9.9],
]
content = ak._v2.operations.convert.from_iter(
[[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]], highlevel=False
)
index = ak._v2.index.Index64(np.array([2, 1, 0, 3, 3, 4], dtype=np.int64))
array = ak._v2.contents.IndexedArray(index, content)
assert to_list(array) == [
[3.3, 4.4],
[],
[0.0, 1.1, 2.2],
[5.5],
[5.5],
[6.6, 7.7, 8.8, 9.9],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array)
) == [
3.3,
4.4,
0.0,
1.1,
2.2,
5.5,
5.5,
6.6,
7.7,
8.8,
9.9,
]
content = ak._v2.operations.convert.from_iter(
[[[0.0, 1.1, 2.2], [], [3.3, 4.4]], [], [[5.5]], [[], [6.6, 7.7, 8.8, 9.9]]],
highlevel=False,
)
index = ak._v2.index.Index64(np.array([2, 2, 1, 0, 3], dtype=np.int64))
array = ak._v2.contents.IndexedArray(index, content)
assert to_list(array) == [
[[5.5]],
[[5.5]],
[],
[[0.0, 1.1, 2.2], [], [3.3, 4.4]],
[[], [6.6, 7.7, 8.8, 9.9]],
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=2)
) == [
[5.5],
[5.5],
[],
[0.0, 1.1, 2.2, 3.3, 4.4],
[6.6, 7.7, 8.8, 9.9],
]
def test_flatten_RecordArray():
array = ak._v2.highlevel.Array(
[
{"x": [], "y": [[3, 3, 3]]},
{"x": [[1]], "y": [[2, 2]]},
{"x": [[2], [2]], "y": [[1]]},
{"x": [[3], [3], [3]], "y": [[]]},
]
).layout
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array, axis=2)
) == [
{"x": [], "y": [3, 3, 3]},
{"x": [1], "y": [2, 2]},
{"x": [2, 2], "y": [1]},
{"x": [3, 3, 3], "y": []},
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[1:], axis=2)
) == [
{"x": [1], "y": [2, 2]},
{"x": [2, 2], "y": [1]},
{"x": [3, 3, 3], "y": []},
]
assert ak._v2.operations.convert.to_list(
ak._v2.operations.structure.flatten(array[:, 1:], axis=2)
) == [
{"x": [], "y": []},
{"x": [], "y": []},
{"x": [2], "y": []},
{"x": [3, 3], "y": []},
]
def test_flatten_UnionArray():
content1 = ak._v2.operations.convert.from_iter(
[[1.1], [2.2, 2.2], [3.3, 3.3, 3.3]], highlevel=False
)
content2 = ak._v2.operations.convert.from_iter(
[[[3, 3, 3], [3, 3, 3], [3, 3, 3]], [[2, 2], [2, 2]], [[1]]], highlevel=False
)
tags = ak._v2.index.Index8(
|
np.array([0, 1, 0, 1, 0, 1], dtype=np.int8)
|
numpy.array
|
"""
Tests that appear in the documentation. If they break, the documentation is wrong!
If there is any change to the code, please also update the examples to
accommodate the change.
"""
from autode.species import Species
from autode.species import Molecule
from autode.config import Config
from autode.mol_graphs import split_mol_across_bond
from autode.atoms import Atom
import numpy as np
import os
here = os.path.dirname(os.path.abspath(__file__))
def test_species():
species = Species(name='species', atoms=None, charge=0, mult=1)
assert species.n_atoms == 0
h2 = Species(name='H2', charge=0, mult=1, atoms=[Atom('H'), Atom('H')])
assert h2.n_atoms == 2
# Expecting both atoms to be initialised at the origin
assert np.linalg.norm(h2.atoms[0].coord - h2.atoms[1].coord) < 1E-6
atom1, atom2 = h2.atoms
atom1.translate(vec=
|
np.array([1.0, 0.0, 0.0])
|
numpy.array
|
'''
This file is used to generate voxel data sets from the meshes by voxelizing their vertices.
The high-level overview: supply a list of directory paths to where the stl files are housed; this file then extracts the vertex data and builds the voxel grids.
'''
import os
from coord_change import Global2Local_Coord
import scipy.io as sio
import skimage
import numpy as np
import trimesh
import pandas as pd
import sys
from scipy import ndimage
import h5py
import pickle
def stl_load_to_vertex_array(path_to_stl, bone):
'''
    This function takes the path to the frame location for a given laterality and returns a mesh object loaded for the laterality and patient specified in that path.
input:
path_to_stl --> the path to specific laterality ~/data/activity/patient/laterality
bone --> either 'Tibia' or 'Femur' to specify which bone to load
output:
mesh --> returns a trimesh mesh object that has been loaded
'''
last_path_split = path_to_stl.split(os.sep)[-1]
new_path = os.path.join(os.path.normpath(path_to_stl[:-3]), 'stl')
if last_path_split.lower() == 'lt':
if bone.lower() == 'femur':
mesh = trimesh.load(os.path.join(new_path, 'LFemur.stl'))
elif bone.lower() == 'tibia':
mesh = trimesh.load(os.path.join(new_path, 'LTibia.stl'))
elif last_path_split.lower() == 'rt':
if bone.lower() == 'femur':
mesh = trimesh.load(os.path.join(new_path, 'RFemur.stl'))
elif bone.lower() == 'tibia':
mesh = trimesh.load(os.path.join(new_path, 'RTibia.stl'))
return mesh
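# Illustrative usage sketch (hedged: the path below is an assumption based on the
# ~/data/activity/patient/laterality layout described in the docstring, not a real dataset):
#   femur_mesh = stl_load_to_vertex_array(os.path.join('data', 'gait', 'patient01', 'Lt'), bone='Femur')
#   femur_vertices = np.asarray(femur_mesh.vertices)  # (N, 3) vertex coordinates for voxel_from_array below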
def voxel_from_array(mesh_vertices, spacing=0.5):
'''
    This function takes a matrix of mesh-vertex locations and transforms the vertices into a binary voxel data set, placing a 1 in a bin whenever a corresponding point falls in it. It returns the voxelized matrix.
input:
mesh_vertices --> expects np.array of locations of mesh vertices
spacing --> the spacing of the voxels in mm
output:
    bin_mat --> a binary voxelized matrix with 1's in the bins that contain a corresponding vertex
'''
mesh_min_vec = np.min(mesh_vertices, axis=0)
mesh_min_mat = mesh_vertices - mesh_min_vec
range_vec = mesh_vertices.max(axis=0) - mesh_vertices.min(axis=0)
bins_vec = np.ceil(range_vec / spacing)
bin_mat = np.zeros(bins_vec.astype('int32') + 2)
    for indx in range(mesh_vertices.shape[0]):
        x0 = int(np.floor(mesh_min_mat[indx, 0] / spacing))
        y0 = int(np.floor(mesh_min_mat[indx, 1] / spacing))
        z0 = int(np.floor(mesh_min_mat[indx, 2] / spacing))
        x1 = int(np.ceil(mesh_min_mat[indx, 0] / spacing)) + 1
        y1 = int(np.ceil(mesh_min_mat[indx, 1] / spacing)) + 1
        z1 = int(np.ceil(mesh_min_mat[indx, 2] / spacing)) + 1
        # mark every voxel touched by this vertex (floor..ceil range on each axis)
        bin_mat[x0:x1, y0:y1, z0:z1] = 1
return bin_mat.astype('int8')
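# Minimal sketch of the voxelization above (assumed synthetic input, not from the pipeline):
# three vertices spanning 2 mm per axis at 1 mm spacing give a ceil(2/1) + 2 = 4 voxel grid
# per axis, with one occupied bin per vertex.
#   pts = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
#   vox = voxel_from_array(pts, spacing=1.0)
#   vox.shape  # (4, 4, 4)
#   vox.sum()  # 3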
def extract_stl_to_voxel(mesh_obj, PTS_file, voxelize_dim=0.5):
'''
In sum, this function will:
1) take an STL file loaded as a mesh object and take the PTS file loaded as a pandas object
2) using the PTS file, determine local coordinate frame and shift STL point cloud to new local coordinate frame
3) voxelize the vertices of the point cloud to binary, depending on if a vertex would be in the corresponding voxel
4) return an array of both 3D voxel models for loaded model
    function signature: extract_stl_to_voxel(mesh_obj, PTS_file, voxelize_dim=0.5)
input:
mesh_obj --> loaded trimesh mesh object (stl file)
    PTS_file --> the PTS file loaded as a pandas object
voxelize_dim --> the scale of creating new voxel map
output:
3D binary voxel model as an array
    This function assumes it will be passed the loaded trimesh mesh as an argument and produces a NumPy array of the binary voxel data.
In doing so, this function will also translate the points in the stl file to the local coordinate frame defined through the PTS files:
PTS file: ---> defines the new X and Z directions of the local coordinate system
X Coordinate system: From PTS row 1 to PTS row 0
Z Coordinate system: From PTS row 3 to PTS row 2
The origin of the new coordinate system is defined to be halfway between the two anatomical points, which demarcate the x-axis.
From these two coordinates, we can determine the Y axis, via the cross product of the unit vectors: Y = cross(z,x)
'''
PTS_file = np.array(PTS_file)
X_vec = np.array(PTS_file[0, :] - PTS_file[1, :])
Z_vec_pre =
|
np.array(PTS_file[2, :] - PTS_file[3, :])
|
numpy.array
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
def output_hist(out):
hist, _ = np.histogram(out, range=(-5, 10))
hist = hist.astype("float32")
hist /= float(out.size)
prob = 0.1 * np.ones((10))
return hist, prob
def output_hist_diag(out):
diag_num = min(out.shape)
for i in range(diag_num):
assert abs(out[i][i] - 1.0) < 1e-9
# ignore diagonal elements
out[i][i] = 100
hist, _ = np.histogram(out, range=(-5, 10))
hist = hist.astype("float32")
hist /= float(out.size)
prob = 0.1 * np.ones((10))
return hist, prob
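# Quick illustrative check (assumed, not part of the test suite): for a large sample drawn
# uniformly from [-5, 10), np.histogram with range=(-5, 10) yields 10 bins that each hold
# roughly 10% of the values, which is the property the assertions below rely on.
#   sample = np.random.uniform(-5.0, 10.0, size=(1000, 784)).astype("float32")
#   hist, prob = output_hist(sample)
#   np.allclose(hist, prob, rtol=0, atol=0.01)  # expected True at this sample size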
class TestUniformRandomOp(OpTest):
def setUp(self):
self.op_type = "uniform_random"
self.inputs = {}
self.init_attrs()
self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")}
def init_attrs(self):
self.attrs = {
"shape": [1000, 784],
"min": -5.0,
"max": 10.0,
"seed": 10
}
self.output_hist = output_hist
def test_check_output(self):
self.check_output_customized(self.verify_output)
def verify_output(self, outs):
hist, prob = self.output_hist(np.array(outs[0]))
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
class TestUniformRandomOpWithDiagInit(TestUniformRandomOp):
def init_attrs(self):
self.attrs = {
"shape": [1000, 784],
"min": -5.0,
"max": 10.0,
"seed": 10,
"diag_num": 784,
"diag_step": 784,
"diag_val": 1.0
}
self.output_hist = output_hist_diag
class TestUniformRandomOpSelectedRows(unittest.TestCase):
def get_places(self):
places = [core.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(core.CUDAPlace(0))
return places
def test_check_output(self):
for place in self.get_places():
self.check_with_place(place)
def check_with_place(self, place):
scope = core.Scope()
out = scope.var("X").get_selected_rows()
op = Operator(
"uniform_random",
Out="X",
shape=[4, 784],
min=-5.0,
max=10.0,
seed=10)
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [4, 784])
hist, prob = output_hist(np.array(out.get_tensor()))
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
class TestUniformRandomOpSelectedRowsWithDiagInit(
TestUniformRandomOpSelectedRows):
def check_with_place(self, place):
scope = core.Scope()
out = scope.var("X").get_selected_rows()
op = Operator(
"uniform_random",
Out="X",
shape=[4, 784],
min=-5.0,
max=10.0,
seed=10,
diag_num=4,
diag_step=784,
diag_val=1.0)
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [4, 784])
hist, prob = output_hist_diag(np.array(out.get_tensor()))
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
class TestUniformRandomOpApi(unittest.TestCase):
def test_api(self):
x = fluid.layers.data('x', shape=[16], dtype='float32', lod_level=1)
y = fluid.layers.fc(x,
size=16,
param_attr=fluid.initializer.Uniform(
low=-0.5,
high=0.5,
seed=10,
diag_num=16,
diag_step=16,
diag_val=1.0))
place = fluid.CPUPlace()
x_tensor = fluid.create_lod_tensor(
|
np.random.rand(3, 16)
|
numpy.random.rand
|
# coding: utf-8
import datetime
import ssl
import random
from PIL import ImageFilter, Image
import pytesseract
import numpy
import simplejson as json
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from io import open
class EntrustProp(object):
Limit = 'limit'
Market = 'market'
class Ssl3HttpAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLSv1)
def file2dict(path):
with open(path, encoding='utf-8') as f:
return json.load(f)
def get_stock_type(stock_code):
"""判断股票ID对应的证券市场
匹配规则
['50', '51', '60', '90', '110'] 为 sh
['00', '13', '18', '15', '16', '18', '20', '30', '39', '115'] 为 sz
['5', '6', '9'] 开头的为 sh, 其余为 sz
:param stock_code:股票ID, 若以 'sz', 'sh' 开头直接返回对应类型,否则使用内置规则判断
:return 'sh' or 'sz'"""
assert type(stock_code) is str, 'stock code need str type'
if stock_code.startswith(('sh', 'sz')):
return stock_code[:2]
if stock_code.startswith(
('50', '51', '60', '73', '90', '110', '113', '132', '204', '78')):
return 'sh'
if stock_code.startswith(
('00', '13', '18', '15', '16', '18', '20', '30', '39', '115', '1318')):
return 'sz'
if stock_code.startswith(('5', '6', '9')):
return 'sh'
return 'sz'
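# Illustrative usage (assumed example codes, not from the original project):
#   get_stock_type('sh600000')  # -> 'sh' (explicit prefix is returned directly)
#   get_stock_type('600000')    # -> 'sh' (matches the '60' rule)
#   get_stock_type('000001')    # -> 'sz' (matches the '00' rule)
#   get_stock_type('300750')    # -> 'sz' (matches the '30' rule)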
def recognize_verify_code(image_path, broker='ht'):
"""识别验证码,返回识别后的字符串,使用 tesseract 实现
:param image_path: 图片路径
:param broker: 券商 ['ht', 'yjb', 'gf', 'yh']
:return recognized: verify code string"""
if broker in ['ht', 'yjb']:
return detect_image_result(image_path)
elif broker == 'gf':
return detect_gf_result(image_path)
elif broker == 'yh':
return detect_yh_result(image_path)
def detect_gf_result(image_path):
img = Image.open(image_path)
if hasattr(img, "width"):
width, height = img.width, img.height
else:
width, height = img.size
    # blank out dark pixels; note the tuple comparison is lexicographic, so it effectively keys on the red channel
    for x in range(width):
        for y in range(height):
            if img.getpixel((x, y)) < (100, 100, 100):
                img.putpixel((x, y), (256, 256, 256))
    gray = img.convert('L')
    # binarize: keep only the mid-gray band (68-90) that carries the characters
    two = gray.point(lambda x: 0 if 68 < x < 90 else 256)
min_res = two.filter(ImageFilter.MinFilter)
med_res = min_res.filter(ImageFilter.MedianFilter)
for _ in range(2):
med_res = med_res.filter(ImageFilter.MedianFilter)
res = pytesseract.image_to_string(med_res)
return res.replace(' ', '')
def detect_image_result(image_path):
img = Image.open(image_path)
for x in range(img.width):
for y in range(img.height):
(r, g, b) = img.getpixel((x, y))
if r > 100 and g > 100 and b > 100:
img.putpixel((x, y), (256, 256, 256))
res = pytesseract.image_to_string(img)
return res
def detect_yh_result(image_path):
img = Image.open(image_path)
brightness = list()
for x in range(img.width):
for y in range(img.height):
(r, g, b) = img.getpixel((x, y))
brightness.append(r + g + b)
avgBrightness = int(
|
numpy.mean(brightness)
|
numpy.mean
|
"""QA plots for UM model."""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import NullFormatter
from astroML.stats import binned_statistic_2d
from scipy.ndimage.filters import gaussian_filter
import corner
__all__ = ['plot_logmh_sig_logms_tot', 'plot_logmh_logms_tot', 'display_obs_smf',
'show_smf', 'plot_mtot_minn_smf', 'plot_dsigma_profiles',
'plot_best_fit_scatter_relation', 'plot_best_fit_shmr',
'plot_mcmc_trace', 'plot_mcmc_corner', 'plot_mass_scatter_fsat_trends']
ORG = plt.get_cmap('OrRd')
ORG_2 = plt.get_cmap('YlOrRd')
BLU = plt.get_cmap('PuBu')
BLK = plt.get_cmap('Greys')
PUR = plt.get_cmap('Purples')
GRN = plt.get_cmap('Greens')
plt.rcParams['figure.dpi'] = 100.0
plt.rc('text', usetex=True)
def plot_logmh_sig_logms_tot(logmh_cen, sig_logms_tot, sigms_a, sigms_b):
"""Log Mh v.s. sig(Log Ms_tot)."""
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0.19, right=0.995,
bottom=0.13, top=0.995,
wspace=0.00, hspace=0.00)
ax1 = fig.add_subplot(111)
ax1.grid(linestyle='--', linewidth=2, alpha=0.4, zorder=0)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
ax1.plot(logmh_cen,
sigms_a * (np.asarray(logmh_cen) - 15.3) + sigms_b,
linewidth=3.0, linestyle='--', alpha=0.5)
ax1.scatter(logmh_cen, sig_logms_tot, s=70, alpha=0.8,
edgecolor='k')
ax1.text(0.25, 0.09, r"$a=%5.2f\ b=%5.2f$" % (sigms_a, sigms_b),
verticalalignment='bottom',
horizontalalignment='center',
fontsize=20,
transform=ax1.transAxes)
ax1.set_xlabel(r'$\log M_{\mathrm{vir}}$', fontsize=25)
ax1.set_ylabel(r'$\sigma_{\log M_{\star, \rm Total}}$',
fontsize=28)
return fig
def plot_logmh_logms_tot(logmh, logms_tot, shmr_a, shmr_b):
"""Log Mh v.s. Log Ms_tot."""
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0.19, right=0.995,
bottom=0.13, top=0.995,
wspace=0.00, hspace=0.00)
ax1 = fig.add_subplot(111)
ax1.grid(linestyle='--', linewidth=2, alpha=0.4, zorder=0)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
hexbin = ax1.hexbin(logmh, logms_tot, gridsize=(45, 30),
alpha=0.7, bins='log',
mincnt=10, edgecolor='none', cmap='Oranges')
cbar_ax = fig.add_axes([0.22, 0.92, 0.5, 0.05])
cbar = fig.colorbar(hexbin, cax=cbar_ax, orientation="horizontal")
cbar.set_label(r'$\log \mathrm{N}$')
cbar.solids.set_edgecolor("face")
logmh_cen = np.linspace(np.nanmin(logmh), np.nanmax(logmh), 50)
ax1.plot(logmh_cen, shmr_a * logmh_cen + shmr_b,
linewidth=3.0, linestyle='--', alpha=0.5)
ax1.text(0.75, 0.09, r"$a=%5.2f\ b=%5.2f$" % (shmr_a, shmr_b),
verticalalignment='bottom',
horizontalalignment='center',
fontsize=20,
transform=ax1.transAxes)
ax1.set_xlabel(r'$\log M_{\mathrm{vir}}$', fontsize=25)
ax1.set_ylabel(r'$\log M_{\star, \rm Total}$', fontsize=25)
return fig
def display_obs_smf(obs_smf_mtot, obs_smf_minn, obs_smf_full=None,
label_mtot=r'$M_{\star,\ 100\mathrm{kpc}}$',
label_mfull=r'$M_{\star,\ \mathrm{S82}}$',
label_minn=r'$M_{\star,\ 10\mathrm{kpc}}$'):
"""Display observed stellar mass functions."""
if obs_smf_full is None:
smf_list = [obs_smf_mtot, obs_smf_minn]
label_list = [label_mtot, label_minn]
else:
obs_smf_full['logm_mean'] += 0.1
smf_list = [obs_smf_mtot, obs_smf_minn, obs_smf_full]
label_list = [label_mtot, label_minn, label_mfull]
return show_smf(smf_list, label_list,
text=r'$\mathrm{HSC}$')
def show_smf(smf_list, label_list=None, text=None, loc=1,
legend_fontsize=20):
"""Plot stellar mass functions."""
fig = plt.figure(figsize=(7, 6))
fig.subplots_adjust(left=0.17, right=0.994,
bottom=0.12, top=0.994,
wspace=0.00, hspace=0.00)
ax1 = fig.add_subplot(111)
ax1.grid(linestyle='--', linewidth=2, alpha=0.5, zorder=0)
m_list = ['o', '+', 's', 'h', 'x', 'H', '8', 'v', '<', '>']
s_list = [15, 30, 20, 20, 30, 15, 15, 20, 20, 20]
a_list = [0.4, 0.25, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3]
c_list = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
if label_list is not None:
assert len(smf_list) == len(label_list)
for ii, smf in enumerate(smf_list):
if label_list is not None:
label_use = label_list[ii]
else:
            label_use = '__no_label__'
ax1.fill_between(smf['logm_mean'],
np.log10(smf['smf_low']),
np.log10(smf['smf_upp']),
alpha=a_list[ii],
facecolor=c_list[ii],
label=label_use)
ax1.scatter(smf['logm_mean'],
np.log10(smf['smf']),
marker=m_list[ii], c=c_list[ii],
s=s_list[ii], label='__no_label')
ax1.set_xlim(11.19, 12.35)
ax1.set_ylim(-7.9, -2.4)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
ax1.legend(fontsize=legend_fontsize, loc=loc)
ax1.set_xlabel(r'$\log (M_{\star}/M_{\odot})$',
fontsize=25)
ax1.set_ylabel((r'$\mathrm{d}N/\mathrm{d}\log M_{\star}\ $'
r'$[{\mathrm{Mpc}^{-3}}{\mathrm{dex}^{-1}}]$'),
size=25)
if text is not None:
ax1.text(0.15, 0.06, text,
verticalalignment='bottom',
horizontalalignment='center',
fontsize=25,
transform=ax1.transAxes)
return fig
def plot_mtot_minn_smf(obs_smf_tot, obs_smf_inn, obs_logms_tot, obs_logms_inn,
um_smf_tot, um_smf_inn, logms_mod_tot, logms_mod_inn,
obs_smf_full=None, shmr_a=None, shmr_b=None,
sigms_a=None, sigms_b=None, um_smf_tot_all=None,
not_table=False, x_label='Tot', y_label='Inn'):
"""Plot the UM predicted M100-M10 plane and their SMFs."""
fig, axes = plt.subplots(2, figsize=(7, 9))
fig.subplots_adjust(left=0.145, right=0.995,
bottom=0.085, top=0.995,
wspace=0.00, hspace=0.21)
ax1 = axes[0]
ax2 = axes[1]
# Scatter plot
if len(logms_mod_tot) > len(obs_logms_tot):
ax1.scatter(logms_mod_tot, logms_mod_inn,
label=r'$\mathrm{Model}$',
s=10, alpha=0.6, marker='o',
c='royalblue')
ax1.scatter(obs_logms_tot, obs_logms_inn,
label=r'$\mathrm{Data}$',
s=15, alpha=0.5, marker='+',
c='lightsalmon')
else:
ax1.scatter(obs_logms_tot, obs_logms_inn,
label=r'$\mathrm{Data}$',
s=10, alpha=0.6, marker='o',
c='lightsalmon')
ax1.scatter(logms_mod_tot, logms_mod_inn,
label=r'$\mathrm{Model}$',
s=15, alpha=0.5, marker='+',
c='royalblue')
ax1.legend(fontsize=20, loc='lower right')
ax1.grid(linestyle='--', linewidth=2, alpha=0.3, zorder=0)
ax1.set_xlabel(r'$\log M_{\star,\ \mathrm{%s,\ UM}}$' % x_label, fontsize=25)
ax1.set_ylabel(r'$\log M_{\star,\ \mathrm{%s,\ UM}}$' % y_label, fontsize=25)
ax1.set_xlim(np.nanmin(logms_mod_tot) - 0.09,
np.nanmax(logms_mod_tot) + 0.09)
ax1.set_ylim(np.nanmin(logms_mod_inn) - 0.02,
np.nanmax(logms_mod_inn) + 0.09)
if shmr_a is not None and shmr_b is not None:
seg1 = (r'$\log M_{\star} = %6.3f \times$' % shmr_a)
seg2 = (r'$\log M_{\rm halo} + %6.3f$' % shmr_b)
ax1.text(0.26, 0.91, (seg1 + seg2),
verticalalignment='bottom',
horizontalalignment='center',
fontsize=12,
transform=ax1.transAxes)
if sigms_a is not None and sigms_b is not None:
seg1 = (r'$\sigma(\log M_{\star}) = %6.3f \times$' % sigms_a)
seg2 = (r'$\log M_{\rm halo} + %6.3f$' % sigms_b)
ax1.text(0.26, 0.83, (seg1 + seg2),
verticalalignment='bottom',
horizontalalignment='center',
fontsize=12,
transform=ax1.transAxes)
# Full SMF in the background if available
# +0.1 dex is a magic number to convert S82 SMF from BC03 to
# FSPS model
ax2.grid(linestyle='--', linewidth=2, alpha=0.3, zorder=0)
if obs_smf_full is not None:
ax2.errorbar(obs_smf_full['logm_mean'] + 0.15,
np.log10(obs_smf_full['smf']),
(np.log10(obs_smf_full['smf_upp']) -
np.log10(obs_smf_full['smf'])),
fmt='o', color='seagreen',
ecolor='seagreen',
alpha=0.9, marker='s',
label=r'$\mathrm{Data:\ PRIMUS}$',
zorder=0)
if um_smf_tot_all is not None:
ax2.plot(um_smf_tot_all['logm_mean'],
np.log10(um_smf_tot_all['smf']),
linewidth=1.5, linestyle='--',
c='royalblue',
label='__no_label__')
# SMF plot
ax2.fill_between(obs_smf_tot['logm_mean'],
np.log10(obs_smf_tot['smf_low']),
np.log10(obs_smf_tot['smf_upp']),
facecolor='steelblue',
edgecolor='none',
interpolate=True,
alpha=0.4,
label=r'$\mathrm{Data:\ }M_{\star,\ {\rm %s}}$' % x_label)
ax2.fill_between(obs_smf_inn['logm_mean'],
np.log10(obs_smf_inn['smf_low']),
np.log10(obs_smf_inn['smf_upp']),
facecolor='lightsalmon',
edgecolor='none',
interpolate=True,
alpha=0.4,
label=r'$\mathrm{Data:\ }M_{\star,\ {\rm %s}}$' % y_label)
ax2.scatter(obs_smf_inn['logm_mean'],
np.log10(obs_smf_inn['smf']),
marker='h',
c='lightsalmon',
s=20, label='__no_label__',
alpha=1.0)
ax2.scatter(obs_smf_tot['logm_mean'],
np.log10(obs_smf_tot['smf']),
marker='h',
c='steelblue',
s=20, label='__no_label__',
alpha=1.0)
if isinstance(um_smf_inn, (list,)):
for ii, smf in enumerate(um_smf_inn):
if ii == 0:
if not_table:
ax2.plot(obs_smf_inn['logm_mean'],
np.log10(smf),
linewidth=1, linestyle='-',
c='salmon', alpha=0.7,
label=r'$\mathrm{UM:\ Minn}$')
else:
ax2.plot(smf['logm_mean'],
np.log10(smf['smf']),
linewidth=1, linestyle='-',
c='salmon', alpha=0.7,
label=r'$\mathrm{UM:\ Minn}$')
else:
if not_table:
ax2.plot(obs_smf_inn['logm_mean'],
np.log10(smf),
linewidth=1, linestyle='-',
c='salmon', alpha=0.7,
label='__no_label__')
else:
ax2.plot(smf['logm_mean'],
np.log10(smf['smf']),
linewidth=1, linestyle='-',
c='salmon', alpha=0.7,
label='__no_label__')
else:
if not_table:
ax2.plot(obs_smf_inn['logm_mean'],
np.log10(um_smf_inn),
linewidth=4, linestyle='--',
c='salmon',
label=r'$\mathrm{UM:\ }M_{\star,\ {\rm %s}}$' % y_label)
else:
ax2.plot(um_smf_inn['logm_mean'],
np.log10(um_smf_inn['smf']),
linewidth=4, linestyle='--',
c='salmon',
label=r'$\mathrm{UM:\ }M_{\star,\ {\rm %s}}$' % y_label)
if isinstance(um_smf_tot, (list,)):
for ii, smf in enumerate(um_smf_tot):
if ii == 0:
if not_table:
ax2.plot(obs_smf_tot['logm_mean'],
np.log10(smf),
linewidth=1, linestyle='-',
c='royalblue', alpha=0.7,
label=r'$\mathrm{UM:\ }M_{\star,\ {\rm %s}}$' % x_label)
else:
ax2.plot(smf['logm_mean'],
np.log10(smf['smf']),
linewidth=1, linestyle='-',
c='royalblue', alpha=0.7,
label=r'$\mathrm{UM:\ }M_{\star,\ {\rm %s}}$' % x_label)
else:
if not_table:
ax2.plot(obs_smf_tot['logm_mean'],
np.log10(smf),
linewidth=1, linestyle='-',
c='royalblue', alpha=0.7,
label='__no_label__')
else:
ax2.plot(smf['logm_mean'],
np.log10(smf['smf']),
linewidth=1, linestyle='-',
c='royalblue', alpha=0.7,
label='__no_label__')
else:
if not_table:
ax2.plot(obs_smf_tot['logm_mean'],
np.log10(um_smf_tot),
linewidth=4, linestyle='--',
c='royalblue',
label=r'$\mathrm{UM:\ }M_{\star,\ {\rm %s}}$' % x_label)
else:
ax2.plot(um_smf_tot['logm_mean'],
np.log10(um_smf_tot['smf']),
linewidth=4, linestyle='--',
c='royalblue',
label=r'$\mathrm{UM:\ }M_{\star,\ {\rm %s}}$' % x_label)
ax2.legend(fontsize=15, loc='upper right')
ax2.set_xlabel(r'$\log (M_{\star}/M_{\odot})$',
fontsize=25)
ax2.set_ylabel((r'$\mathrm{d}N/\mathrm{d}\log M_{\star}\ $'
r'$[{\mathrm{Mpc}^{-3}}{\mathrm{dex}^{-1}}]$'),
size=25)
mask_inn = np.log10(obs_smf_inn['smf']) > -7.5
mask_tot = np.log10(obs_smf_tot['smf']) > -7.5
ax2.set_xlim(np.nanmin(obs_smf_inn[mask_inn]['logm_mean']) - 0.15,
np.nanmax(obs_smf_tot[mask_tot]['logm_mean']) + 0.45)
if obs_smf_full is not None:
ax2.set_ylim(np.nanmin(np.log10(obs_smf_inn[mask_inn]['smf']))
- 0.2,
np.nanmax(np.log10(obs_smf_full['smf'])))
else:
ax2.set_ylim(np.nanmin(np.log10(obs_smf_inn[mask_inn]['smf']))
- 0.2,
np.nanmax(np.log10(obs_smf_tot[mask_tot]['smf']))
+ 0.8)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax2.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax2.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
# fig.savefig('asap_mtot_minn_smf.pdf', dpi=100)
return fig
def plot_dsigma_profiles(obs_wl_dsigma, um_wl_profs, um_mhalo=None,
each_col=3, reference=None):
"""Plot the UM predicted weak lensing profiles."""
obs_wl_n_bin = len(obs_wl_dsigma)
if obs_wl_n_bin <= each_col:
n_row = obs_wl_n_bin
n_col = 1
else:
n_row = each_col
n_col = int(np.ceil(obs_wl_n_bin / each_col))
fig = plt.figure(figsize=(4 * n_col, 3.5 * n_row))
fig.subplots_adjust(left=0.08, right=0.995, bottom=0.09, top=0.995,
wspace=0.00, hspace=0.00)
gs = gridspec.GridSpec(n_row, n_col)
gs.update(wspace=0.0, hspace=0.00)
y_min_arr = np.array([np.nanmin(prof['dsigma']) for prof in obs_wl_dsigma])
y_min_arr = np.where(y_min_arr <= 0.0, np.nan, y_min_arr)
y_max_arr = np.array([np.nanmax(prof['dsigma']) for prof in obs_wl_dsigma])
y_min = np.nanmin(y_min_arr) * 0.8
y_max = np.nanmax(y_max_arr) * 1.9
if reference is not None:
ref_prof = obs_wl_dsigma[reference]
else:
ref_prof = None
for ii in range(obs_wl_n_bin):
col_id = int(np.floor(ii / n_row))
row_id = int(n_row - (ii + 1 - col_id * n_row))
ax = plt.subplot(gs[row_id, col_id])
ax.loglog()
ax.grid(linestyle='--', linewidth=1.5, alpha=0.4, zorder=0)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(25)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(25)
if ref_prof is not None:
ax.plot(ref_prof['r_mpc'], ref_prof['dsigma'], linewidth=3.0,
color=GRN(0.8), linestyle='--', alpha=0.9)
# Observed WL profile
obs_prof = obs_wl_dsigma[ii]
ax.errorbar(
obs_prof['r_mpc'], obs_prof['dsigma'], obs_prof['dsigma_err'],
fmt='o', color='salmon', ecolor='lightsalmon', markersize=9, alpha=0.9)
ax.plot(
obs_prof['r_mpc'], obs_prof['dsigma'], linewidth=1.5, color='salmon', alpha=0.5)
if reference is not None and reference == ii:
ax.text(0.04, 0.41, r'$\mathrm{Ref}$',
verticalalignment='center', horizontalalignment='left',
fontsize=23.0, transform=ax.transAxes, color=GRN(0.8),
alpha=1.0)
# Label the mass range
ax.text(0.04, 0.29, r'${\rm Bin: %d}$' % (ii + 1),
verticalalignment='center', horizontalalignment='left',
fontsize=23.0, transform=ax.transAxes, color='k', alpha=1.0)
ax.text(
0.04, 0.18,
r"$\log M_{\rm tot}:[%5.2f,%5.2f]$" % (
obs_prof['min_logm1'], obs_prof['max_logm1']),
verticalalignment='center', horizontalalignment='left',
fontsize=17.0, transform=ax.transAxes, color='k', alpha=1.0)
ax.text(
0.04, 0.08, r"$\log M_{\rm inn}:[%5.2f,%5.2f]$" % (
obs_prof['min_logm2'], obs_prof['max_logm2']),
verticalalignment='center', horizontalalignment='left',
fontsize=17.0, transform=ax.transAxes, color='k', alpha=1.0)
# Predicted WL profile
if isinstance(um_wl_profs[0], (list,)):
for dsig in um_wl_profs:
ax.plot(obs_prof['r_mpc'], dsig[ii],
linewidth=2.5, color='royalblue', alpha=0.7)
else:
ax.scatter(obs_prof['r_mpc'], um_wl_profs[ii], marker='h',
s=20, c='b', alpha=0.9)
ax.plot(obs_prof['r_mpc'], um_wl_profs[ii],
linewidth=4.0, color='royalblue', alpha=0.7)
if um_mhalo is not None:
ax.text(0.54, 0.90, r"$[%5.2f \pm %5.2f]$" % um_mhalo[ii],
verticalalignment='center', horizontalalignment='left',
fontsize=20.0, transform=ax.transAxes, color='royalblue')
# X, Y Limits
x_min = np.min(obs_prof['r_mpc']) * 0.2
x_max = np.max(obs_prof['r_mpc']) * 1.8
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
if col_id != 0:
ax.yaxis.set_major_formatter(NullFormatter())
else:
ax.set_ylabel(r'$\Delta\Sigma$ $[M_{\odot}/{\rm pc}^2]$', fontsize=30)
if row_id == (n_row - 1):
ax.set_xlabel(r'$r_{\rm p}$ ${\rm [Mpc]}$', fontsize=30)
else:
ax.xaxis.set_major_formatter(NullFormatter())
# fig.savefig('asap_dsigma_profs.pdf', dpi=120)
return fig
def plot_best_fit_scatter_relation(sigms_a, sigms_b, min_scatter=0.02):
"""Log Mh v.s. sig(Log Ms_tot)."""
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0.19, right=0.995,
bottom=0.13, top=0.995,
wspace=0.00, hspace=0.00)
ax1 = fig.add_subplot(111)
ax1.grid(linestyle='--', linewidth=2, alpha=0.4, zorder=0)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
logmh_cen = np.linspace(11.5, 15.4, 1000)
sig_ms = sigms_a * (np.asarray(logmh_cen) - 15.3) + sigms_b
sig_ms = np.where(sig_ms <= min_scatter, min_scatter, sig_ms)
ax1.plot(logmh_cen, sig_ms,
linewidth=4.0, linestyle='--', alpha=0.8)
ax1.text(0.25, 0.09, r"$a=%5.2f\ b=%5.2f$" % (sigms_a, sigms_b),
verticalalignment='bottom',
horizontalalignment='center',
fontsize=20,
transform=ax1.transAxes)
ax1.set_xlabel(r'$\log M_{\mathrm{vir}}$', fontsize=25)
ax1.set_ylabel(r'$\sigma_{\log M_{\star, \rm Total}}$',
fontsize=28)
return fig
def plot_best_fit_shmr(shmr_a, shmr_b):
"""Log Mh v.s. Log Ms_tot."""
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0.19, right=0.995,
bottom=0.13, top=0.995,
wspace=0.00, hspace=0.00)
ax1 = fig.add_subplot(111)
ax1.grid(linestyle='--', linewidth=2, alpha=0.4, zorder=0)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
logmh_cen = np.linspace(11.5, 15.0, 50)
ax1.plot(logmh_cen, shmr_a * logmh_cen + shmr_b,
linewidth=5.0, linestyle='--', alpha=0.8)
ax1.text(0.75, 0.09, r"$a=%5.2f\ b=%5.2f$" % (shmr_a, shmr_b),
verticalalignment='bottom',
horizontalalignment='center',
fontsize=20,
transform=ax1.transAxes)
ax1.set_xlabel(r'$\log M_{\mathrm{vir}}$', fontsize=25)
ax1.set_ylabel(r'$\log M_{\star, \rm Total}$', fontsize=25)
return fig
def plot_mcmc_trace(mcmc_chains, mcmc_labels, mcmc_best=None, figsize=None,
mcmc_burnin=None, burnin_alpha=0.2, trace_alpha=0.2):
"""Traceplot for MCMC results."""
if figsize is None:
if mcmc_burnin is not None:
fig = plt.figure(figsize=(12, 15))
else:
fig = plt.figure(figsize=(10, 15))
else:
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(hspace=0.0, wspace=0.0, bottom=0.027, top=0.97,
left=0.06, right=0.94)
# I want the plot of individual walkers to span 2 columns
nparam = len(mcmc_labels)
if mcmc_burnin is not None:
gs = gridspec.GridSpec(nparam, 5)
else:
gs = gridspec.GridSpec(nparam, 3)
if mcmc_best is not None:
assert len(mcmc_best) == len(mcmc_labels)
for ii, param in enumerate(mcmc_labels):
        # Get the chains from the burn-in process and the final sampling process
param_chain = mcmc_chains[:, :, ii]
if mcmc_burnin is not None:
param_burnin = mcmc_burnin[:, :, ii]
# Get the range of Y-axis
y_min = np.min([np.min(param_chain), np.min(param_burnin)])
y_max = np.max([np.max(param_chain), np.max(param_burnin)])
else:
y_min = np.min(param_chain)
y_max = np.max(param_chain)
# Maximum variance of the walkers
max_var = max(np.var(param_chain[:, :], axis=1))
# Trace plot
if mcmc_burnin is None:
ax1 = plt.subplot(gs[ii, :2])
else:
ax1 = plt.subplot(gs[ii, 2:4])
ax1.yaxis.grid(linewidth=1.5, linestyle='--', alpha=0.5)
for walker in param_chain:
ax1.plot(np.arange(len(walker)), walker, alpha=trace_alpha,
drawstyle="steps", color=ORG_2(1.0 - np.var(walker) / max_var))
if mcmc_burnin is None:
ax1.set_ylabel(param, fontsize=28, labelpad=18, color='k')
# Don't show ticks on the y-axis
ax1.tick_params(labelleft=False)
# For the plot on the bottom, add an x-axis label. Hide all others
if ii != (nparam - 1):
ax1.tick_params(labelbottom=False)
else:
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
# Posterior histograms
ax2 = plt.subplot(gs[ii, -1])
ax2.grid(linewidth=1.5, linestyle='--', alpha=0.5)
ax2.hist(
|
np.ravel(param_chain[:, :])
|
numpy.ravel
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import test_utils as tu
from tensorflow.compiler.tests import xla_test
from tensorflow.python.platform import googletest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import gradient_descent
class ConvGraphCachingTest(xla_test.XLATestCase):
def testConvolutionsMatch(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])
with variable_scope.variable_scope("vs", use_resource=True):
y = layers.Conv2D(2,
1,
use_bias=False,
kernel_initializer=init_ops.ones_initializer())(x)
y = layers.Conv2D(2,
1,
use_bias=False,
kernel_initializer=init_ops.ones_initializer())(y)
report = tu.ReportJSON(self, sess)
sess.run(variables.global_variables_initializer())
report.reset()
sess.run(y, {x: np.zeros([1, 4, 4, 2])})
report.parse_log()
# Would fail if there were two convolutions in the graph as they would be
# called conv2d and conv2d_1
ok = [
'__seed*', 'Copy_', 'vs/conv2d/Conv2D/convolution.*/Conv_1x1',
'Copy_'
]
report.assert_all_compute_sets_and_list(ok)
self.assertAllEqual(report.get_ml_type_counts(), [2, 0, 0, 0])
def testConvolutionsDontMatchDifferentTypes(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])
with variable_scope.variable_scope("vs", use_resource=True):
y = layers.Conv2D(2,
1,
use_bias=False,
kernel_initializer=init_ops.ones_initializer(),
dtype=np.float32)(x)
y = math_ops.cast(y, np.float16)
y = layers.Conv2D(2,
1,
use_bias=False,
kernel_initializer=init_ops.ones_initializer(),
dtype=np.float16)(y)
report = tu.ReportJSON(self, sess)
sess.run(variables.global_variables_initializer())
report.reset()
sess.run(y, {x: np.zeros([1, 4, 4, 2])})
report.parse_log()
# Matches two convolutions
ok = [
'__seed*', 'Copy_*weightsRearranged', 'Copy_',
'Copy_vs/*/OnTileCopy-0', 'vs/conv2d/Conv2D/convolution.*/Conv_1x1',
'vs/Cast/convert.*/Cast', 'vs/conv2d_1/Conv2D/convolution.*/Conv_1x1'
]
report.assert_all_compute_sets_and_list(ok)
self.assertAllEqual(report.get_ml_type_counts(), [2, 0, 0, 0])
def testConvolutionsDontMatchDifferentShapes(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])
with variable_scope.variable_scope("vs", use_resource=True):
y = layers.Conv2D(2,
1,
use_bias=False,
kernel_initializer=init_ops.ones_initializer())(x)
y = array_ops.reshape(y, [1, 2, 8, 2])
y = layers.Conv2D(2,
1,
use_bias=False,
kernel_initializer=init_ops.ones_initializer())(y)
report = tu.ReportJSON(self, sess)
sess.run(variables.global_variables_initializer())
report.reset()
sess.run(y, {x:
|
np.zeros([1, 4, 4, 2])
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 4 23:31:45 2021
@author: <NAME>
"""
import numpy as np
from hypergraph_propagation import propagate, prepare_hypergraph_propagation
from utils.retrieval_component import connect_nodup
#retrieval_dataset='roxford'
#retrieval_dataset='rparis'
retrieval_dataset='R1Moxford'
#retrieval_dataset='R1Mparis'
HYPERGRAPH_PROPAGATION=1
#COMMUNITY_SELECTION=0
COMMUNITY_SELECTION=1 # only calculate the uncertainty score, but do not apply geometric verification
#Note: to use the option COMMUNITY_SELECTION=2, you must first extract the local features for all dataset images.
#COMMUNITY_SELECTION=2 # calculate the uncertainty score; if the score is large, use geometric verification to find a new start point
#load the features
if retrieval_dataset=='roxford':
vecs=np.load('features/roxford_np_delg_features/a_global_vecs.npy').T # (2048,4993)
qvecs=np.load('features/roxford_np_delg_features/a_global_qvecs.npy').T #(2048,70)
elif retrieval_dataset=='rparis':
vecs=np.load('features/rparis_np_delg_features/a_global_vecs.npy').T
qvecs=np.load('features/rparis_np_delg_features/a_global_qvecs.npy').T
elif retrieval_dataset=='R1Moxford':
distractors=np.load('features/distractor_np_delg_features/a_global_vecs.npy').T
vecs=np.load('features/roxford_np_delg_features/a_global_vecs.npy').T # (2048,4993)
vecs=np.concatenate((vecs,distractors),axis=1) #()
del distractors
qvecs=np.load('features/roxford_np_delg_features/a_global_qvecs.npy').T #(2048,70)
elif retrieval_dataset=='R1Mparis':
distractors=
|
np.load('features/distractor_np_delg_features/a_global_vecs.npy')
|
numpy.load
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Project: SCRIPT - March 2018
## Contact: <NAME> - <EMAIL>
import sys
import os
import glob
from argparse import ArgumentParser
import soundfile as sf
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from tqdm import tqdm
import subprocess
import scipy.interpolate
import numpy as np
#from generate import read_est_file
empty_array = np.zeros((0,0)) # TODO: const
def interpolate_through_unvoiced(data, vuv=empty_array):
assert len(data.shape) == 2, 'interpolate_through_unvoiced only accepts 2D arrays'
if vuv.size == empty_array.size:
assert data.shape[1] == 1, 'To find voicing from the data itself, use data with only a single channel'
        voiced_ix = np.where( data > 0.0 )[0] ## equiv to np.nonzero(data)
else:
voiced_ix = np.where( vuv > 0.0 )[0]
    mean_voiced = data[voiced_ix, ...].mean(axis=0) ## using fill_value='extrapolate' creates very extreme values where there are long initial/final silences
    ### TODO: this seems to affect denormalisation rather than training; look at extracting stats and even training without regard to interpolated values?
interpolator = scipy.interpolate.interp1d(voiced_ix, data[voiced_ix, ...], kind='linear', \
axis=0, bounds_error=False, fill_value=mean_voiced)
data_interpolated = interpolator(np.arange(data.shape[0])) # .reshape((-1,1)))
voicing_flag =
|
np.zeros((data.shape[0], 1))
|
numpy.zeros
|
"""
Input/Output tools for working with binary data.
The Stata input tools were originally written by <NAME> as part of PyDTA.
You can find more information here http://presbrey.mit.edu/PyDTA
See also
---------
numpy.lib.io
"""
from struct import unpack, calcsize, pack
from struct import error as struct_error
import datetime
import sys
import numpy as np
from numpy.lib._iotools import _is_string_like, easy_dtype
from statsmodels.compatnp.py3k import asbytes, asstr
import statsmodels.tools.data as data_util
from pandas import isnull
def is_py3():
import sys
if sys.version_info[0] == 3:
return True
return False
PY3 = is_py3()
_date_formats = ["%tc", "%tC", "%td", "%tw", "%tm", "%tq", "%th", "%ty"]
def _datetime_to_stata_elapsed(date, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : datetime.datetime
The date to convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
if not isinstance(date, datetime.datetime):
raise ValueError("date should be datetime.datetime format")
stata_epoch = datetime.datetime(1960, 1, 1)
if fmt in ["%tc", "tc"]:
delta = date - stata_epoch
return (delta.days * 86400000 + delta.seconds*1000 +
delta.microseconds/1000)
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
return date
elif fmt in ["%td", "td"]:
return (date- stata_epoch).days
elif fmt in ["%tw", "tw"]:
return (52*(date.year-stata_epoch.year) +
(date - datetime.datetime(date.year, 1, 1)).days / 7)
elif fmt in ["%tm", "tm"]:
return (12 * (date.year - stata_epoch.year) + date.month - 1)
elif fmt in ["%tq", "tq"]:
return 4*(date.year-stata_epoch.year) + int((date.month - 1)/3)
elif fmt in ["%th", "th"]:
return 2 * (date.year - stata_epoch.year) + int(date.month > 6)
elif fmt in ["%ty", "ty"]:
return date.year
else:
raise ValueError("fmt %s not understood" % fmt)
def _stata_elapsed_date_to_datetime(date, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : int
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Examples
--------
    >>> _stata_elapsed_date_to_datetime(52, "%tw")
    datetime.datetime(1961, 1, 1, 0, 0)
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
    half-yearly date - th
        half-years since 1960h1
    yearly date - ty
        years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
#NOTE: we could run into overflow / loss of precision situations here
# casting to int, but I'm not sure what to do. datetime won't deal with
# numpy types and numpy datetime isn't mature enough / we can't rely on
# pandas version > 0.7.1
#TODO: IIRC relative delta doesn't play well with np.datetime?
date = int(date)
stata_epoch = datetime.datetime(1960, 1, 1)
if fmt in ["%tc", "tc"]:
from dateutil.relativedelta import relativedelta
return stata_epoch + relativedelta(microseconds=date*1000)
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
return date
elif fmt in ["%td", "td"]:
return stata_epoch + datetime.timedelta(int(date))
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = datetime.datetime(stata_epoch.year + date // 52, 1, 1)
day_delta = (date % 52 ) * 7
return year + datetime.timedelta(int(day_delta))
elif fmt in ["%tm", "tm"]:
year = stata_epoch.year + date // 12
month_delta = (date % 12 ) + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%tq", "tq"]:
year = stata_epoch.year + date // 4
month_delta = (date % 4) * 3 + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%th", "th"]:
year = stata_epoch.year + date // 2
month_delta = (date % 2) * 6 + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%ty", "ty"]:
if date > 0:
return datetime.datetime(date, 1, 1)
else: # don't do negative years bc can't mix dtypes in column
raise ValueError("Year 0 and before not implemented")
else:
raise ValueError("Date fmt %s not understood" % fmt)
### Helper classes for StataReader ###
class _StataMissingValue(object):
"""
An observation's missing value.
Parameters
-----------
offset
value
Attributes
----------
string
value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
"""
def __init__(self, offset, value):
self._value = value
if type(value) is int or type(value) is long:
            self._str = '.' if value - offset == 1 else '.' + chr(value - offset + 96)
else:
self._str = '.'
string = property(lambda self: self._str, doc="The Stata representation of \
the missing value: '.', '.a'..'.z'")
value = property(lambda self: self._value, doc='The binary representation \
of the missing value.')
def __str__(self): return self._str
__str__.__doc__ = string.__doc__
class _StataVariable(object):
"""
A dataset variable. Not intended for public use.
Parameters
----------
variable_data
Attributes
-----------
format : str
Stata variable format. See notes for more information.
index : int
Zero-index column index of variable.
label : str
Data Label
name : str
Variable name
type : str
Stata data type. See notes for more information.
value_format : str
Value format.
Notes
-----
More information: http://www.stata.com/help.cgi?format
"""
def __init__(self, variable_data):
self._data = variable_data
def __int__(self):
return self.index
def __str__(self):
return self.name
index = property(lambda self: self._data[0], doc='the variable\'s index \
within an observation')
type = property(lambda self: self._data[1], doc='the data type of \
variable\n\nPossible types are:\n{1..244:string, b:byte, h:int, l:long, \
f:float, d:double)')
name = property(lambda self: self._data[2], doc='the name of the variable')
format = property(lambda self: self._data[4], doc='the variable\'s Stata \
format')
value_format = property(lambda self: self._data[5], doc='the variable\'s \
value format')
label = property(lambda self: self._data[6], doc='the variable\'s label')
__int__.__doc__ = index.__doc__
__str__.__doc__ = name.__doc__
class StataReader(object):
"""
Stata .dta file reader.
Provides methods to return the metadata of a Stata .dta file and
a generator for the data itself.
Parameters
----------
file : file-like
A file-like object representing a Stata .dta file.
missing_values : bool
If missing_values is True, parse missing_values and return a
Missing Values object instead of None.
encoding : string, optional
Used for Python 3 only. Encoding to use when reading the .dta file.
Defaults to `locale.getpreferredencoding`
See also
--------
statsmodels.lib.io.genfromdta
Notes
-----
This is known only to work on file formats 113 (Stata 8/9), 114
(Stata 10/11), and 115 (Stata 12). Needs to be tested on older versions.
Known not to work on format 104, 108. If you have the documentation for
older formats, please contact the developers.
For more information about the .dta format see
http://www.stata.com/help.cgi?dta
http://www.stata.com/help.cgi?dta_113
"""
_header = {}
_data_location = 0
_col_sizes = ()
_has_string_data = False
_missing_values = False
#type code
#--------------------
#str1 1 = 0x01
#str2 2 = 0x02
#...
#str244 244 = 0xf4
#byte 251 = 0xfb (sic)
#int 252 = 0xfc
#long 253 = 0xfd
#float 254 = 0xfe
#double 255 = 0xff
#--------------------
#NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
DTYPE_MAP = dict(zip(range(1,245), ['a' + str(i) for i in range(1,245)]) + \
[(251, np.int16),(252, np.int32),(253, int),
(254, np.float32), (255, np.float64)])
TYPE_MAP = range(251)+list('bhlfd')
#NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
MISSING_VALUES = { 'b': (-127,100), 'h': (-32767, 32740), 'l':
(-2147483647, 2147483620), 'f': (-1.701e+38, +1.701e+38), 'd':
(-1.798e+308, +8.988e+307) }
def __init__(self, fname, missing_values=False, encoding=None):
if encoding == None:
import locale
self._encoding = locale.getpreferredencoding()
else:
self._encoding = encoding
self._missing_values = missing_values
self._parse_header(fname)
def file_headers(self):
"""
Returns all .dta file headers.
out: dict
Has keys typlist, data_label, lbllist, varlist, nvar, filetype,
ds_format, nobs, fmtlist, vlblist, time_stamp, srtlist, byteorder
"""
return self._header
def file_format(self):
"""
Returns the file format.
Returns
-------
out : int
Notes
-----
Format 113: Stata 8/9
Format 114: Stata 10/11
Format 115: Stata 12
"""
return self._header['ds_format']
def file_label(self):
"""
Returns the dataset's label.
Returns
-------
out: string
"""
return self._header['data_label']
def file_timestamp(self):
"""
Returns the date and time Stata recorded on last file save.
Returns
-------
out : str
"""
return self._header['time_stamp']
def variables(self):
"""
Returns a list of the dataset's StataVariables objects.
"""
return map(_StataVariable, zip(range(self._header['nvar']),
self._header['typlist'], self._header['varlist'],
self._header['srtlist'],
self._header['fmtlist'], self._header['lbllist'],
self._header['vlblist']))
def dataset(self, as_dict=False):
"""
Returns a Python generator object for iterating over the dataset.
Parameters
----------
as_dict : bool, optional
If as_dict is True, yield each row of observations as a dict.
If False, yields each row of observations as a list.
Returns
-------
Generator object for iterating over the dataset. Yields each row of
observations as a list by default.
Notes
-----
If missing_values is True during instantiation of StataReader then
observations with _StataMissingValue(s) are not filtered and should
        be handled by your application.
"""
try:
self._file.seek(self._data_location)
except Exception:
pass
if as_dict:
vars = map(str, self.variables())
for i in range(len(self)):
yield dict(zip(vars, self._next()))
else:
for i in range(self._header['nobs']):
yield self._next()
### Python special methods
def __len__(self):
"""
Return the number of observations in the dataset.
This value is taken directly from the header and includes observations
with missing values.
"""
return self._header['nobs']
def __getitem__(self, k):
"""
Seek to an observation indexed k in the file and return it, ordered
by Stata's output to the .dta file.
k is zero-indexed. Prefer using R.data() for performance.
"""
if not (type(k) is int or type(k) is long) or k < 0 or k > len(self)-1:
raise IndexError(k)
loc = self._data_location + sum(self._col_size()) * k
if self._file.tell() != loc:
self._file.seek(loc)
return self._next()
### Private methods
def _null_terminate(self, s, encoding):
if PY3: # have bytes not strings, so must decode
null_byte = asbytes('\x00')
try:
s = s.lstrip(null_byte)[:s.index(null_byte)]
except:
pass
return s.decode(encoding)
else:
null_byte = asbytes('\x00')
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _parse_header(self, file_object):
self._file = file_object
encoding = self._encoding
# parse headers
self._header['ds_format'] = unpack('b', self._file.read(1))[0]
if self._header['ds_format'] not in [113, 114, 115]:
raise ValueError("Only file formats >= 113 (Stata >= 9)"
" are supported. Got format %s. Please report "
"if you think this error is incorrect." %
self._header['ds_format'])
byteorder = self._header['byteorder'] = unpack('b',
self._file.read(1))[0]==0x1 and '>' or '<'
self._header['filetype'] = unpack('b', self._file.read(1))[0]
self._file.read(1)
nvar = self._header['nvar'] = unpack(byteorder+'h',
self._file.read(2))[0]
self._header['nobs'] = unpack(byteorder+'i', self._file.read(4))[0]
self._header['data_label'] = self._null_terminate(self._file.read(81),
encoding)
self._header['time_stamp'] = self._null_terminate(self._file.read(18),
encoding)
# parse descriptors
typlist =[ord(self._file.read(1)) for i in range(nvar)]
self._header['typlist'] = [self.TYPE_MAP[typ] for typ in typlist]
self._header['dtyplist'] = [self.DTYPE_MAP[typ] for typ in typlist]
self._header['varlist'] = [self._null_terminate(self._file.read(33),
encoding) for i in range(nvar)]
self._header['srtlist'] = unpack(byteorder+('h'*(nvar+1)),
self._file.read(2*(nvar+1)))[:-1]
if self._header['ds_format'] <= 113:
self._header['fmtlist'] = \
[self._null_terminate(self._file.read(12), encoding) \
for i in range(nvar)]
else:
self._header['fmtlist'] = \
[self._null_terminate(self._file.read(49), encoding) \
for i in range(nvar)]
self._header['lbllist'] = [self._null_terminate(self._file.read(33),
encoding) for i in range(nvar)]
self._header['vlblist'] = [self._null_terminate(self._file.read(81),
encoding) for i in range(nvar)]
# ignore expansion fields
# When reading, read five bytes; the last four bytes now tell you the
# size of the next read, which you discard. You then continue like
# this until you read 5 bytes of zeros.
while True:
data_type = unpack(byteorder+'b', self._file.read(1))[0]
data_len = unpack(byteorder+'i', self._file.read(4))[0]
if data_type == 0:
break
self._file.read(data_len)
# other state vars
self._data_location = self._file.tell()
self._has_string_data = len(filter(lambda x: type(x) is int,
self._header['typlist'])) > 0
self._col_size()
def _calcsize(self, fmt):
return type(fmt) is int and fmt or \
calcsize(self._header['byteorder']+fmt)
def _col_size(self, k = None):
"""Calculate size of a data record."""
if len(self._col_sizes) == 0:
self._col_sizes = map(lambda x: self._calcsize(x),
self._header['typlist'])
if k == None:
return self._col_sizes
else:
return self._col_sizes[k]
def _unpack(self, fmt, byt):
d = unpack(self._header['byteorder']+fmt, byt)[0]
if fmt[-1] in self.MISSING_VALUES:
nmin, nmax = self.MISSING_VALUES[fmt[-1]]
if d < nmin or d > nmax:
if self._missing_values:
return _StataMissingValue(nmax, d)
else:
return None
return d
def _next(self):
typlist = self._header['typlist']
if self._has_string_data:
data = [None]*self._header['nvar']
for i in range(len(data)):
if type(typlist[i]) is int:
data[i] = self._null_terminate(self._file.read(typlist[i]),
self._encoding)
else:
data[i] = self._unpack(typlist[i],
self._file.read(self._col_size(i)))
return data
else:
return map(lambda i: self._unpack(typlist[i],
self._file.read(self._col_size(i))),
range(self._header['nvar']))
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
#if 'b' not in fname.mode:
return fname
if PY3:
return open(fname, "wb", encoding=encoding)
else:
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _dtype_to_stata_type(dtype):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
251 - chr(251) - for int8 and int16, byte
252 - chr(252) - for int32, int
253 - chr(253) - for int64, long
254 - chr(254) - for float32, float
255 - chr(255) - double, double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
#TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we do?
return chr(244)
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int64:
return chr(253)
elif dtype == np.int32:
return chr(252)
elif dtype == np.int8 or dtype == np.int16: # ok to assume bytes?
return chr(251)
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _dtype_to_default_stata_fmt(dtype):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
string -> "%DDs" where DD is the length of the string
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%9.0g"
int16 -> "%9.0g"
int8 -> "%8.0g"
"""
#TODO: expand this to handle a default datetime format?
if dtype.type == np.string_:
return "%" + str(dtype.itemsize) + "s"
elif dtype.type == np.object_:
return "%244s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int64:
return "%9.0g"
elif dtype == np.int32:
return "%8.0g"
elif dtype == np.int8 or dtype == np.int16: # ok to assume bytes?
return "%8.0g"
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _pad_bytes(name, length):
"""
Takes a char string and pads it with null bytes until it is `length` chars
"""
return name + "\x00" * (length - len(name))
def _default_names(nvar):
"""
Returns default Stata names v1, v2, ... vnvar
"""
return ["v%d" % i for i in range(1,nvar+1)]
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise ValueError("fmt %s not understood" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key) : convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convery_dates key is not in varlist "
"and is not an int")
new_dict.update({key : convert_dates[key]})
return new_dict
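# Doctest-style sketch of the key normalization above (the column names are
# hypothetical):
# >>> _maybe_convert_to_int_keys({'date': 'tw'}, ['id', 'date'])
# {1: '%tw'}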
_type_converters = {253 : np.long, 252 : int}
class StataWriter(object):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : file path or buffer
Where to save the dta file.
data : array-like
Array-like input to save. Pandas objects are also accepted.
convert_dates : dict
Dictionary mapping column of datetime types to the stata internal
format that you want to use for the dates. Options are
'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
number or a name.
encoding : str
Default is latin-1. Note that Stata does not support unicode.
byteorder : str
Can be ">", "<", "little", or "big". The default is None which uses
`sys.byteorder`
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', date, {2 : 'tw'})
>>> writer.write_file()
"""
#type code
#--------------------
#str1 1 = 0x01
#str2 2 = 0x02
#...
#str244 244 = 0xf4
#byte 251 = 0xfb (sic)
#int 252 = 0xfc
#long 253 = 0xfd
#float 254 = 0xfe
#double 255 = 0xff
#--------------------
#NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
DTYPE_MAP = dict(list(zip(range(1,245), ['a' + str(i) for i in range(1,245)])) + \
[(251, np.int16),(252, np.int32),(253, int),
(254, np.float32), (255, np.float64)])
TYPE_MAP = list(range(251)) + list('bhlfd')
MISSING_VALUES = { 'b': 101,
'h': 32741,
'l' : 2147483621,
'f': 1.7014118346046923e+38,
'd': 8.98846567431158e+307}
def __init__(self, fname, data, convert_dates=None, encoding="latin-1",
byteorder=None):
self._convert_dates = convert_dates
# attach nobs, nvars, data, varlist, typlist
if data_util._is_using_pandas(data, None):
self._prepare_pandas(data)
elif data_util._is_array_like(data, None):
data = np.asarray(data)
if data_util._is_structured_ndarray(data):
self._prepare_structured_array(data)
else:
if convert_dates is not None:
raise ValueError("Not able to convert dates in a plain"
" ndarray.")
self._prepare_ndarray(data)
else: # pragma : no cover
raise ValueError("Type %s for data not understood" % type(data))
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._encoding = encoding
self._file = _open_file_binary_write(fname, encoding)
def _write(self, to_write):
"""
Helper to call asbytes before writing to file for Python 3 compat.
"""
self._file.write(asbytes(to_write))
def _prepare_structured_array(self, data):
self.nobs = len(data)
self.nvar = len(data.dtype)
self.data = data
self.datarows = iter(data)
dtype = data.dtype
descr = dtype.descr
if dtype.names is None:
varlist = _default_names(self.nvar)
else:
varlist = dtype.names
# check for datetime and change the type
convert_dates = self._convert_dates
if convert_dates is not None:
convert_dates = _maybe_convert_to_int_keys(convert_dates,
varlist)
self._convert_dates = convert_dates
for key in convert_dates:
descr[key] = (
descr[key][0],
_convert_datetime_to_stata_type(convert_dates[key])
)
dtype = np.dtype(descr)
self.varlist = varlist
self.typlist = [_dtype_to_stata_type(dtype[i])
for i in range(self.nvar)]
self.fmtlist = [_dtype_to_default_stata_fmt(dtype[i])
for i in range(self.nvar)]
# set the given format for the datetime cols
if convert_dates is not None:
for key in convert_dates:
self.fmtlist[key] = convert_dates[key]
def _prepare_ndarray(self, data):
if data.ndim == 1:
data = data[:,None]
self.nobs, self.nvar = data.shape
self.data = data
self.datarows = iter(data)
#TODO: this should be user settable
dtype = data.dtype
self.varlist = _default_names(self.nvar)
self.typlist = [_dtype_to_stata_type(dtype) for i in range(self.nvar)]
self.fmtlist = [_dtype_to_default_stata_fmt(dtype)
for i in range(self.nvar)]
def _prepare_pandas(self, data):
#NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
class DataFrameRowIter(object):
def __init__(self, data):
self.data = data
def __iter__(self):
for i, row in data.iterrows():
yield row
data = data.reset_index()
self.datarows = DataFrameRowIter(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
convert_dates = self._convert_dates
if convert_dates is not None:
convert_dates = _maybe_convert_to_int_keys(convert_dates,
self.varlist)
self._convert_dates = convert_dates
for key in convert_dates:
new_type = _convert_datetime_to_stata_type(convert_dates[key])
dtypes[key] = np.dtype(new_type)
self.typlist = [_dtype_to_stata_type(dt) for dt in dtypes]
self.fmtlist = [_dtype_to_default_stata_fmt(dt) for dt in dtypes]
# set the given format for the datetime cols
if convert_dates is not None:
for key in convert_dates:
self.fmtlist[key] = convert_dates[key]
def write_file(self):
self._write_header()
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
if self._convert_dates is None:
self._write_data_nodates()
else:
self._write_data_dates()
#self._write_value_labels()
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._write(pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._write(pack(byteorder+"h", self.nvar)[:2])
# number of obs, 4 bytes
self._write(pack(byteorder+"i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._write(self._null_terminate(_pad_bytes("", 80),
self._encoding))
else:
self._write(self._null_terminate(_pad_bytes(data_label[:80],
80), self._encoding))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
self._write(self._null_terminate(
time_stamp.strftime("%d %b %Y %H:%M"),
self._encoding))
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist, length 33*nvar, char array, null terminated
for name in self.varlist:
name = self._null_terminate(name, self._encoding)
name = _pad_bytes(asstr(name[:32]), 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", (2*(nvar+1)))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
#NOTE: this is where you could get fancy with pandas categorical type
for i in range(nvar):
self._write(_pad_bytes("", 33))
def _write_variable_labels(self, labels=None):
nvar = self.nvar
if labels is None:
for i in range(nvar):
self._write(_pad_bytes("", 81))
def _write_data_nodates(self):
data = self.datarows
byteorder = self._byteorder
TYPE_MAP = self.TYPE_MAP
typlist = self.typlist
for row in data:
#row = row.squeeze().tolist() # needed for structured arrays
for i,var in enumerate(row):
typ = ord(typlist[i])
if typ <= 244: # we've got a string
if len(var) < typ:
var = _pad_bytes(asstr(var), len(var) + 1)
self._write(var)
else:
try:
self._write(pack(byteorder+TYPE_MAP[typ], var))
except struct_error:
# have to be strict about types; pack won't do any
# kind of casting
self._write(pack(byteorder+TYPE_MAP[typ],
_type_converters[typ](var)))
def _write_data_dates(self):
convert_dates = self._convert_dates
data = self.datarows
byteorder = self._byteorder
TYPE_MAP = self.TYPE_MAP
MISSING_VALUES = self.MISSING_VALUES
typlist = self.typlist
for row in data:
#row = row.squeeze().tolist() # needed for structured arrays
for i,var in enumerate(row):
typ = ord(typlist[i])
#NOTE: If anyone finds this terribly slow, there is
# a vectorized way to convert dates, see genfromdta for going
# from int to datetime and reverse it. will copy data though
if i in convert_dates:
var = _datetime_to_stata_elapsed(var, self.fmtlist[i])
if typ <= 244: # we've got a string
if isnull(var):
var = "" # missing string
if len(var) < typ:
var = _pad_bytes(var, len(var) + 1)
self._write(var)
else:
if isnull(var): # this only matters for floats
var = MISSING_VALUES[typ]
self._write(pack(byteorder+TYPE_MAP[typ], var))
def _null_terminate(self, s, encoding):
null_byte = '\x00'
if PY3:
s += null_byte
return s.encode(encoding)
else:
s += null_byte
return s
def genfromdta(fname, missing_flt=-999., encoding=None, pandas=False,
convert_dates=True):
"""
Returns an ndarray or DataFrame from a Stata .dta file.
Parameters
----------
fname : str or filehandle
Stata .dta file.
missing_flt : numeric
The numeric value to replace missing values with. Will be used for
any numeric value.
encoding : string, optional
Used for Python 3 only. Encoding to use when reading the .dta file.
Defaults to `locale.getpreferredencoding`
pandas : bool
Optionally return a DataFrame instead of an ndarray
convert_dates : bool
If convert_dates is True, then Stata formatted dates will be converted
to datetime types according to the variable's format.
"""
if isinstance(fname, basestring):
fhd = StataReader(open(fname, 'rb'), missing_values=False,
encoding=encoding)
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = StataReader(fname, missing_values=False, encoding=encoding)
# validate_names = np.lib._iotools.NameValidator(excludelist=excludelist,
# deletechars=deletechars,
# case_sensitive=case_sensitive)
#TODO: This needs to handle the byteorder?
header = fhd.file_headers()
types = header['dtyplist']
nobs = header['nobs']
numvars = header['nvar']
varnames = header['varlist']
fmtlist = header['fmtlist']
dataname = header['data_label']
labels = header['vlblist'] # labels are thrown away unless DataArray
# type is used
data = np.zeros((nobs,numvars))
stata_dta = fhd.dataset()
dt = np.dtype(list(zip(varnames, types)))
data = np.zeros((nobs), dtype=dt) # init final array
for rownum,line in enumerate(stata_dta):
# doesn't handle missing value objects, just casts
# None will only work without missing value object.
if None in line:
for i,val in enumerate(line):
#NOTE: This will only be scalar types because missing strings
# are empty not None in Stata
if val is None:
line[i] = missing_flt
data[rownum] = tuple(line)
if pandas:
from pandas import DataFrame
data = DataFrame.from_records(data)
if convert_dates:
cols = np.where([x in _date_formats for x in fmtlist])[0]
for col in cols:
i = col
col = data.columns[col]
data[col] = data[col].apply(_stata_elapsed_date_to_datetime,
args=(fmtlist[i],))
elif convert_dates:
#date_cols = np.where(map(lambda x : x in _date_formats,
# fmtlist))[0]
# make the dtype for the datetime types
cols = np.where([x in _date_formats for x in fmtlist])[0]
dtype = data.dtype.descr
dtype = [(dt[0], object) if i in cols else dt for i,dt in
enumerate(dtype)]
data = data.astype(dtype) # have to copy
for col in cols:
def convert(x):
return _stata_elapsed_date_to_datetime(x, fmtlist[col])
data[data.dtype.names[col]] = [convert(x) for x in
data[data.dtype.names[col]]]
return data
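# A minimal write/read round-trip sketch with StataWriter and genfromdta
# (illustrative only; the path and array are made up):
# >>> arr = np.array([(1, 2.5), (2, 3.5)], dtype=[('a', 'i4'), ('b', 'f8')])
# >>> StataWriter('/tmp/example.dta', arr).write_file()
# >>> genfromdta('/tmp/example.dta')['b']
# array([ 2.5,  3.5])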
def savetxt(fname, X, names=None, fmt='%.18e', delimiter=' '):
"""
Save an array to a text file.
This is just a copy of numpy.savetxt patched to support structured arrays
or a header of names. Does not include py3 support now in savetxt.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
names : list, optional
If given names will be the column header in the text file. If None and
X is a structured or recarray then the names are taken from
X.dtype.names.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> savetxt('test.out', x, delimiter=',') # x is an array
>>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
if _is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
fh = file(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X =
|
np.atleast_2d(X)
|
numpy.atleast_2d
|
import math
import functools
import numpy as np
from pylsd import lsd
import cv2 as cv
from mex_files.alignments_slow_python import use_alignments_slow
from mex_files.alignments_fast_python import use_alignments_fast
from gmm_mml import GmmMml
import matplotlib.pyplot as plt
import matplotlib
import scipy
import matplotlib.lines as mlines
from sklearn.cluster import AgglomerativeClustering
# there is a problem with sign function in python so this is a workaround
# https://stackoverflow.com/questions/1986152/why-doesnt-python-have-a-sign-function
#sign = functools.partial(math.copysign, 1)
class params:
def __init__(self, w, h, focal_ratio=1.05455933619402):
self.w = w
self.h = h
self.LENGTH_THRESHOLD = 30.71 # param to change
# self.LENGTH_THRESHOLD = math.sqrt(self.w + self.h)/self.LENGTH_THRESHOLD
self.GMM_KS = [5,5,5]
self.REFINE_THRESHOLD = 0.375 # theta
self.VARIATION_THRESHOLD = 0.15 # (\zeta)
self.DUPLICATES_THRESHOLD = 0.1# (\delta)
self.MAX_POINTS_ACCELERATION = 100 # use acceleration if number of points is larger than this
self.MANHATTAN = True
self.ppd = [2, 2] # principal point = [W,H]/prms.ppd ; values taken from YUD
self.FOCAL_RATIO = focal_ratio # default for YUD
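# Minimal instantiation sketch for the params container above (the frame
# size is illustrative):
# >>> prms = params(w=640, h=480)
# >>> prms.MANHATTAN, prms.FOCAL_RATIO
# (True, 1.05455933619402)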
def detect_vps_given_lines(frame_gray, prms,lines, frame_to_draw=None, old_straight=[], old_twisted=[]):
"""
given lines with shape [n x 4] computes vps
"""
denoised_lanes = denoise_lanes(lines, prms)
if(len(denoised_lanes) > 0):
points_staright, points_twisted = convert_to_PClines(denoised_lanes, prms)
else:
points_staright, points_twisted = [], []
#
# if(len(old_straight) > 0 and len(old_twisted) >0):
# print('before appending ', np.shape(points_staright), np.shape(points_twisted), np.shape(old_straight), np.shape(old_twisted))
# if(len(points_staright)>0):
# points_staright = np.r_[points_staright, old_straight]
# else:
# points_staright = old_straight
# if(len(points_twisted) > 0):
# points_twisted = np.r_[points_twisted, points_twisted]
# print('after appending ', np.shape(points_staright), np.shape(points_twisted), np.shape(old_straight), np.shape(old_twisted))
# print(np.shape(points_staright),np.shape(points_twisted))
if(len(points_staright) == 0 or len(points_twisted) == 0): return [], [], []
detections_straight, m1, b1 = find_detections(points_staright, prms)
detections_twisted, m2, b2 = find_detections(points_twisted, prms)
print('detections', np.shape(detections_straight),np.shape(detections_twisted))
if(len(detections_straight) == 0 and len(detections_twisted) == 0):
return [], [], []
# gather initial vanishing point detections
mvp_all, NFAs = read_detections_as_vps(detections_straight, m1, b1, detections_twisted, m2 ,b2, prms)
print('\n\n\nbefore appending ', np.shape(points_staright), np.shape(points_twisted), np.shape(mvp_all))
# refine detections; this returns a 2 x m array, where m is the number of vps left after refining
mvp_all = refine_detections(mvp_all, lines, prms)
print('after appending ', np.shape(points_staright), np.shape(points_twisted), np.shape(mvp_all))
mvp_all,NFAs = remove_dublicates(mvp_all, NFAs, prms)
for i in range(len(mvp_all[0])):
p1 = np.int32(mvp_all[:, i])
cv.circle(frame_to_draw,tuple(p1),5,(0,255,0),3)
return mvp_all.T, points_staright, points_twisted # return as n x2 shape where n is the number of vps
def detect_vps(frame_gray, prms, frame_to_draw=None, points_staright_old=[], points_twisted_old=[]):
lines = lsd.lsd(np.array(frame_gray, np.float32))
lines = lines[:,0:4]
for i in range(lines.shape[0]):
pt1 = (int(lines[i, 0]), int(lines[i, 1]))
pt2 = (int(lines[i, 2]), int(lines[i, 3]))
if(not frame_to_draw is None):
cv.line(frame_to_draw, pt1,pt2, (0, 0, 255), 1)
denoised_lanes = denoise_lanes(lines, prms)
if(len(denoised_lanes) > 0):
points_staright, points_twisted = convert_to_PClines(denoised_lanes, prms)
else:
points_staright , points_twisted = [], []
detections_straight, m1, b1 = find_detections(points_staright, prms)
detections_twisted, m2, b2 = find_detections(points_twisted, prms)
if(len(detections_straight) == 0 and len(detections_twisted) == 0):
return []
# gather initial vanishing point detections
mvp_all, NFAs = read_detections_as_vps(detections_straight, m1, b1, detections_twisted, m2 ,b2, prms)
# refine detections; this returns a 2 x m array, where m is the number of vps left after refining
mvp_all = refine_detections(mvp_all, lines, prms)
mvp_all,NFAs = remove_dublicates(mvp_all, NFAs, prms)
for i in range(len(mvp_all[0])):
p1 = np.int32(mvp_all[:, i])
cv.circle(frame_to_draw,tuple(p1),5,(0,255,0),3)
return mvp_all.T# return as n x2 shape where n is the number of vps
print("please finish manhattan world")
# TO DO the manhattan world
#def compute_horizon_line_manhattan(mvp_all, NFAs, lines_lsd, prms):
# # computes horizontal line from vps using the NFA values to apply
# # orthogonality constraintes
# # this is conversion of matlab code written by <NAME>
# # Converting author : Majidov Ikhtiyor
# H = prms.w
# W = prms.h
#
# # york urban parameters (given)
# # focal = 6.05317058975369
# # pixelSize = 0.00896875
# # pp = [307.551305282635, 251.454244960136]
# pp = np.array([W, H])/prms.ppd
# FOCAL_RATIO = prms.FOCAL_RATIO #
# my_vps = image_to_gaussian_sphere(mvp_all, W, H, FOCAL_RATIO, pp)
#
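# Hedged usage sketch for detect_vps above (the image path is illustrative;
# assumes cv, np and the helper functions used above are importable):
# >>> frame = cv.imread('road.jpg', cv.IMREAD_GRAYSCALE)
# >>> prms = params(w=frame.shape[1], h=frame.shape[0])
# >>> canvas = cv.cvtColor(frame, cv.COLOR_GRAY2BGR)
# >>> vps = detect_vps(frame, prms, frame_to_draw=canvas)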
def remove_dublicates(vps, NFAs, prms):
# vps is 2 x n array n is the number of points
THRESHOLD = prms.DUPLICATES_THRESHOLD
#agglomerative clustering using single link
if(len(vps[0]) == 1):
return vps, NFAs
clus,n_clus = aggclus(vps.T, THRESHOLD)
final_vps = []
final_NFAs = []
for i in range(n_clus):
args =
|
np.where(clus == i)
|
numpy.where
|
import numpy as np
from nitime.lazy import scipy
from nitime.lazy import scipy_signal as signal
from nitime.lazy import scipy_fftpack as fftpack
from nitime import descriptors as desc
from nitime import utils as tsu
from nitime import algorithms as tsa
from nitime import timeseries as ts
from .base import BaseAnalyzer
class SpectralAnalyzer(BaseAnalyzer):
""" Analyzer object for spectral analysis"""
def __init__(self, input=None, method=None, BW=None, adaptive=False,
low_bias=False):
"""
The initialization of the
Parameters
----------
input: time-series objects
method: dict (optional),
The method spec used in calculating 'psd' see
:func:`algorithms.get_spectra` for details.
BW: float (optional),
In 'spectrum_multi_taper', the bandwidth of the windowing function
will determine the number of tapers to use. This parameter represents a
trade-off between frequency resolution (lower main lobe BW for the
taper) and variance reduction (higher BW and number of averaged
estimates).
adaptive : {True/False}
In 'spectrum_multi_taper', use an adaptive weighting routine to
combine the PSD estimates of different tapers.
low_bias: {True/False}
In spectrum_multi_taper, use bias correction
Examples
--------
>>> np.set_printoptions(precision=4) # for doctesting
>>> t1 = ts.TimeSeries(data = np.arange(0,1024,1).reshape(2,512),
... sampling_rate=np.pi)
>>> s1 = SpectralAnalyzer(t1)
>>> s1.method['this_method']
'welch'
>>> s1.method['Fs'] # doctest: +ELLIPSIS
3.1415926535... Hz
>>> f,s = s1.psd
>>> f
array([ 0. , 0.0491, 0.0982, 0.1473, 0.1963, 0.2454, 0.2945,
0.3436, 0.3927, 0.4418, 0.4909, 0.54 , 0.589 , 0.6381,
0.6872, 0.7363, 0.7854, 0.8345, 0.8836, 0.9327, 0.9817,
1.0308, 1.0799, 1.129 , 1.1781, 1.2272, 1.2763, 1.3254,
1.3744, 1.4235, 1.4726, 1.5217, 1.5708])
>>> s[0,0] # doctest: +ELLIPSIS
1128276.92538360...
"""
BaseAnalyzer.__init__(self, input)
self.method = method
if self.method is None:
self.method = {'this_method': 'welch',
'Fs': self.input.sampling_rate}
self.BW = BW
self.adaptive = adaptive
self.low_bias = low_bias
@desc.setattr_on_read
def psd(self):
"""
The standard output for this analyzer is a tuple f,s, where: f is the
frequency bands associated with the discrete spectral components
and s is the PSD calculated using :func:`mlab.psd`.
"""
NFFT = self.method.get('NFFT', 64)
Fs = self.input.sampling_rate
detrend = self.method.get('detrend', tsa.mlab.detrend_none)
window = self.method.get('window', tsa.mlab.window_hanning)
n_overlap = self.method.get('n_overlap', int(np.ceil(NFFT / 2.0)))
if np.iscomplexobj(self.input.data):
psd_len = NFFT
dt = complex
else:
psd_len = NFFT // 2 + 1
dt = float
#If multi-channel data:
if len(self.input.data.shape) > 1:
psd_shape = (self.input.shape[:-1] + (psd_len,))
flat_data = np.reshape(self.input.data, (-1,
self.input.data.shape[-1]))
flat_psd = np.empty((flat_data.shape[0], psd_len), dtype=dt)
for i in range(flat_data.shape[0]):
#'f' are the center frequencies of the frequency bands
#represented in the psd. These are identical in each iteration
#of the loop, so they get reassigned into the same variable in
#each iteration:
temp, f = tsa.mlab.psd(flat_data[i],
NFFT=NFFT,
Fs=Fs,
detrend=detrend,
window=window,
noverlap=n_overlap)
flat_psd[i] = temp.squeeze()
psd = np.reshape(flat_psd, psd_shape).squeeze()
else:
psd, f = tsa.mlab.psd(self.input.data,
NFFT=NFFT,
Fs=Fs,
detrend=detrend,
window=window,
noverlap=n_overlap)
return f, psd
@desc.setattr_on_read
def cpsd(self):
"""
This outputs both the PSD and the CSD calculated using
:func:`algorithms.get_spectra`.
Returns
-------
(f,s): tuple
f: Frequency bands over which the psd/csd are calculated and
s: the n by n by len(f) matrix of PSD (on the main diagonal) and CSD
(off diagonal)
"""
self.welch_method = self.method
self.welch_method['this_method'] = 'welch'
self.welch_method['Fs'] = self.input.sampling_rate
f, spectrum_welch = tsa.get_spectra(self.input.data,
method=self.welch_method)
return f, spectrum_welch
@desc.setattr_on_read
def periodogram(self):
"""
This is the spectrum estimated as the FFT of the time-series
Returns
-------
(f,spectrum): f is an array with the frequencies and spectrum is the
complex-valued FFT.
"""
return tsa.periodogram(self.input.data,
Fs=self.input.sampling_rate)
@desc.setattr_on_read
def spectrum_fourier(self):
"""
This is the spectrum estimated as the FFT of the time-series
Returns
-------
(f,spectrum): f is an array with the frequencies and spectrum is the
complex-valued FFT.
"""
data = self.input.data
sampling_rate = self.input.sampling_rate
fft = fftpack.fft
if np.any(np.iscomplex(data)):
# Get negative frequencies, as well as positive:
f = np.linspace(-sampling_rate/2., sampling_rate/2., data.shape[-1])
spectrum_fourier = np.fft.fftshift(fft(data))
else:
f = tsu.get_freqs(sampling_rate, data.shape[-1])
spectrum_fourier = fft(data)[..., :f.shape[0]]
return f, spectrum_fourier
@desc.setattr_on_read
def spectrum_multi_taper(self):
"""
The spectrum and cross-spectra, computed using
:func:`multi_taper_csd`
"""
if np.iscomplexobj(self.input.data):
psd_len = self.input.shape[-1]
dt = complex
else:
psd_len = self.input.shape[-1] // 2 + 1
dt = float
#Initialize the output
spectrum_multi_taper = np.empty((self.input.shape[:-1] + (psd_len,)),
dtype=dt)
#If multi-channel data:
if len(self.input.data.shape) > 1:
for i in range(self.input.data.shape[0]):
# 'f' are the center frequencies of the frequency bands
# represented in the MT psd. These are identical in each
# iteration of the loop, so they get reassigned into the same
# variable in each iteration:
f, spectrum_multi_taper[i], _ = tsa.multi_taper_psd(
self.input.data[i],
Fs=self.input.sampling_rate,
BW=self.BW,
adaptive=self.adaptive,
low_bias=self.low_bias)
else:
f, spectrum_multi_taper, _ = tsa.multi_taper_psd(self.input.data,
Fs=self.input.sampling_rate,
BW=self.BW,
adaptive=self.adaptive,
low_bias=self.low_bias)
return f, spectrum_multi_taper
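# Hedged sketch of the multi-taper path above (mirrors the class docstring
# example; the adaptive/low_bias settings are illustrative):
# >>> t1 = ts.TimeSeries(data=np.random.randn(2, 512), sampling_rate=np.pi)
# >>> s2 = SpectralAnalyzer(t1, adaptive=True, low_bias=True)
# >>> f, psd_mt = s2.spectrum_multi_taper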
class FilterAnalyzer(desc.ResetMixin):
""" A class for performing filtering operations on time-series and
producing the filtered versions of the time-series
Parameters
----------
time_series: A nitime TimeSeries object.
lb,ub: float (optional)
Lower and upper band of a pass-band into which the data will be
filtered. Default: 0, Nyquist
boxcar_iterations: int (optional)
For box-car filtering, how many times to iterate over the data while
convolving with a box-car function. Default: 2
gpass: float (optional)
For iir filtering, the pass-band maximal ripple loss (default: 1)
gstop: float (optional)
For iir filtering, the stop-band minimal attenuation (default: 60).
filt_order: int (optional)
For iir/fir filtering, the order of the filter. Note for fir filtering,
this needs to be an even number. Default: 64
iir_ftype: str (optional)
The type of filter to be used in iir filtering (see
scipy.signal.iirdesign for details). Default 'ellip'
fir_win: str
The window to be used in fir filtering (see scipy.signal.firwin for
details). Default: 'hamming'
Note
----
All filtering methods used here keep the original DC component of the
signal.
"""
def __init__(self, time_series, lb=0, ub=None, boxcar_iterations=2,
filt_order=64, gpass=1, gstop=60, iir_ftype='ellip',
fir_win='hamming'):
#Initialize all the local variables you will need for all the different
#filtering methods:
self.data = time_series.data
self.sampling_rate = time_series.sampling_rate
self.ub = ub
self.lb = lb
self.time_unit = time_series.time_unit
self._boxcar_iterations = boxcar_iterations
self._gstop = gstop
self._gpass = gpass
self._filt_order = filt_order
self._ftype = iir_ftype
self._win = fir_win
def filtfilt(self, b, a, in_ts=None):
"""
Zero-phase delay filtering (either iir or fir).
Parameters
----------
a,b: filter coefficients
in_ts: time-series object.
This allows to replace the input. Instead of analyzing this
analyzers input data, analyze some other time-series object
Note
----
This is a wrapper around scipy.signal.filtfilt
"""
# Switch in the new in_ts:
if in_ts is not None:
data = in_ts.data
Fs = in_ts.sampling_rate
else:
data = self.data
Fs = self.sampling_rate
#filtfilt only operates channel-by-channel, so we need to loop over the
#channels, if the data is multi-channel data:
if len(data.shape) > 1:
out_data = np.empty(data.shape, dtype=data.dtype)
for i in range(data.shape[0]):
out_data[i] = signal.filtfilt(b, a, data[i])
#Make sure to preserve the DC:
dc = np.mean(data[i])
out_data[i] -= np.mean(out_data[i])
out_data[i] += dc
else:
out_data = signal.filtfilt(b, a, data)
#Make sure to preserve the DC:
dc = np.mean(data)
out_data -= np.mean(out_data)
out_data += dc
return ts.TimeSeries(out_data,
sampling_rate=Fs,
time_unit=self.time_unit)
@desc.setattr_on_read
def fir(self):
"""
Filter the time-series using an FIR digital filter. Filtering is done
back and forth (using scipy.signal.filtfilt) to achieve zero phase
delay
"""
#Passband and stop-band are expressed as fraction of the Nyquist
#frequency:
if self.ub is not None:
ub_frac = self.ub / (self.sampling_rate / 2.)
else:
ub_frac = 1.0
lb_frac = self.lb / (self.sampling_rate / 2.)
if lb_frac < 0 or ub_frac > 1:
e_s = "The lower-bound or upper bound used to filter"
e_s += " are beyond the range 0-Nyquist. You asked for"
e_s += " a filter between"
e_s += "%s and %s percent of" % (lb_frac * 100, ub_frac * 100)
e_s += "the Nyquist frequency"
raise ValueError(e_s)
n_taps = self._filt_order + 1
#This means the filter order you chose was too large (needs to be
#shorter than a 1/3 of your time-series )
if n_taps > self.data.shape[-1] * 3:
e_s = "The filter order chosen is too large for this time-series"
raise ValueError(e_s)
# a is always 1:
a = [1]
sig = ts.TimeSeries(data=self.data, sampling_rate=self.sampling_rate)
#Lowpass:
if ub_frac < 1:
b = signal.firwin(n_taps, ub_frac, window=self._win)
sig = self.filtfilt(b, a, sig)
#High-pass
if lb_frac > 0:
#Includes a spectral inversion:
b = -1 * signal.firwin(n_taps, lb_frac, window=self._win)
b[n_taps // 2] = b[n_taps // 2] + 1
sig = self.filtfilt(b, a, sig)
return sig
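# Hedged usage sketch for the fir property above (band edges are
# illustrative):
# >>> t2 = ts.TimeSeries(data=np.random.randn(512), sampling_rate=100.0)
# >>> fa = FilterAnalyzer(t2, lb=1.0, ub=20.0)
# >>> band_passed = fa.fir  # zero-phase FIR band-pass; DC is preserved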
@desc.setattr_on_read
def iir(self):
"""
Filter the time-series using an IIR filter. Filtering is done back and
forth (using scipy.signal.filtfilt) to achieve zero phase delay
"""
#Passband and stop-band are expressed as fraction of the Nyquist
#frequency:
if self.ub is not None:
ub_frac = self.ub / (self.sampling_rate / 2.)
else:
ub_frac = 1.0
lb_frac = self.lb / (self.sampling_rate / 2.)
# For the band-pass:
if lb_frac > 0 and ub_frac < 1:
wp = [lb_frac, ub_frac]
ws = [np.max([lb_frac - 0.1, 0]),
np.min([ub_frac + 0.1, 1.0])]
# For the lowpass:
elif lb_frac == 0:
wp = ub_frac
ws =
|
np.min([ub_frac + 0.1, 0.9])
|
numpy.min
|
# automatically create a pdf containing separate images and merged images.
# separate images go under ./plot_seperate_xt
# merged output goes under ./
# limitation 1: memory consuming (3.83 GB on my computer), but can be improved
# limitation 2: parallel processing not supported yet
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import functions as fn
import os
from PyPDF2 import PdfFileWriter, PdfFileReader
#from matplotlib.backends.backend_pdf import PdfPages
if not os.path.exists("plot_seperate_xt"):
os.makedirs("plot_seperate_xt")
# Creating a routine that appends files to the output file
def append_pdf(input,output):
[output.addPage(input.getPage(page_num)) for page_num in range(input.numPages)]
# Creating an object where pdf pages are appended to
output = PdfFileWriter()
tdata, ttarget, tlabel = fn.get_training_data()
for i in range(3000):
tempdata = np.array([[0,0,0]])
for j in range(300):
if (tdata[i][j] == 0).all():
continue
temp = np.expand_dims(tdata[i][j], axis=0)
tempdata = np.append(tempdata, temp, axis=0)
tempdata = np.delete(tempdata, 0, 0)
t_data = tempdata.transpose((1, 0))
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('y')
ax.view_init(-90, 0)
ax.set_title(i)
x = t_data[0]
y = t_data[1]
t = t_data[2]
x_target = np.linspace(ttarget[i][0], ttarget[i][0], 1000)
y_target = np.linspace(np.mean(t_data[1]), np.mean(t_data[1]), 1000)
#y_target = np.linspace(ttarget[i][1], ttarget[i][1], 1000)
t_target = np.linspace(
|
np.min(t_data[2])
|
numpy.min
|
import numpy as np
"""
Coordinate transformation module. All methods accept arrays as input
with each row as a position.
"""
a = 6378137
b = 6356752.3142
esq = 6.69437999014 * 0.001
e1sq = 6.73949674228 * 0.001
def geodetic2ecef(geodetic, radians=False):
geodetic = np.array(geodetic)
input_shape = geodetic.shape
geodetic = np.atleast_2d(geodetic)
ratio = 1.0 if radians else (np.pi / 180.0)
lat = ratio*geodetic[:,0]
lon = ratio*geodetic[:,1]
alt = geodetic[:,2]
xi = np.sqrt(1 - esq * np.sin(lat)**2)
x = (a / xi + alt) * np.cos(lat) * np.cos(lon)
y = (a / xi + alt) * np.cos(lat) * np.sin(lon)
z = (a / xi * (1 - esq) + alt) * np.sin(lat)
ecef = np.array([x, y, z]).T
return ecef.reshape(input_shape)
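# Quick doctest-style check of geodetic2ecef above (a point on the equator
# at the prime meridian and zero altitude; x is the WGS84 semi-major axis):
# >>> geodetic2ecef([0.0, 0.0, 0.0])
# array([ 6378137.,        0.,        0.])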
def ecef2geodetic(ecef, radians=False):
"""
Convert ECEF coordinates to geodetic using ferrari's method
"""
# Save shape and export column
ecef = np.atleast_1d(ecef)
input_shape = ecef.shape
ecef =
|
np.atleast_2d(ecef)
|
numpy.atleast_2d
|
import h5py
import numpy as np
from pathlib import Path
import json
def test_unpack_again():
files = [ Path("data_{0:06d}.npy".format(i)) for i in range(10) ]
assert all([f.exists() for f in files])
arrays = [ np.load(f) for f in files ]
output = Path("test.h5")
h5o = h5py.File(str(output))
for fi,ar in zip(files[3:5],arrays[3:5]):
h5o.create_dataset(str(fi),
data=ar,
compression="lzf")
h5o.close()
assert output.exists()
assert output.stat().st_size > 0
assert output.stat().st_size < sum([ f.stat().st_size for f in files[3:5]])
print(output.stat().st_size, sum([ f.stat().st_size for f in files[3:5]]))
output.unlink()
def test_reloaded():
files = [ Path("data_{0:06d}.npy".format(i)) for i in range(10) ]
assert all([f.exists() for f in files])
arrays = [ np.load(f) for f in files ]
output = Path("test.h5")
h5o = h5py.File(str(output))
for fi,ar in zip(files[3:5],arrays[3:5]):
h5o.create_dataset(str(fi),
data=ar,
compression="lzf")
h5o.close()
del h5o
reh5 = h5py.File(str(output))
assert str(files[3]) in reh5
arr3 = reh5[str(files[3])]
assert
|
np.array_equal(arr3,arrays[3])
|
numpy.array_equal
|
print("Importing libraries")
import time
from skimage.io import imread
from skimage.transform import resize
from sknn.mlp import Classifier, Convolution, Layer
import glob
import os
import pickle
import numpy as np
from sklearn.cross_validation import StratifiedKFold as KFold
from sklearn.metrics import classification_report, log_loss
import warnings
from skimage import morphology
from skimage import measure
warnings.filterwarnings("ignore")
print("Modules Imported")
start_time = time.time()
# -------------------------------------------------------------------------------------------------------------- #
print("Loading and preparing features datasets")
dir_names = list(set(glob.glob(os.path.join("competition_data","train", "*"))\
).difference(set(glob.glob(os.path.join("competition_data","train","*.*")))))
dir_names = sorted(dir_names)
# Calculate the number of images in the folder
numberofImages = 0
for folder in dir_names:
for fileNames in os.walk(folder):
add = len(fileNames[-1])
numberofImages += add
# ----------------------------------------------------------------------------------------------------------- #
# Build the y vector of labels and the X matrix of features (each row is a flattened, resized image)
pix = 25
X = np.zeros((numberofImages, pix**2), dtype=float)
y = np.zeros((numberofImages))
namesClasses = list()
label = 0
i = 0
for folder in dir_names:
currentClass = folder.split(os.sep)[-1] # the class name is the last path component
namesClasses.append(currentClass) # Idem
for fileNameDir in os.walk(folder):
for fileName in fileNameDir[2]:
if fileName[-4:] != ".jpg":
continue
nameFileImage = "{0}{1}{2}".format(fileNameDir[0], os.sep, fileName)
image = imread(nameFileImage, as_grey=True)
image = resize(image, (pix, pix)) # Resizing is done
image = np.array(image, dtype=np.float)
image =
|
np.reshape(image, (1, pix ** 2))
|
numpy.reshape
|
#!/usr/bin/env python
'''
Training code made by <NAME> <<EMAIL>>
Based on many other examples around Internet
Visit our website at www.theconstruct.ai
'''
import sys
import gym
import numpy
import time
import numpy as np
from gym import wrappers
from std_msgs.msg import Float64
# ROS packages required
import rospy
import rospkg
# import our training environment
import cart_pole_3d_env
from rl_algorithms.ddqn import DQNAgent
if __name__ == '__main__':
rospy.init_node('cart_pole_3d_gym', anonymous=True, log_level=rospy.WARN)
reward_publisher = rospy.Publisher('/cart_pole_3d/reward', Float64, queue_size=1)
# Create the Gym environment
env = gym.make('CartPole3D-v0')
rospy.loginfo("Gym environment done")
# Set the logging system
# Where we define where all of the training output is stored.
# rospack = rospkg.RosPack()
# pkg_path = rospack.get_path('cart_pole_3d_training_pkg')
# outdir = pkg_path + '/training_results'
# env = wrappers.Monitor(env, outdir, force=True)
# rospy.loginfo("Monitor Wrapper started")
last_time_steps = numpy.ndarray(0) #define last time step.
# Loads parameters from the ROS param server
Alpha = rospy.get_param("/cart_pole_3d/alpha")
Epsilon = rospy.get_param("/cart_pole_3d/epsilon")
Gamma = rospy.get_param("/cart_pole_3d/gamma")
epsilon_discount = rospy.get_param("/cart_pole_3d/epsilon_discount")
nepisodes = rospy.get_param("/cart_pole_3d/nepisodes")
nsteps = rospy.get_param("/cart_pole_3d/nsteps")
running_step = rospy.get_param("/cart_pole_3d/running_step")
batch_size = 32
state_size = 3
action_size = env.action_space.n
agent = DQNAgent(state_size=state_size, action_size=action_size)
start_time = time.time()
highest_reward = 0
scores = []
episodes = []
# Starts the main training loop: the one about the episodes to do
for episode in range(nepisodes):
rospy.logwarn(">>>>>>>>>> START EPISODE ==>" + str(episode)+ " <<<<<<<<<<<<<")
done = False
score = 0
step = 0
# rospy.logwarn("length: " + str(len(agent.memory)))
# Initialize the environment and get first state of the robot
observation = env.reset()
state = np.reshape(observation, [1, state_size])
episode_time = rospy.get_rostime().to_sec()
# for each episode, we test the robot for nsteps
while not done:
# rospy.logwarn("############### Start Step=>" + str(step))
step +=1
# Pick an action based on the current state
action = agent.act(state)
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
next_state =
|
np.reshape(observation, [1, state_size])
|
numpy.reshape
|
import numpy as np
import pylab as plt
from scipy.signal import find_peaks
from astropy.io import fits
from astropy import constants as c
from astropy import units as u
from astropy.coordinates import SkyCoord, GCRS
from astropy.time import Time
from astropy.table import Table
import os
from src.BAGLE import model
from src.BAGLE import model_fitter
from src.BAGLE.fake_data import *
import time
import pdb
import pytest
from astropy.time import Time
from astropy.coordinates import solar_system_ephemeris, EarthLocation, spherical_to_cartesian, cartesian_to_spherical
from astropy.coordinates import get_body_barycentric, get_body, get_moon, get_body_barycentric_posvel
# Always generate the same fake data.
np.random.seed(0)
def test_PSPL_other():
mL = 10.0 # msun
t0 = 57000.00
xS0 = np.array([0.000, 0.000])
# beta = -0.4 # mas
beta = 1.4 # mas
muS = np.array([8.0, 0.0])
# muL = np.array([-7.0, 0.00])
muL = np.array([0.00, 0.00])
dL = 4000.0
dS = 8000.0
b_sff = 1.0
mag_src = 19.0
run_test_PSPL(mL, t0, xS0, beta, muS, muL, dL, dS, b_sff, mag_src,
outdir='tests/test_pspl_other/')
return
def test_PSPL_belokurov():
# Scenario from Belokurov and Evans 2002 (Figure 1)
# Note that this isn't a direct comparison, because we don't have parallax.
mL = 0.5 # msun
t0 = 57160.00
xS0 = np.array([0.000, 0.000])
beta = -7.41 # mas
muS = np.array([-2.0, 7.0])
muL = np.array([90.00, -24.71])
dL = 150.0
dS = 1500.0
b_sff = 1.0
mag_src = 19.0
run_test_PSPL(mL, t0, xS0, beta, muS, muL, dL, dS, b_sff, mag_src,
outdir='tests/test_pspl_belokurov/')
return
def run_test_PSPL(mL, t0, xS0, beta, muS, muL, dL, dS, b_sff, mag_src,
outdir=''):
if (outdir != '') and (outdir != None):
os.makedirs(outdir, exist_ok=True)
pspl = model.PSPL_PhotAstrom_noPar_Param1(mL,
t0,
beta,
dL,
dL / dS,
xS0[0],
xS0[1],
muL[0],
muL[1],
muS[0],
muS[1],
[b_sff],
[mag_src])
t = np.arange(t0 - 3000, t0 + 3000, 1)
dt = t - pspl.t0
A = pspl.get_amplification(t)
shift = pspl.get_centroid_shift(t)
shift_amp = np.linalg.norm(shift, axis=1)
# Plot the amplification
plt.figure(1)
plt.clf()
plt.plot(dt, 2.5 * np.log10(A), 'k.')
plt.xlabel('t - t0 (MJD)')
plt.ylabel('2.5 * log(A)')
plt.savefig(outdir + 'amp_v_time.png')
# Plot the positions of everything
lens_pos = pspl.xL0 + np.outer(dt / model.days_per_year, pspl.muL) * 1e-3
srce_pos = pspl.xS0 + np.outer(dt / model.days_per_year, pspl.muS) * 1e-3
imag_pos = srce_pos + (shift * 1e-3)
plt.figure(2)
plt.clf()
plt.plot(lens_pos[:, 0], lens_pos[:, 1], 'r--', mfc='none', mec='red')
plt.plot(srce_pos[:, 0], srce_pos[:, 1], 'b--', mfc='none', mec='blue')
plt.plot(imag_pos[:, 0], imag_pos[:, 1], 'b-')
lim = 0.005
plt.xlim(lim, -lim) # arcsec
plt.ylim(-lim, lim)
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDec (arcsec)')
plt.title('Zoomed-in')
plt.savefig(outdir + 'on_sky_zoomed.png')
plt.figure(3)
plt.clf()
plt.plot(lens_pos[:, 0], lens_pos[:, 1], 'r--', mfc='none', mec='red')
plt.plot(srce_pos[:, 0], srce_pos[:, 1], 'b--', mfc='none', mec='blue')
plt.plot(imag_pos[:, 0], imag_pos[:, 1], 'b-')
lim = 0.05
plt.xlim(lim, -lim) # arcsec
plt.ylim(-lim, lim)
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDec (arcsec)')
plt.title('Zoomed-out')
plt.savefig(outdir + 'on_sky.png')
plt.figure(4)
plt.clf()
plt.plot(dt, shift_amp)
plt.xlabel('t - t0 (MJD)')
plt.ylabel('Astrometric Shift (mas)')
plt.savefig(outdir + 'shift_amp_v_t.png')
plt.figure(5)
plt.clf()
plt.plot(shift[:, 0], shift[:, 1])
plt.gca().invert_xaxis()
plt.xlabel('RA Shift (mas)')
plt.ylabel('Dec Shift (mas)')
plt.xlim(1.5, -1.5)
plt.ylim(-0.5, 2.5)
plt.savefig(outdir + 'shift_on_sky.png')
plt.close(6)
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)
f.subplots_adjust(hspace=0)
ax1.plot(dt / pspl.tE, shift[:, 0] / pspl.thetaE_amp, 'k-')
ax2.plot(dt / pspl.tE, shift[:, 1] / pspl.thetaE_amp, 'k-')
ax3.plot(dt / pspl.tE, shift_amp / pspl.thetaE_amp, 'k-')
ax3.set_xlabel('(t - t0) / tE)')
ax1.set_ylabel(r'dX / $\theta_E$')
ax2.set_ylabel(r'dY / $\theta_E$')
ax3.set_ylabel(r'dT / $\theta_E$')
ax1.set_ylim(-0.4, 0.4)
ax2.set_ylim(-0.4, 0.4)
ax3.set_ylim(0, 0.4)
plt.savefig(outdir + 'shift_v_t.png')
print('Einstein radius: ', pspl.thetaE_amp)
print('Einstein crossing time: ', pspl.tE)
return pspl
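# Hedged sanity check of the Einstein-radius scale used in these tests
# (mL = 10 Msun, dL = 4 kpc, dS = 8 kpc as in test_PSPL_other; the value is
# approximate):
# >>> inv_dist_diff = (1.0 / (4000 * u.pc)) - (1.0 / (8000 * u.pc))
# >>> thetaE = u.rad * np.sqrt((4.0 * c.G * 10 * u.M_sun / c.c ** 2) * inv_dist_diff)
# >>> thetaE.to('mas')   # roughly 3.2 mas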
def test_pspl_parallax_belokurov():
outdir = 'tests/test_pspl_parallax_belokurov/'
dim_ang = u.dimensionless_angles()
# Scenario from Belokurov and Evans 2002 (Figure 1)
# Parameters specified in the paper:
mL = 0.5 * u.M_sun
dL = 150.0 * u.pc
dS = 1500.0 * u.pc
vL = 70 * u.km / u.s # assuming this is \tilde{v}
u0 = 0.303 # Einstein radii
# Parameters we calculate from above or guess.
# raL = 17.5 * 15.0 # in decimal degrees
# decL = -30.0
raL = 10.5 * 15.0 # in decimal degrees
decL = 20.0
imag = 19.0
muS = np.array([-1.75, 6.0]) # Measured from Figure 1
# muL = (vL / dL).to(u.mas/u.yr, equivalencies=dim_ang) # mas/yr
muRelAmp = (vL / dL).to(u.mas / u.yr, equivalencies=dim_ang)
# Note this is in the literature convention of muRel = muL - muS.
# We typically use the opposite.
muL = np.array(
[((muRelAmp.value ** 2 - muS[1] ** 2) ** 0.5 + muS[0]), 0.0])
# muL = np.array([-muL.value, 0.0])
# muL = np.array([-muL.value, 0.0])
thetaE = ((4.0 * c.G * mL / c.c ** 2) * ((1. / dL) - (1. / dS))) ** 0.5
thetaE = thetaE.to(u.mas, equivalencies=dim_ang) # mas
xS0amp = u0 * thetaE # in mas
xS0 = (muL / np.linalg.norm(muL))[::-1] * xS0amp
xS0 = np.array([0.0, 0.0]) * u.mas
# xS0_E = -0.5
# xS0 = np.array([xS0_E, -(1.0**2 - xS0_E**2)**0.5]) * xS0amp
piRel = (u.AU / dL) - (u.AU / dS)
piRel = piRel.to(u.mas, equivalencies=dim_ang)
# vtilde = 1 * u.AU / (piE * tE)
# vtilde = 1 * u.AU / (piE * (thetaE / muRel))
# vtilde = 1 * u.AU * muRel / (piE * thetaE)
# vtilde = 1 * u.AU * muRel / ((piRel / thetaE) * thetaE)
# vtilde = 1 * u.AU * muRel / piRel
# muRelAmp = vtilde * piRel / (1 * u.AU)
# muRel = muL - muS
# muRelAmp = vL * piRel / u.AU
muRelAmp = muRelAmp.to(u.mas / u.yr)
muRel = muL - muS # opposite sign to our convention
print('mu_rel = [{0:4.2f}, {1:4.2f}] (opposite to our convention)'.format(muRel[0], muRel[1]))
print('mu_rel_amp = {0:4.2f}'.format(muRelAmp))
print('mu_rel_amp = {0:4.2f}'.format(np.linalg.norm(muRel)))
print('v_tilde = {0:4.2f}'.format(
(muRelAmp * dL).to(u.km / u.s, equivalencies=dim_ang)))
print('mu_L = [{0:4.2f}, {1:4.2f}], '.format(muL[0], muL[1]))
print('mu_S = [{0:4.2f}, {1:4.2f}], '.format(muS[0], muS[1]))
print('thetaE = {0:4.2f}'.format(thetaE))
print('piRel = {0:4.2f}'.format(piRel))
print('xS0amp = {0:4.2f}'.format(xS0amp))
print('xS0 = [{0:4.2f}, {1:4.2f}], '.format(xS0[0], xS0[1]))
beta = -xS0amp # mas
# t0 = 57160.00 # MJD
t0 = 57290.00 # MJD
# muS = np.array([-2.0, 7.0])
# muL = np.array([90.00, -24.71])
# Convert out of astropy units
mL = mL.value
xS0 = xS0.value / 1e3
beta = beta.value
dL = dL.value
dS = dS.value
# muL = np.array([0, 0])
b_sff = 1.0
run_test_pspl_parallax(raL, decL, mL, t0, xS0, beta, muS, muL, dL, dS,
b_sff, imag, outdir='tests/test_pspl_parallax_belokurov/')
# Modify some axis limits to match the published figure.
plt.figure(2)
plt.gca().set_aspect('auto')
plt.arrow(1, 10, muL[0] / 50.0, muL[1] / 50.0, head_width=0.8,
head_length=0.5, color='black')
plt.arrow(1, 10, muS[0] / 3.0, muS[1] / 3.0, head_width=0.3, head_length=1,
color='blue')
plt.text(3.5, 7, r'$\mu_L$ = {0:.1f} mas/yr'.format(np.linalg.norm(muL)),
color='black', fontsize=12)
plt.text(0, 12, r'$\mu_S$ = {0:.1f} mas/yr'.format(np.linalg.norm(muS)),
color='blue', fontsize=12)
plt.gcf().set_size_inches(8, 5)
plt.subplots_adjust(bottom=0.2)
plt.ylim(-16, 16)
plt.xlim(4, -4)
plt.xlabel(r'$\Delta \alpha^*$ (mas)')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.legend(loc='lower right', fontsize=12)
plt.savefig(outdir + 'pspl_parallax_belokurov.png')
return
def test_pspl_parallax_han2000():
# Scenario from Han+ 2000 (Figure 1)
raL = 80.89375 # LMC R.A.
decL = -69.75611 # LMC Dec.
mL = 0.5 # msun
dL = 10000.0
dS = 50000.0
xS0 = np.array([0.000, 0.0001]) # arcsec?
tE = 100.0 # days
u0amp = 0.2
inv_dist_diff = (1.0 / (dL * u.pc)) - (1.0 / (dS * u.pc))
thetaE = u.rad * np.sqrt(
(4.0 * c.G * mL * u.M_sun / c.c ** 2) * inv_dist_diff)
thetaE_amp = thetaE.to('mas').value # mas
muRelAmp = thetaE_amp / (tE / 365.25)
print(thetaE_amp, muRelAmp)
beta = -u0amp * thetaE_amp # mas
muS = np.array([muRelAmp / 2 ** 0.5, -muRelAmp / 2 ** 0.5])
muL = np.array([0.0, 0.0])
t0 = 57190.00 # ??
b_sff = 1.0
imag = 19.0
run_test_pspl_parallax(raL, decL, mL, t0, xS0, beta, muS, muL, dL, dS,
b_sff, imag, outdir='tests/test_pspl_parallax_han2000/')
return
def test_pspl_parallax_bulge1():
# Scenario from Belokurov and Evans 2002 (Figure 1)
raL = 17.5 * 15.0 # in degrees
decL = -30.0
mL = 10.0 # msun
t0 = 57650.0
xS0 = np.array([0.000, 0.000])
beta = 3.0 # mas
muS = np.array([-4.0, -4.0])
muL = np.array([-6.0, -10.0])
dL = 3000.0
dS = 6000.0
b_sff = 1.0
imag = 19.0
run_test_pspl_parallax(raL, decL, mL, t0, xS0, beta, muS, muL, dL, dS,
b_sff, imag, outdir='tests/test_pspl_par_bulge1/')
return
def run_test_pspl_parallax(raL, decL, mL, t0, xS0, beta, muS, muL, dL, dS,
b_sff, mag_src, outdir=''):
if (outdir != '') and (outdir != None):
os.makedirs(outdir, exist_ok=True)
# No parallax
pspl_n = model.PSPL_PhotAstrom_noPar_Param1(mL,
t0,
beta,
dL,
dL / dS,
xS0[0],
xS0[1],
muL[0],
muL[1],
muS[0],
muS[1],
[b_sff],
[mag_src])
print('pspl_n.u0', pspl_n.u0)
print('pspl_n.muS', pspl_n.muS)
print('pspl_n.u0_hat', pspl_n.u0_hat)
print('pspl_n.thetaE_hat', pspl_n.thetaE_hat)
# With parallax
pspl_p = model.PSPL_PhotAstrom_Par_Param1(mL,
t0,
beta,
dL,
dL / dS,
xS0[0],
xS0[1],
muL[0],
muL[1],
muS[0],
muS[1],
[b_sff],
[mag_src],
raL=raL,
decL=decL)
t = np.arange(t0 - 1000, t0 + 1000, 1)
dt = t - pspl_n.t0
A_n = pspl_n.get_amplification(t)
A_p = pspl_p.get_amplification(t)
xS_n = pspl_n.get_astrometry(t)
xS_p_unlens = pspl_p.get_astrometry_unlensed(t)
xS_p_lensed = pspl_p.get_astrometry(t)
xL_p = pspl_p.get_lens_astrometry(t)
# Plot the amplification
fig1 = plt.figure(1)
plt.clf()
f1_1 = fig1.add_axes((0.20, 0.3, 0.75, 0.6))
plt.plot(dt, 2.5 * np.log10(A_n), 'b-', label='No parallax')
plt.plot(dt, 2.5 * np.log10(A_p), 'r-', label='Parallax')
plt.legend(fontsize=10)
plt.ylabel('2.5 * log(A)')
f1_1.set_xticklabels([])
f2_1 = fig1.add_axes((0.20, 0.1, 0.75, 0.2))
plt.plot(dt, 2.5 * (np.log10(A_p) - np.log10(A_n)), 'k-',
label='Par - No par')
plt.axhline(0, linestyle='--', color='k')
plt.legend(fontsize=10)
plt.ylabel('Diff')
plt.xlabel('t - t0 (MJD)')
plt.savefig(outdir + 'amp_v_time.png')
print("save to " + outdir)
# Plot the positions of everything
fig2 = plt.figure(2)
plt.clf()
plt.plot(xS_n[:, 0] * 1e3, xS_n[:, 1] * 1e3, 'r--',
mfc='none', mec='red', label='Src, No parallax model')
plt.plot(xS_p_unlens[:, 0] * 1e3, xS_p_unlens[:, 1] * 1e3, 'b--',
mfc='none', mec='blue',
label='Src, Parallax model, unlensed')
plt.plot(xL_p[:, 0] * 1e3, xL_p[:, 1] * 1e3, 'k--',
mfc='none', mec='grey', label='Lens')
plt.plot(xS_p_lensed[:, 0] * 1e3, xS_p_lensed[:, 1] * 1e3, 'b-',
label='Src, Parallax model, lensed')
plt.legend(fontsize=10)
plt.gca().invert_xaxis()
plt.xlabel('R.A. (mas)')
plt.ylabel('Dec. (mas)')
plt.axis('equal')
lim = 20
print('LIM = ', lim)
# plt.xlim(lim, -lim) # arcsec
# plt.ylim(-lim, lim)
# plt.axis('tight')
# plt.xlim(0.7, -0.7)
# plt.ylim(-0.7, 0.7)
plt.savefig(outdir + 'on_sky.png')
# Check just the astrometric shift part.
shift_n = pspl_n.get_centroid_shift(t) # mas
shift_p = (xS_p_lensed - xS_p_unlens) * 1e3 # mas
shift_n_amp = np.linalg.norm(shift_n, axis=1)
shift_p_amp = np.linalg.norm(shift_p, axis=1)
fig3 = plt.figure(3)
plt.clf()
f1_3 = fig3.add_axes((0.20, 0.3, 0.75, 0.6))
plt.plot(dt, shift_n_amp, 'r--', label='No parallax model')
plt.plot(dt, shift_p_amp, 'b--', label='Parallax model')
plt.ylabel('Astrometric Shift (mas)')
plt.legend(fontsize=10)
f1_3.set_xticklabels([])
f2_3 = fig3.add_axes((0.20, 0.1, 0.75, 0.2))
plt.plot(dt, shift_p_amp - shift_n_amp, 'k-', label='Par - No par')
plt.legend(fontsize=10)
plt.axhline(0, linestyle='--', color='k')
plt.ylabel('Diff (mas)')
plt.xlabel('t - t0 (MJD)')
plt.savefig(outdir + 'shift_amp_v_t.png')
fig4 = plt.figure(4)
plt.clf()
plt.plot(shift_n[:, 0], shift_n[:, 1], 'r-', label='No parallax')
plt.plot(shift_p[:, 0], shift_p[:, 1], 'b-', label='Parallax')
plt.axhline(0, linestyle='--')
plt.axvline(0, linestyle='--')
plt.gca().invert_xaxis()
plt.legend(fontsize=10)
plt.xlabel('Shift RA (mas)')
plt.ylabel('Shift Dec (mas)')
plt.axis('equal')
plt.savefig(outdir + 'shift_on_sky.png')
print('Einstein radius: ', pspl_n.thetaE_amp, pspl_p.thetaE_amp)
print('Einstein crossing time: ', pspl_n.tE, pspl_n.tE)
return
def test_pspl_parallax_paczynski1998(t0=57000):
"""
I can't quite get this one to match!!! Why not? Maybe they kept in the parallax of the source?
i.e. just removed proper motions.
"""
outdir = 'tests/test_pspl_parallax_paczynski1998/'
if (outdir != '') and (outdir != None):
os.makedirs(outdir, exist_ok=True)
# Scenarios from Paczynski 1998
raL = 80.89375 # LMC R.A.
raL = 240.0 # LMC R.A.
# decL = -69.75611 # LMC Dec.
decL = -71.74 # LMC Dec. This is the sin \beta = -0.99 where \beta =
mL = 0.3 # msun
# t0 = 57000.00
xS0 = np.array([0.000, 0.088e-3]) # arcsec
beta = 0.088 # mas
# muS = np.array([-3.18, -0.28])
# muL = np.array([0.0, 0.0])
muS = np.array([-4.18, -0.28])
muL = np.array([0.0, 0.0])
# muS = np.array([-2.4, -0.00000001])
# muL = np.array([0.0, 0.0])
dL = 10e3 # 10 kpc
dS = 50e3 # 50 kpc in LMC
b_sff = 1.0
mag_src = 19.0
# No parallax
pspl_n = model.PSPL_PhotAstrom_noPar_Param1(mL,
t0,
beta,
dL,
dL / dS,
xS0[0],
xS0[1],
muL[0],
muL[1],
muS[0],
muS[1],
[b_sff],
[mag_src])
print('pspl_n.u0', pspl_n.u0)
print('pspl_n.muS', pspl_n.muS)
print('pspl_n.u0_hat', pspl_n.u0_hat)
print('pspl_n.thetaE_hat', pspl_n.thetaE_hat)
# With parallax
pspl_p = model.PSPL_PhotAstrom_Par_Param1(mL,
t0,
beta,
dL,
dL / dS,
xS0[0],
xS0[1],
muL[0],
muL[1],
muS[0],
muS[1],
[b_sff],
[mag_src],
raL=raL,
decL=decL)
print('pspl_p.u0', pspl_p.u0)
print('pspl_p.muS', pspl_p.muS)
print('pspl_p.u0_hat', pspl_p.u0_hat)
print('pspl_p.thetaE_hat', pspl_p.thetaE_hat)
# t = np.arange(56000, 58000, 1)
t = np.arange(t0 - 500, t0 + 500, 1)
dt = t - pspl_n.t0
A_n = pspl_n.get_amplification(t)
A_p = pspl_p.get_amplification(t)
xS_n = pspl_n.get_astrometry(t)
xS_p_unlens = pspl_p.get_astrometry_unlensed(t)
xS_p_lensed = pspl_p.get_astrometry(t)
xL_p_unlens = pspl_p.get_lens_astrometry(t)
thetaS = (xS_p_unlens - xL_p_unlens) * 1e3 # mas
u = thetaS / pspl_p.tE
thetaS_lensed = (xS_p_lensed - xL_p_unlens) * 1e3 # mas
shift_n = pspl_n.get_centroid_shift(t) # mas
shift_p = (xS_p_lensed - xS_p_unlens) * 1e3 # mas
shift_n_amp = np.linalg.norm(shift_n, axis=1)
shift_p_amp = np.linalg.norm(shift_p, axis=1)
# Plot the amplification
fig1 = plt.figure(1)
plt.clf()
f1_1 = fig1.add_axes((0.1, 0.3, 0.8, 0.6))
plt.plot(dt, 2.5 * np.log10(A_n), 'b-', label='No parallax')
plt.plot(dt, 2.5 * np.log10(A_p), 'r-', label='Parallax')
plt.legend()
plt.ylabel('2.5 * log(A)')
f1_1.set_xticklabels([])
f2_1 = fig1.add_axes((0.1, 0.1, 0.8, 0.2))
plt.plot(dt, 2.5 * (np.log10(A_p) - np.log10(A_n)), 'k-',
label='Par - No par')
plt.axhline(0, linestyle='--', color='k')
plt.legend()
plt.ylabel('Diff')
plt.xlabel('t - t0 (MJD)')
idx = np.argmin(np.abs(t - t0))
plt.savefig(outdir + 'fig1.png')
# Plot the positions of everything
fig2 = plt.figure(2)
plt.clf()
plt.plot(xS_n[:, 0], xS_n[:, 1], 'r--', mfc='none', mec='red',
label='No parallax model')
plt.plot(xS_p_unlens[:, 0], xS_p_unlens[:, 1], 'b--', mfc='blue',
mec='blue',
label='Parallax model, unlensed')
plt.plot(xS_p_lensed[:, 0], xS_p_lensed[:, 1], 'b-',
label='Parallax model, lensed')
plt.plot(xL_p_unlens[:, 0], xL_p_unlens[:, 1], 'g--', mfc='none',
mec='green',
label='Parallax model, Lens')
plt.plot(xS_n[idx, 0], xS_n[idx, 1], 'rx')
plt.plot(xS_p_unlens[idx, 0], xS_p_unlens[idx, 1], 'bx')
plt.plot(xS_p_lensed[idx, 0], xS_p_lensed[idx, 1], 'bx')
plt.plot(xL_p_unlens[idx, 0], xL_p_unlens[idx, 1], 'gx')
plt.legend()
plt.gca().invert_xaxis()
# lim = 0.05
# plt.xlim(lim, -lim) # arcsec
# plt.ylim(-lim, lim)
# plt.xlim(0.006, -0.006) # arcsec
# plt.ylim(-0.02, 0.02)
plt.xlabel('R.A. (")')
plt.ylabel('Dec. (")')
plt.savefig(outdir + 'fig2.png')
# Check just the astrometric shift part.
fig3 = plt.figure(3)
plt.clf()
f1_3 = fig3.add_axes((0.2, 0.3, 0.7, 0.6))
plt.plot(dt, shift_n_amp, 'r--', label='No parallax model')
plt.plot(dt, shift_p_amp, 'b--', label='Parallax model')
plt.legend(fontsize=10)
plt.ylabel('Astrometric Shift (mas)')
f1_3.set_xticklabels([])
f2_3 = fig3.add_axes((0.2, 0.1, 0.7, 0.2))
plt.plot(dt, shift_p_amp - shift_n_amp, 'k-', label='Par - No par')
plt.legend()
plt.axhline(0, linestyle='--', color='k')
plt.xlabel('t - t0 (MJD)')
plt.ylabel('Res.')
plt.savefig(outdir + 'fig3.png')
fig4 = plt.figure(4)
plt.clf()
plt.plot(shift_n[:, 0], shift_n[:, 1], 'r-', label='No parallax')
plt.plot(shift_p[:, 0], shift_p[:, 1], 'b-', label='Parallax')
plt.axhline(0, linestyle='--')
plt.axvline(0, linestyle='--')
plt.gca().invert_xaxis()
plt.legend(loc='upper left')
plt.xlabel('Shift RA (mas)')
plt.ylabel('Shift Dec (mas)')
plt.axis('equal')
plt.savefig(outdir + 'fig4.png')
plt.figure(5)
plt.clf()
plt.plot(thetaS[:, 0], shift_p[:, 0], 'r-', label='RA')
plt.plot(thetaS[:, 1], shift_p[:, 1], 'b-', label='Dec')
plt.xlabel('thetaS (")')
plt.ylabel('Shift (mas)')
plt.savefig(outdir + 'fig5.png')
plt.figure(6)
plt.clf()
plt.plot(thetaS[:, 0], thetaS[:, 1], 'r-', label='Unlensed')
plt.plot(thetaS_lensed[:, 0], thetaS_lensed[:, 1], 'b-', label='Lensed')
plt.axvline(0, linestyle='--', color='k')
plt.legend()
plt.xlabel('thetaS_E (")')
plt.ylabel('thetaS_N (")')
plt.savefig(outdir + 'fig6.png')
print('Einstein radius: ', pspl_n.thetaE_amp, pspl_p.thetaE_amp)
print('Einstein crossing time: ', pspl_n.tE, pspl_p.tE)
return
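# The amplification and centroid-shift curves plotted above follow the standard
# point-source point-lens relations. Below is a minimal, self-contained sketch of
# those relations (assuming the usual dark-lens, unresolved-images convention);
# it is only meant for spot-checking get_amplification() and get_centroid_shift()
# by eye and is not necessarily identical to the model class internals.
import numpy as np

def pspl_amplification_sketch(u):
    """Paczynski (1986) point-lens amplification for dimensionless separation u."""
    u = np.atleast_1d(u).astype(float)
    return (u**2 + 2.0) / (u * np.sqrt(u**2 + 4.0))

def pspl_centroid_shift_sketch(u, thetaE_amp):
    """Unresolved-image centroid shift for a dark lens, in the same units as
    thetaE_amp: delta_c = thetaE * u / (u**2 + 2)."""
    u = np.atleast_1d(u).astype(float)
    return thetaE_amp * u / (u**2 + 2.0)

# Example: at u = 1 the amplification is ~1.34 and the shift is thetaE / 3.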
def test_pspl_parallax_boden1998(t0=57000):
"""
I can get this one to match Figure 6 of Boden et al. 1998.
"""
# Scenario from Boden et al. 1998 (Figure 6)
raL = 80.89375 # LMC R.A.
decL = -71.74  # LMC Dec. This is the sin(beta) = -0.99 case, where beta is the ecliptic latitude.
mL = 0.1 # msun
xS0 = np.array([0.000, 0.088e-3]) # arcsec
beta = -0.16 # mas same as p=0.4
muS = np.array([-2.0, 1.5])
muL = np.array([0.0, 0.0])
dL = 8e3  # 8 kpc
dS = 50e3 # 50 kpc in LMC
b_sff = 1.0
mag_src = 19.0
# No parallax
pspl_n = model.PSPL_PhotAstrom_noPar_Param1(mL,
t0,
beta,
dL,
dL / dS,
xS0[0],
xS0[1],
muL[0],
muL[1],
muS[0],
muS[1],
[b_sff],
[mag_src])
print('pspl_n.u0', pspl_n.u0)
print('pspl_n.muS', pspl_n.muS)
print('pspl_n.u0_hat', pspl_n.u0_hat)
print('pspl_n.thetaE_hat', pspl_n.thetaE_hat)
# With parallax
pspl_p = model.PSPL_PhotAstrom_Par_Param1(mL,
t0,
beta,
dL,
dL / dS,
xS0[0],
xS0[1],
muL[0],
muL[1],
muS[0],
muS[1],
[b_sff],
[mag_src],
raL=raL,
decL=decL)
print('pspl_p.u0', pspl_p.u0)
print('pspl_p.muS', pspl_p.muS)
print('pspl_p.u0_hat', pspl_p.u0_hat)
print('pspl_p.thetaE_hat', pspl_p.thetaE_hat)
# t = np.arange(56000, 58000, 1)
t = np.arange(t0 - 500, t0 + 500, 1)
dt = t - pspl_n.t0
A_n = pspl_n.get_amplification(t)
A_p = pspl_p.get_amplification(t)
xS_n = pspl_n.get_astrometry(t)
xS_p_unlens = pspl_p.get_astrometry_unlensed(t)
xS_p_lensed = pspl_p.get_astrometry(t)
xL_p_unlens = pspl_p.get_lens_astrometry(t)
thetaS = (xS_p_unlens - xL_p_unlens) * 1e3 # mas
u = thetaS / pspl_p.thetaE_amp  # dimensionless separation in Einstein radii
thetaS_lensed = (xS_p_lensed - xL_p_unlens) * 1e3 # mas
shift_n = pspl_n.get_centroid_shift(t) # mas
shift_p = (xS_p_lensed - xS_p_unlens) * 1e3 # mas
shift_n_amp = np.linalg.norm(shift_n, axis=1)
shift_p_amp = np.linalg.norm(shift_p, axis=1)
# Plot the amplification
fig1 = plt.figure(1)
plt.clf()
f1_1 = fig1.add_axes((0.1, 0.3, 0.8, 0.6))
plt.plot(dt, 2.5 * np.log10(A_n), 'b-', label='No parallax')
plt.plot(dt, 2.5 * np.log10(A_p), 'r-', label='Parallax')
plt.legend()
plt.ylabel('2.5 * log(A)')
f1_1.set_xticklabels([])
f2_1 = fig1.add_axes((0.1, 0.1, 0.8, 0.2))
plt.plot(dt, 2.5 * (np.log10(A_p) - np.log10(A_n)), 'k-',
label='Par - No par')
plt.axhline(0, linestyle='--', color='k')
plt.legend()
plt.ylabel('Diff')
plt.xlabel('t - t0 (MJD)')
idx = np.argmin(np.abs(t - t0))
# Plot the positions of everything
fig2 = plt.figure(2)
plt.clf()
plt.plot(xS_n[:, 0], xS_n[:, 1], 'r--', mfc='none', mec='red',
label='No parallax model')
plt.plot(xS_p_unlens[:, 0], xS_p_unlens[:, 1], 'b--', mfc='blue',
mec='blue',
label='Parallax model, unlensed')
plt.plot(xS_p_lensed[:, 0], xS_p_lensed[:, 1], 'b-',
label='Parallax model, lensed')
plt.plot(xL_p_unlens[:, 0], xL_p_unlens[:, 1], 'g--', mfc='none',
mec='green',
label='Parallax model, Lens')
plt.plot(xS_n[idx, 0], xS_n[idx, 1], 'rx')
plt.plot(xS_p_unlens[idx, 0], xS_p_unlens[idx, 1], 'bx')
plt.plot(xS_p_lensed[idx, 0], xS_p_lensed[idx, 1], 'bx')
plt.plot(xL_p_unlens[idx, 0], xL_p_unlens[idx, 1], 'gx')
plt.legend()
plt.gca().invert_xaxis()
# lim = 0.05
# plt.xlim(lim, -lim) # arcsec
# plt.ylim(-lim, lim)
# plt.xlim(0.006, -0.006) # arcsec
# plt.ylim(-0.02, 0.02)
plt.xlabel('R.A. (")')
plt.ylabel('Dec. (")')
# Check just the astrometric shift part.
fig3 = plt.figure(3)
plt.clf()
f1_3 = fig3.add_axes((0.2, 0.3, 0.7, 0.6))
plt.plot(dt, shift_n_amp, 'r--', label='No parallax model')
plt.plot(dt, shift_p_amp, 'b--', label='Parallax model')
plt.legend(fontsize=10)
plt.ylabel('Astrometric Shift (mas)')
f1_3.set_xticklabels([])
f2_3 = fig3.add_axes((0.2, 0.1, 0.7, 0.2))
plt.plot(dt, shift_p_amp - shift_n_amp, 'k-', label='Par - No par')
plt.legend()
plt.axhline(0, linestyle='--', color='k')
plt.xlabel('t - t0 (MJD)')
plt.ylabel('Res.')
fig4 = plt.figure(4)
plt.clf()
plt.plot(shift_n[:, 0], shift_n[:, 1], 'r-', label='No parallax')
plt.plot(shift_p[:, 0], shift_p[:, 1], 'b-', label='Parallax')
plt.axhline(0, linestyle='--')
plt.axvline(0, linestyle='--')
plt.gca().invert_xaxis()
plt.legend(loc='upper left')
plt.xlabel('Shift RA (mas)')
plt.ylabel('Shift Dec (mas)')
plt.axis('equal')
plt.figure(5)
plt.clf()
plt.plot(thetaS[:, 0], shift_p[:, 0], 'r-', label='RA')
plt.plot(thetaS[:, 1], shift_p[:, 1], 'b-', label='Dec')
plt.xlabel('thetaS (")')
plt.ylabel('Shift (mas)')
plt.figure(6)
plt.clf()
plt.plot(thetaS[:, 0], thetaS[:, 1], 'r-', label='Unlensed')
plt.plot(thetaS_lensed[:, 0], thetaS_lensed[:, 1], 'b-', label='Lensed')
plt.axvline(0, linestyle='--', color='k')
plt.legend()
plt.xlabel('thetaS_E (")')
plt.ylabel('thetaS_N (")')
print('Einstein radius: ', pspl_n.thetaE_amp, pspl_p.thetaE_amp)
print('Einstein crossing time: ', pspl_n.tE, pspl_p.tE)
return
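# For reference, the parallax models compared above displace the source-lens
# separation by the projected parallax vector. The helper below is a
# self-contained sketch of that bookkeeping (it mirrors the debug recalculation
# further down in this file); the parallax vector P(t) must be supplied, e.g.
# from model.parallax_in_direction(), and the sign convention is an assumption
# to be checked against the model classes.
import numpy as np

def u_of_t_sketch(t, t0, tE, u0_vec, thetaE_hat, piE_amp=0.0, parallax_vec=None):
    """Source-lens separation (in Einstein radii) vs. time:
    u(t) = u0 + tau * thetaE_hat - piE_amp * P(t), with tau = (t - t0) / tE."""
    t = np.atleast_1d(t).astype(float)
    tau = ((t - t0) / tE)[:, None]
    u = np.asarray(u0_vec)[None, :] + tau * np.asarray(thetaE_hat)[None, :]
    if parallax_vec is not None:
        u = u - piE_amp * np.asarray(parallax_vec)
    return u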
def test_PSPL_phot_Lu2016():
"""
Compare observed photometry to the model for
OB120169 as listed in Table 6 (photometry
solution #1).
"""
raL = (17.0 + (49.0 / 60.) + (51.38 / 3600.0)) * 15.0 # degrees
decL = -35 + (22.0 / 60.0) + (28.0 / 3600.0)
t0 = 56026.03
u0_amp = -0.222
tE = 135.0
piE_E = -0.058
piE_N = 0.11
b_sff = [1.1]
mag_src = [19.266]
# Read in the OGLE I-band photometry.
tests_dir = os.path.dirname(os.path.realpath(__file__))
dat = Table.read(tests_dir + '/OB120169_phot.dat', format='ascii')
dat['col1'] -= 2400000.5
dat.rename_column('col1', 'mjd')
dat.rename_column('col2', 'I')
dat.rename_column('col3', 'Ierr')
t_mod = np.arange(dat['mjd'].min(), dat['mjd'].max(), 10)
def plot_data_model(dat, t_mod, I_mod, I_mod_at_tobs, fig_num=1, title=''):
plt.clf()
f, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=False, num=fig_num,
gridspec_kw={'height_ratios': [3, 1]})
plt.subplots_adjust(hspace=0)
ax1.errorbar(dat['mjd'], dat['I'], yerr=dat['Ierr'],
fmt='.', alpha=0.5, color='red')
ax1.plot(t_mod, I_mod, color='black')
ax2.errorbar(dat['mjd'], dat['I'] - I_mod_at_tobs, yerr=dat['Ierr'],
fmt='.', alpha=0.5, color='red')
ax2.axhline(0, color='black')
ax1.invert_yaxis()
ax1.set_ylabel('I-band')
ax2.set_ylabel('Resid.')
ax2.set_xlabel('Time (MJD)')
ax1.set_title(title)
return
##########
# Test #0: PSPL_phot - no blending
##########
mod = model.PSPL_Phot_noPar_Param1(t0,
u0_amp,
tE,
piE_E,
piE_N,
[1.0],
mag_src)
I_mod = mod.get_photometry(t_mod)
I_mod_at_tobs = mod.get_photometry(dat['mjd'])
plt.figure(1)
plot_data_model(dat, t_mod, I_mod, I_mod_at_tobs,
fig_num=1, title='PSPL_phot b_sff=1')
##########
# Test #1: PSPL_phot
##########
mod = model.PSPL_Phot_noPar_Param1(t0,
u0_amp,
tE,
piE_E,
piE_N,
b_sff,
mag_src)
I_mod = mod.get_photometry(t_mod)
I_mod_at_tobs = mod.get_photometry(dat['mjd'])
plt.figure(2)
plot_data_model(dat, t_mod, I_mod, I_mod_at_tobs,
fig_num=2, title='PSPL_phot')
##########
# Test #2: PSPL_phot_parallax
##########
mod = model.PSPL_Phot_Par_Param1(t0,
u0_amp,
tE,
piE_E,
piE_N,
b_sff,
mag_src,
raL=raL,
decL=decL)
I_mod = mod.get_photometry(t_mod)
I_mod_at_tobs = mod.get_photometry(dat['mjd'])
plt.figure(3)
plot_data_model(dat, t_mod, I_mod, I_mod_at_tobs,
fig_num=3, title='PSPL_phot_parallax')
return
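# The tests above vary the source flux fraction b_sff = f_S / (f_S + f_B).
# Below is a minimal sketch of how blending enters the model magnitude; it uses
# the same flux bookkeeping as the pyLIMA comparison later in this file
# (f_B = f_S * (1 - b_sff) / b_sff) and is only intended for sanity checks.
import numpy as np

def blended_mag_sketch(A, mag_src, b_sff):
    """Observed magnitude for amplification A, source magnitude mag_src, and
    source flux fraction b_sff. The total flux is f_S * A + f_B, so the
    photometric zeropoint cancels."""
    A = np.atleast_1d(A).astype(float)
    return mag_src - 2.5 * np.log10(A + (1.0 - b_sff) / b_sff)

# Example: with no blending (b_sff = 1), the baseline recovers mag_src.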
def test_pspl_parallax2_bulge():
outdir = 'tests/test_pspl_par2_bulge1/'
if (outdir != '') and (outdir is not None):
os.makedirs(outdir, exist_ok=True)
# Scenario from Belokurov and Evans 2002 (Figure 1)
raL = 17.5 * 15.0 # in degrees
decL = -30.0
mL = 10.0 # msun
t0 = 57650.0
xS0 = np.array([0.000, 0.000])
beta = 3.0 # mas
muS = np.array([-4.0, -4.0])
muL = np.array([-6.0, -10.0])
dL = 3000.0
dS = 6000.0
b_sff = 1.0
mag_src = 19.0
pspl_par1 = model.PSPL_PhotAstrom_Par_Param1(mL,
t0,
beta,
dL,
dL / dS,
xS0[0],
xS0[1],
muL[0],
muL[1],
muS[0],
muS[1],
[b_sff],
[mag_src],
raL=raL,
decL=decL)
pspl_par2 = model.PSPL_PhotAstrom_Par_Param2(pspl_par1.t0,
pspl_par1.u0_amp,
pspl_par1.tE,
pspl_par1.thetaE_amp,
pspl_par1.piS,
pspl_par1.piE_E,
pspl_par1.piE_N,
pspl_par1.xS0[0],
pspl_par1.xS0[1],
pspl_par1.muS[0],
pspl_par1.muS[1],
[b_sff],
[mag_src],
raL=raL,
decL=decL)
members1 = vars(pspl_par1)
members2 = vars(pspl_par2)
# Check results with assertions
for kk in members1.keys():
if kk in members2.keys():
print('{0:13s} {1:25s} {2:25s}'.format(kk, str(members1[kk]),
str(members2[kk])))
np.testing.assert_almost_equal(members1[kk], members2[kk], 3)
t = np.arange(t0 - 1000, t0 + 1000, 1)
dt = t - pspl_par1.t0
mag_out1 = pspl_par1.get_photometry(t)
mag_out2 = pspl_par2.get_photometry(t)
plt.figure(1)
plt.clf()
plt.plot(t, mag_out1, 'k-', label='mod 1')
plt.plot(t, mag_out2, 'r-', label='mod 2')
plt.legend()
plt.gca().invert_yaxis()
plt.xlabel('Time (days)')
plt.ylabel('Magnitude')
plt.savefig(outdir + 'comp_mod_phot.png')
upos_out1 = pspl_par1.get_astrometry_unlensed(t)
upos_out2 = pspl_par2.get_astrometry_unlensed(t)
pos_out1 = pspl_par1.get_astrometry(t)
pos_out2 = pspl_par2.get_astrometry(t)
plt.figure(2)
plt.clf()
plt.plot(t, upos_out1[:, 0] * 1e3, 'k--', label='mod 1 unlens')
plt.plot(t, upos_out2[:, 0] * 1e3, 'r--', label='mod 2 unlens')
plt.plot(t, pos_out1[:, 0] * 1e3, 'k-', label='mod 1')
plt.plot(t, pos_out2[:, 0] * 1e3, 'r-', label='mod 2')
plt.legend()
plt.xlabel('Time (days)')
plt.ylabel(r'$\alpha^*$ (mas)')
plt.savefig(outdir + 'comp_mod_posX.png')
plt.figure(3)
plt.clf()
plt.plot(t, upos_out1[:, 1] * 1e3, 'k--', label='mod 1 unlens')
plt.plot(t, upos_out2[:, 1] * 1e3, 'r--', label='mod 2 unlens')
plt.plot(t, pos_out1[:, 1] * 1e3, 'k-', label='mod 1')
plt.plot(t, pos_out2[:, 1] * 1e3, 'r-', label='mod 2')
plt.legend()
plt.xlabel('Time (days)')
plt.ylabel(r'$\delta$ (mas)')
plt.savefig(outdir + 'comp_mod_posY.png')
plt.figure(4)
plt.clf()
plt.plot(upos_out1[:, 0] * 1e3, upos_out1[:, 1] * 1e3, 'k--',
label='mod 1 unlens')
plt.plot(upos_out2[:, 0] * 1e3, upos_out2[:, 1] * 1e3, 'r--',
label='mod 2 unlens')
plt.plot(pos_out1[:, 0] * 1e3, pos_out1[:, 1] * 1e3, 'k-', label='mod 1')
plt.plot(pos_out2[:, 0] * 1e3, pos_out2[:, 1] * 1e3, 'r-', label='mod 2')
plt.xlabel(r'$\alpha^*$ (mas)')
plt.ylabel(r'$\delta$ (mas)')
plt.legend()
plt.savefig(outdir + 'comp_mod_posXY.png')
t = np.arange(t0 - 1000, t0 + 1000, 10)
dt = t - pspl_par1.t0
# Check that we get the same things out of the two models.
np.testing.assert_almost_equal(pspl_par1.mL, pspl_par2.mL, 3)
np.testing.assert_almost_equal(pspl_par1.dL, pspl_par2.dL)
np.testing.assert_almost_equal(pspl_par1.dS, pspl_par2.dS)
np.testing.assert_almost_equal(pspl_par1.piS, pspl_par2.piS)
np.testing.assert_almost_equal(pspl_par1.piL, pspl_par2.piL)
np.testing.assert_almost_equal(pspl_par1.muS, pspl_par2.muS)
np.testing.assert_almost_equal(pspl_par1.muL, pspl_par2.muL)
np.testing.assert_almost_equal(pspl_par1.muRel, pspl_par2.muRel)
A_1 = pspl_par1.get_amplification(t)
A_2 = pspl_par2.get_amplification(t)
np.testing.assert_almost_equal(A_1, A_2)
xS_1 = pspl_par1.get_astrometry(t)
xS_2 = pspl_par2.get_astrometry(t)
np.testing.assert_almost_equal(xS_1, xS_2)
xS_unlens_1 = pspl_par1.get_astrometry_unlensed(t)
xS_unlens_2 = pspl_par2.get_astrometry_unlensed(t)
np.testing.assert_almost_equal(xS_unlens_1, xS_unlens_2)
xL_1 = pspl_par1.get_lens_astrometry(t)
xL_2 = pspl_par2.get_lens_astrometry(t)
np.testing.assert_almost_equal(xL_1, xL_2)
# Check just the astrometric shift part.
shift_1 = pspl_par1.get_centroid_shift(t) # mas
shift_2 = pspl_par2.get_centroid_shift(t) # mas
np.testing.assert_almost_equal(shift_1, shift_2)
return
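# The Param1 vs. Param2 comparison above relies on the standard mapping from
# physical parameters (mL, dL, dS, proper motions, beta) to fit parameters
# (thetaE, piE, tE, u0_amp, piS). The sketch below writes that mapping out
# explicitly. The constant kappa = 4G / (c^2 AU) ~ 8.14 mas/Msun and the sign
# conventions (e.g. muRel = muS - muL, piE parallel to muRel) are the usual
# ones and may differ in detail from the model classes.
import numpy as np

KAPPA_MAS_PER_MSUN = 8.144  # 4G / (c^2 AU) in mas / Msun (assumed value)

def physical_to_fit_params_sketch(mL, dL_pc, dS_pc, muL_masyr, muS_masyr, beta_mas):
    """Map physical parameters to the (thetaE, piE, tE, u0_amp, piS) set.
    Distances in pc, proper motions in mas/yr, beta (impact parameter) in mas."""
    piL = 1.0e3 / dL_pc                                  # lens parallax (mas)
    piS = 1.0e3 / dS_pc                                  # source parallax (mas)
    piRel = piL - piS                                    # relative parallax (mas)
    thetaE = np.sqrt(KAPPA_MAS_PER_MSUN * mL * piRel)    # Einstein radius (mas)
    muRel = np.asarray(muS_masyr, dtype=float) - np.asarray(muL_masyr, dtype=float)
    muRel_amp = np.linalg.norm(muRel)                    # mas/yr
    tE = (thetaE / muRel_amp) * 365.25                   # Einstein crossing time (days)
    piE_amp = piRel / thetaE
    piE_vec = piE_amp * muRel / muRel_amp                # piE is parallel to muRel
    u0_amp = beta_mas / thetaE                           # dimensionless impact parameter
    return dict(thetaE=thetaE, piS=piS, piE_E=piE_vec[0], piE_N=piE_vec[1],
                tE=tE, u0_amp=u0_amp)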
def compare_lumlens_parallax_bulge():
raL_in = 17.30 * 15. # Bulge R.A.
decL_in = -29.0
mL_in = 10.0 # msun
t0_in = 57000.0
xS0_in = np.array([0.000, 0.088e-3]) # arcsec
beta_in = 2.0 # mas same as p=0.4
muS_in = np.array([-5.0, 0.0])
muL_in = np.array([0.0, 0.0])
dL_in = 4000.0 # pc
dS_in = 8000.0 # pc
b_sff_in = 0.5
mag_src_in = 19.0
pspl_ll = model.PSPL_PhotAstrom_LumLens_Par_Param1(mL=mL_in,
t0=t0_in,
beta=beta_in,
dL=dL_in,
dL_dS=dL_in / dS_in,
xS0_E=xS0_in[0],
xS0_N=xS0_in[1],
muL_E=muL_in[0],
muL_N=muL_in[1],
muS_E=muS_in[0],
muS_N=muS_in[1],
raL=raL_in,
decL=decL_in,
b_sff=[b_sff_in],
mag_src=[mag_src_in])
pspl = model.PSPL_PhotAstrom_Par_Param1(mL=mL_in,
t0=t0_in,
beta=beta_in,
dL=dL_in,
dL_dS=dL_in / dS_in,
xS0_E=xS0_in[0],
xS0_N=xS0_in[1],
muL_E=muL_in[0],
muL_N=muL_in[1],
muS_E=muS_in[0],
muS_N=muS_in[1],
raL=raL_in,
decL=decL_in,
b_sff=[b_sff_in],
mag_src=[mag_src_in])
t = np.linspace(t0_in - 1500, t0_in + 1500, 1000)
mag = pspl.get_photometry(t)
pos = pspl.get_astrometry(t)
pos_src = pspl.get_astrometry_unlensed(t)
mag_ll = pspl_ll.get_photometry(t)
pos_ll = pspl_ll.get_astrometry(t)
pos_src_ll = pspl_ll.get_astrometry_unlensed(t)
plt.figure(1)
plt.clf()
plt.subplots_adjust(left=0.2)
# plt.plot(pos[:, 0], pos[:, 1], label='Dark Lens')
# plt.plot(pos_ll[:, 0], pos_ll[:, 1], label='Lum Lens')
# plt.plot(pos_src[:, 0], pos_src[:, 1], label='Dark Unlens')
# plt.plot(pos_src_ll[:, 0], pos_src_ll[:, 1], label='Lum Unlens')
plt.plot((pos[:, 0] - pos_src[:, 0])*1E3, (pos[:, 1] - pos_src[:, 1])*1E3, label='Dark Lens')
plt.plot((pos_ll[:, 0] - pos_src_ll[:, 0])*1E3, (pos_ll[:, 1] - pos_src_ll[:, 1])*1E3, label='Lum Lens')
plt.legend()
plt.axis('equal')
plt.xlabel(r'$\delta_{c,x}$ (mas)')
plt.ylabel(r'$\delta_{c,y}$ (mas)')
plt.show()
#
plt.figure(2)
plt.clf()
plt.plot(t, mag, label='Dark Lens')
plt.plot(t, mag_ll, label='Lum Lens')
plt.gca().invert_yaxis()
plt.xlabel('Time')
plt.ylabel('Mag')
plt.legend()
plt.show()
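# The comparison above contrasts a dark lens with a luminous lens. Below is a
# rough, self-contained sketch of the flux-weighted centroid produced by an
# unresolved luminous lens; it is only the generic flux-weighting argument and
# is not a statement of how PSPL_PhotAstrom_LumLens_Par_Param1 is implemented.
import numpy as np

def lumlens_centroid_sketch(x_src_lensed, f_src_lensed, x_lens, f_lens):
    """Flux-weighted centroid of the (unresolved) lensed source images plus a
    luminous lens. Positions are (N, 2) arrays; fluxes are scalars or length N."""
    x_src_lensed = np.atleast_2d(np.asarray(x_src_lensed, dtype=float))
    x_lens = np.atleast_2d(np.asarray(x_lens, dtype=float))
    f_src = np.atleast_1d(np.asarray(f_src_lensed, dtype=float))[:, None]
    f_L = np.atleast_1d(np.asarray(f_lens, dtype=float))[:, None]
    return (f_src * x_src_lensed + f_L * x_lens) / (f_src + f_L)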
def test_parallax():
"""
Compare our parallax vector and motion equations with
Astropy (which now has parallax implemented and is well
tested against Gaia analyses).
"""
# Make a parallax model to use our code directly.
raL_in = 17.30 * 15. # Bulge R.A.
decL_in = -29.0
mL_in = 10.0 # msun
t0_in = 57000.0
xS0_in = np.array([0.000, 0.000]) # arcsec
beta_in = 2.0 # mas same as p=0.4
muS_in = np.array([0.0, 0.0])
muL_in = np.array([-5.0, 0.0])
dL_in = 4000.0 # pc
dS_in = 8000.0 # pc
b_sff_in = 1.0
mag_src_in = 19.0
##########
# BAGLE
##########
pspl = model.PSPL_PhotAstrom_Par_Param1(mL_in,
t0_in,
beta_in,
dL_in,
dL_in / dS_in,
xS0_in[0],
xS0_in[1],
muL_in[0],
muL_in[1],
muS_in[0],
muS_in[1],
[b_sff_in],
[mag_src_in],
raL=raL_in,
decL=decL_in)
# Fetch the astrometry for the unlensed source and lens.
# Note these are the positions in the observer's geocentric frame, but
# fixed to the stars' reference frame.
t = np.linspace(-2 * pspl.tE, 2 * pspl.tE, 300) # in days (MJD)
t += t0_in
xS_bagle = pspl.get_astrometry_unlensed(t) * 1e3 * u.mas # in mas
xL_bagle = pspl.get_lens_astrometry(t) * 1e3 * u.mas # in mas
##########
# Astropy
##########
# Now make an Astropy coordinate for the same source object
# with the same position, proper motion, distance and calculate
# the source trajectory vs. time.
t_obj = Time(t, format='mjd')
#### REF
# First we need a reference point at the RA and Dec that will
# serve as the origin point for our relative coordinate system.
print(f'c0 coords = {raL_in:.10f}, {decL_in:.10f}')
c0 = SkyCoord(raL_in * u.deg, decL_in * u.deg,
pm_ra_cosdec=0.0 * u.mas / u.yr,
pm_dec=0.0 * u.mas / u.yr,
distance=1e6 * u.Mpc,
obstime=Time(t0_in, format='mjd'))
# Propagate the motion (velocity and parallax) to the correct times.
x0_astropy_icrs = c0.apply_space_motion(new_obstime=t_obj)
x0_astropy_gcrs = x0_astropy_icrs.transform_to('gcrs')
x0_apy_ra = x0_astropy_gcrs.ra
x0_apy_dec = x0_astropy_gcrs.dec
cosd = np.cos(x0_apy_dec.to('radian'))
#### SOURCE
# Define the source coordinates (with the correct proper motion and distance).
raS0 = (raL_in * u.deg) + (pspl.xS0[0] * u.deg / 3600)
decS0 = (decL_in * u.deg) + (pspl.xS0[1] * u.deg / 3600)
print(f'S0 coords = {raS0:.10f}, {decS0:.10f}')
cS = SkyCoord(raS0, decS0,
pm_ra_cosdec=muS_in[0] * u.mas / u.yr,
pm_dec=muS_in[1] * u.mas / u.yr,
distance=dS_in * u.pc,
obstime=Time(t0_in, format='mjd'))
# Propagate the motion (velocity and parallax) to the correct times.
xS_astropy_icrs = cS.apply_space_motion(new_obstime=t_obj)
xS_astropy_gcrs = xS_astropy_icrs.transform_to('gcrs')
xS_apy_ra = xS_astropy_gcrs.ra
xS_apy_dec = xS_astropy_gcrs.dec
dxS_apy = (xS_apy_ra - x0_apy_ra ).to('mas') * cosd
dyS_apy = (xS_apy_dec - x0_apy_dec).to('mas')
xS_astropy = np.vstack([dxS_apy.value, dyS_apy.value]).T * u.mas
#### LENS
raL0 = (raL_in * u.deg) + (pspl.xL0[0] * u.deg / 3600)
decL0 = (decL_in * u.deg) + (pspl.xL0[1] * u.deg / 3600)
print(f'L0 coords = {raL0:.10f}, {decL0:.10f}')
cL = SkyCoord(raL0, decL0,
pm_ra_cosdec=muL_in[0] * u.mas / u.yr,
pm_dec=muL_in[1] * u.mas / u.yr,
distance=dL_in * u.pc,
obstime=Time(t0_in, format='mjd'))
xL_astropy_icrs = cL.apply_space_motion(new_obstime=t_obj)
xL_astropy_gcrs = xL_astropy_icrs.transform_to('gcrs')
xL_apy_ra = xL_astropy_gcrs.ra
xL_apy_dec = xL_astropy_gcrs.dec
print(f'xL_apy_ra = {xL_apy_ra[0]}, {xL_apy_ra[-1]}')
print(f'xL_apy_dec = {xL_apy_dec[0]}, {xL_apy_dec[-1]}')
print(f'x0_apy_ra = {x0_apy_ra[0]}, {x0_apy_ra[-1]}')
print(f'x0_apy_dec = {x0_apy_dec[0]}, {x0_apy_dec[-1]}')
dxL_apy = (xL_apy_ra - x0_apy_ra ).to('mas') * cosd
dyL_apy = (xL_apy_dec - x0_apy_dec).to('mas')
xL_astropy = np.vstack([dxL_apy.value, dyL_apy.value]).T * u.mas
##########
# Casey's conversion.
##########
from microlens.jlu import helio_geo_conversion as hgc
print('!!! Casey conversions')
foo = hgc.convert_helio_to_geo_phot(raL_in, decL_in, t0_in, pspl.u0_amp, pspl.tE,
pspl.piE[0], pspl.piE[1], t0_in)
t0_geo_casey = foo[0]
u0_geo_casey = foo[1]
tE_geo_casey = foo[2]
piEE_geo_casey = foo[3]
piEN_geo_casey = foo[4]
##########
# Gould 2004 version:
##########
#
# Set the ephemeris
t0_obj = Time(t0_in, format='mjd')
solar_system_ephemeris.set('builtin')
# Get the position and velocity of the Earth in the barycentric frame.
Earth_t0 = get_body_barycentric_posvel('Earth', t0_obj)
Earth_t = get_body_barycentric_posvel('Earth', t_obj)
Earth_pos_t0 = Earth_t0[0].get_xyz()
Earth_vel_t0 = Earth_t0[1].get_xyz()
Earth_pos_t = Earth_t[0].get_xyz()
Earth_vel_t = Earth_t[1].get_xyz()
# This is the position of the Sun w.r.t. the Earth at time t0 (in the geocentric frame)
Sun_pos_t0 = -Earth_pos_t0
Sun_vel_t0 = -Earth_vel_t0
Sun_pos_t = -Earth_pos_t
Sun_vel_t = -Earth_vel_t
# Calculate the 3D delta-s(t) vector as defined in Gould 2004 (in ICRS rectilinear):
#
# ds(t) = s(t) - v_earth(t0) * (t - t0) - s(t0)
#
ds_gould_3d = Sun_pos_t - (Sun_vel_t0 *(t_obj - t0_obj)[:, None]).T
ds_gould_3d -= Sun_pos_t0[:, None]
# Project onto East and North -- identical code to PyLIMA
target_angles_in_the_sky = [raL_in * np.pi / 180, decL_in * np.pi / 180]
Target = np.array(
[np.cos(target_angles_in_the_sky[1]) * np.cos(target_angles_in_the_sky[0]),
np.cos(target_angles_in_the_sky[1]) * np.sin(target_angles_in_the_sky[0]),
np.sin(target_angles_in_the_sky[1])])
East = np.array([-np.sin(target_angles_in_the_sky[0]),
np.cos(target_angles_in_the_sky[0]),
0.0])
North = np.cross(Target, East)
Sun_pos_t0_EN = np.array([np.dot(Sun_pos_t0.value, East),
np.dot(Sun_pos_t0.value, North)]) * u.AU
Sun_vel_t0_EN = np.array([np.dot(Sun_vel_t0.value, East),
np.dot(Sun_vel_t0.value, North)]) * u.AU / u.day
Sun_pos_t_EN = np.zeros([len(t), 2], dtype=float)
Sun_vel_t_EN = np.zeros([len(t), 2], dtype=float)
ds_gould_2d = np.zeros([len(t), 2], dtype=float)
for tt in range(len(t)):
# Note, positions still in AU, velocities in AU/day.
Sun_pos_t_EN[tt] = np.array([np.dot(Sun_pos_t.value[:, tt], East),
np.dot(Sun_pos_t.value[:, tt], North)])
Sun_vel_t_EN[tt] = np.array([np.dot(Sun_vel_t.value[:, tt], East),
np.dot(Sun_vel_t.value[:, tt], North)])
ds_gould_2d[tt] = np.array([np.dot(ds_gould_3d.value[:, tt], East),
np.dot(ds_gould_3d.value[:, tt], North)])
Sun_pos_t_EN *= u.AU
Sun_vel_t_EN *= u.AU/u.day
ds_gould_2d *= u.AU
# ds_gould_2d = Sun_pos_t_EN - (Sun_vel_t0_EN *(t_obj.value - t0_obj.value)[:, None])
# ds_gould_2d -= Sun_pos_t0_EN[:, None].T
# Calculate d-tau and d-beta
dtau = ds_gould_2d[:, 0]/u.AU * pspl.piE[0] + ds_gould_2d[:, 1]/u.AU * pspl.piE[1]
dbeta = ds_gould_2d[:, 0]/u.AU * pspl.piE[1] - ds_gould_2d[:, 1]/u.AU * pspl.piE[0]
# dtau = np.dot(pspl.piE, ds_gould_2d)
# dbeta = np.cross(pspl.piE, ds_gould_2d.T)
# Need to convert to t0_geo at tr... pick a reference time (arbitrarily choose t0):
A = (pspl.muRel[0] / pspl.thetaE_amp) / u.yr - (pspl.piE_amp * Sun_vel_t0_EN[0] / u.AU)
B = (pspl.muRel[1] / pspl.thetaE_amp) / u.yr - (pspl.piE_amp * Sun_vel_t0_EN[1] / u.AU)
t0_obj_geotr = (-1 / (A**2 + B**2)) * ( A*pspl.u0[0]
+ B*pspl.u0[1]
- A*pspl.piE_amp*Sun_pos_t0_EN[0]/u.AU
- B*pspl.piE_amp*Sun_pos_t0_EN[1]/u.AU)
t0_obj_geotr += t0_obj
# Need to convert to u0_geo at tr
# u0_geotr = pspl.u0
u0_geotr = (((pspl.muRel / pspl.thetaE_amp) / u.yr) * (t0_obj_geotr.value - t0_obj.value) * u.day).to('')
u0_geotr -= (pspl.piE_amp * Sun_pos_t0_EN / u.AU)
u0_geotr += pspl.u0
# Need to convert u0 amplitude.
u0_amp_geotr = np.linalg.norm(u0_geotr)
# Need to convert to tE_geo at tr
tE_geotr = (pspl.tE * u.day * np.linalg.norm(pspl.muRel) * u.mas / u.yr) / (np.hypot(A, B) * pspl.thetaE_amp * u.mas)
# Just for comparison, let's also calculate piE and muRel in both frames.
muRel_geotr = (pspl.muRel * u.mas / u.yr) - (pspl.piE_amp * pspl.thetaE_amp * u.mas * Sun_vel_t0_EN / u.AU)
piE_geotr = pspl.piE_amp * muRel_geotr / np.linalg.norm(muRel_geotr)
print(f'pspl.muRel = {pspl.muRel}')
print(f'pspl.thetaE_amp = {pspl.thetaE_amp}')
print(f'A = {A}')
print(f'B = {B}')
print(f'Sun vel at t0 (East) = {Sun_vel_t0_EN[0]}')
print(f'u0E: pspl = {pspl.u0[0]:.3f}, geotr = {u0_geotr[0]:.3f}')
print(f'u0N: pspl = {pspl.u0[1]:.3f}, geotr = {u0_geotr[1]:.3f}')
print(f'u0_amp: pspl = {pspl.u0_amp:.3f}, geotr = {np.linalg.norm(u0_geotr):.3f}, geotr_c = {u0_geo_casey:.3f}')
print(f't0: pspl = {t0_obj.value:.2f}, geotr = {t0_obj_geotr.value:.2f}, geotr_c = {t0_geo_casey:.2f}')
print(f'tE: pspl = {pspl.tE:.3f}, geotr = {tE_geotr:.3f}, geotr_c = {tE_geo_casey:.3f}')
print(f'muRelE: pspl = {pspl.muRel[0]:.3f}, geotr = {muRel_geotr[0]:.3f}')
print(f'muRelN: pspl = {pspl.muRel[1]:.3f}, geotr = {muRel_geotr[1]:.3f}')
print(f'piEE: pspl = {pspl.piE[0]:.4f}, geotr = {piE_geotr[0]:.4f}, geotr_c = {piEE_geo_casey:.4f}')
print(f'piEN: pspl = {pspl.piE[1]:.4f}, geotr = {piE_geotr[1]:.4f}, geotr_c = {piEN_geo_casey:.4f}')
# Calculate tau (in relative proper motion direction) and beta (in u0 direction)
tau = ((t_obj.value - t0_obj_geotr.value) * u.day / tE_geotr) + dtau
beta = u0_amp_geotr + dbeta
tau_vec = tau * muRel_geotr.T / np.linalg.norm(muRel_geotr)
beta_vec = beta * u0_geotr.T / np.linalg.norm(u0_geotr)
print('t = ', t[0:500:80])
print('tau = ', tau[0:500:80])
print('dtau = ', dtau[0:500:80])
print('beta = ', beta[0:500:80])
print('dbeta = ', dbeta[0:500:80])
u_bagel = (xS_bagle - xL_bagle) / (pspl.thetaE_amp * u.mas)
u_astropy = (xS_astropy - xL_astropy) / (pspl.thetaE_amp * u.mas)
u_gould = tau_vec + beta_vec
xL_gould = xL_bagle
xS_gould = (u_gould * pspl.thetaE_amp * u.mas) + xL_gould
# Position of source w.r.t. lens in Gould frame.
print('t = ', t[0:500:80])
print('xL_bagle = ', xL_bagle[0:500:80])
print('xL_astropy = ', xL_astropy[0:500:80])
print('xL_gould = ', xL_gould[0:500:80])
print('xS_bagle = ', xS_bagle[0:500:80])
print('xS_astropy = ', xS_astropy[0:500:80])
print('xS_gould = ', xS_gould[0:500:80])
print('lens pos (mas), vel (mas/yr) = ', pspl.xL0 * 1e3, pspl.muL)
print('sorc pos (mas), vel (mas/yr) = ', pspl.xS0 * 1e3, pspl.muS)
# Calculate the residuals.
resid = xS_astropy - xS_bagle # mas
plt.figure(1, figsize=(10, 3))
plt.subplots_adjust(wspace=0.7)
plt.subplot(1, 3, 1)
plt.plot(t, xS_bagle[:, 0], color='red')
plt.plot(t, xS_astropy[:, 0], linestyle='--', color='black')
plt.plot(t, xS_gould[:, 0], linestyle='-.', color='blue')
plt.xlabel('Time (MJD)')
plt.ylabel(r'$\Delta\alpha^*$ (mas)')
plt.subplot(1, 3, 2)
plt.plot(t, xS_bagle[:, 1], color='red')
plt.plot(t, xS_astropy[:, 1], linestyle='--', color='black')
plt.plot(t, xS_gould[:, 1], linestyle='-.', color='blue')
plt.xlabel('Time (MJD)')
plt.ylabel(r'$\Delta\delta$ (mas)')
plt.subplot(1, 3, 3)
plt.plot(xS_bagle[:, 0], xS_bagle[:, 1], color='red', label='Our code')
plt.plot(xS_astropy[:, 0], xS_astropy[:, 1], linestyle='-.',
color='black', label='Astropy')
plt.xlabel(r'$\Delta\alpha^*$ (mas)')
plt.ylabel(r'$\Delta\delta$ (mas)')
plt.legend(fontsize=10)
plt.axis('equal')
plt.figure(2, figsize=(10, 3))
plt.subplots_adjust(wspace=0.7)
plt.subplot(1, 3, 1)
plt.plot(t, u_bagel[:, 0], color='red')
plt.plot(t, u_astropy[:, 0], linestyle='--', color='black')
plt.plot(t, u_gould[:, 0], linestyle='-.', color='blue')
plt.xlabel('Time (MJD)')
plt.ylabel(r'$\Delta\alpha^*$ ($\theta_E$)')
plt.subplot(1, 3, 2)
plt.plot(t, u_bagel[:, 1], color='red')
plt.plot(t, u_astropy[:, 1], linestyle='--', color='black')
plt.plot(t, u_gould[:, 1], linestyle='-.', color='blue')
plt.xlabel('Time (MJD)')
plt.ylabel(r'$\Delta\delta$ ($\theta_E$)')
plt.subplot(1, 3, 3)
plt.plot(u_bagel[:, 0], u_bagel[:, 1], color='red', label='Our code')
plt.plot(u_astropy[:, 0], u_astropy[:, 1], linestyle='-.',
color='black', label='Astropy')
plt.xlabel(r'$\Delta\alpha^*$ ($\theta_E$)')
plt.ylabel(r'$\Delta\delta$ ($\theta_E$)')
plt.legend(fontsize=10)
plt.axis('equal')
plt.figure()
plt.plot(t, resid[:, 0], 'b--', label=r'$\Delta\alpha^*$ diff')
plt.plot(t, resid[:, 1], 'r--', label=r'$\Delta\delta$ diff')
plt.xlabel('Time (MJD)')
plt.ylabel('Residuals (mas)')
plt.legend(fontsize=10)
# Check that they return the same value to within 10 micro-arcsec
# np.testing.assert_almost_equal(xS_astropy, xS_bagle, decimal=2)
return
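# The Gould (2004) block above projects barycentric 3-vectors onto the East and
# North unit vectors on the sky with an explicit loop. The sketch below is an
# equivalent vectorized form of that projection (same Target/East/North
# construction), included only as a compact reference.
import numpy as np

def project_EN_sketch(vec_xyz, ra_deg, dec_deg):
    """Project 3-vectors with shape (N, 3) (or a single (3,) vector) onto the
    East/North unit vectors for a target at (ra_deg, dec_deg)."""
    ra = np.radians(ra_deg)
    dec = np.radians(dec_deg)
    target = np.array([np.cos(dec) * np.cos(ra), np.cos(dec) * np.sin(ra), np.sin(dec)])
    east = np.array([-np.sin(ra), np.cos(ra), 0.0])
    north = np.cross(target, east)
    vec = np.atleast_2d(np.asarray(vec_xyz, dtype=float))  # (N, 3)
    return np.column_stack([vec @ east, vec @ north])      # (N, 2) of [East, North]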
def testing_astropy_parallax():
from astropy.coordinates import SkyCoord, GCRS
from astropy.time import Time
import astropy.units as u
ra = 17.5 * 15.0 * u.deg
dec = -29 * u.deg
dist = 8000.0 * u.pc
sc = SkyCoord(ra, dec,
pm_ra_cosdec=0.0 * u.mas / u.yr,
pm_dec=0.0 * u.mas / u.yr,
distance=dist,
obstime=Time(57000.0, format='mjd'))
sc0 = SkyCoord(ra, dec,
pm_ra_cosdec=0.0 * u.mas / u.yr,
pm_dec=0.0 * u.mas / u.yr,
distance=1e6 * u.Mpc,
obstime=Time(57000.0, format='mjd'))
t = np.arange(56000, 58000)
t_obj = Time(t, format='mjd')
sc_t_icrs = sc.apply_space_motion(new_obstime=t_obj)
sc_t_gcrs = sc_t_icrs.transform_to('gcrs')
sc_t_icrs0 = sc0.apply_space_motion(new_obstime=t_obj)
sc_t_gcrs0 = sc_t_icrs0.transform_to('gcrs')
ra_t = sc_t_gcrs.ra
dec_t = sc_t_gcrs.dec
cosd_t = np.cos(dec_t.to('radian'))
ra0_t = sc_t_gcrs0.ra
dec0_t = sc_t_gcrs0.dec
# dra = ((ra_t - ra) * cosd_t).to('arcsec') # in arcsec
# ddec = (dec_t - dec).to('arcsec') # in arcsec
dra = ((ra_t - ra0_t) * cosd_t).to('arcsec') # in arcsec
ddec = (dec_t - dec0_t).to('arcsec') # in arcsec
plt.figure(1, figsize=(10, 3))
plt.subplots_adjust(wspace=0.7)
parallax_pred = 1.0 / dist.value
parallax_meas = np.max(np.hypot(dra.value, ddec.value))
print('Predicted parallax from manual calculation:')
print(' {0:.2f} mas'.format(parallax_pred * 1e3))
print('Total parallax from astropy calculation:')
print(' {0:.2f} mas'.format(parallax_meas * 1e3))
plt.subplot(1, 3, 1)
plt.plot(t, dra.to('mas'), color='black')
plt.xlabel('Time (MJD)')
plt.ylabel(r'$\Delta \alpha^*$ (mas)')
plt.axhline(parallax_pred * 1e3, color='red', linestyle='--')
plt.subplot(1, 3, 2)
plt.plot(t, ddec.to('mas'), color='black')
plt.xlabel('Time (MJD)')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.title('Star Distance = ' + str(dist))
plt.subplot(1, 3, 3)
plt.plot(dra.to('mas'), ddec.to('mas'), color='black')
plt.xlabel(r'$\Delta \alpha^*$ (mas)')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.axis('equal')
return
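# The predicted parallax used above is just the inverse distance. The helper
# below is that check in isolation: distance in pc in, total annual parallax
# amplitude in mas out.
def parallax_amplitude_mas_sketch(dist_pc):
    """Annual parallax amplitude in mas for a star at dist_pc parsecs (pi = 1/d)."""
    return 1.0e3 / dist_pc

# Example: a source at 8 kpc has a ~0.125 mas parallax, as printed above.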
def plot_PSBL(psbl, t_obs):
"""
Make some standard plots for PSBL.
"""
images, amps = psbl.get_all_arrays(t_obs)
##########
# Photometry
##########
phot = psbl.get_photometry(t_obs, amp_arr=amps)
# Plot the photometry
plt.figure(1)
plt.clf()
plt.plot(t_obs, phot, 'r-')
plt.ylabel('Photometry (mag)')
plt.xlabel('Time (MJD)')
plt.gca().invert_yaxis()
##########
# Astrometry
##########
if psbl.astrometryFlag:
# Find the points closest to t0
t0idx = np.argmin(np.abs(t_obs - psbl.t0))
xL1, xL2 = psbl.get_resolved_lens_astrometry(t_obs)
xL1 *= 1e3
xL2 *= 1e3
xS_unlens = psbl.get_astrometry_unlensed(t_obs) * 1e3
xS_lensed = psbl.get_astrometry(t_obs, image_arr=images, amp_arr=amps) * 1e3
dxS = (xS_lensed - xS_unlens)
# Plot the positions of everything
plt.figure(2)
plt.clf()
plt.plot(xS_unlens[:, 0], xS_unlens[:, 1], 'b--', mfc='blue',
mec='blue')
plt.plot(xS_lensed[:, 0], xS_lensed[:, 1], 'b-')
plt.plot(xL1[:, 0], xL1[:, 1], 'g--', mfc='none',
mec='green')
plt.plot(xL2[:, 0], xL2[:, 1], 'g--', mfc='none',
mec='darkgreen')
plt.plot(xS_unlens[t0idx, 0], xS_unlens[t0idx, 1], 'bx', mfc='blue',
mec='blue',
label='xS, unlensed')
plt.plot(xS_lensed[t0idx, 0], xS_lensed[t0idx, 1], 'bo',
label='xS, lensed')
plt.plot(xL1[t0idx, 0], xL1[t0idx, 1], 'gs', mfc='green',
mec='green',
label='Primary lens')
plt.plot(xL2[t0idx, 0], xL2[t0idx, 1], 'gs', mfc='none',
mec='green',
label='Secondary lens')
plt.legend()
plt.gca().invert_xaxis()
plt.xlabel('R.A. (mas)')
plt.ylabel('Dec. (mas)')
# Check just the astrometric shift part.
plt.figure(3)
plt.clf()
plt.plot(t_obs, dxS[:, 0], 'r--', label='R.A.')
plt.plot(t_obs, dxS[:, 1], 'b--', label='Dec.')
plt.legend(fontsize=10)
plt.ylabel('Astrometric Shift (mas)')
plt.xlabel('Time (MJD)')
plt.figure(4)
plt.clf()
plt.plot(dxS[:, 0], dxS[:, 1], 'r-')
plt.axhline(0, linestyle='--')
plt.axvline(0, linestyle='--')
plt.gca().invert_xaxis()
plt.xlabel('Shift RA (mas)')
plt.ylabel('Shift Dec (mas)')
plt.axis('equal')
print('Einstein radius: ', psbl.thetaE_amp)
print('Einstein crossing time: ', psbl.tE)
return
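# The PSBL plots above rely on the model classes for the image positions and
# amplifications. As an independent, order-of-magnitude cross-check of the
# caustic structure, the sketch below does coarse inverse ray shooting with the
# standard complexified binary-lens equation (Witt 1990). The conventions here
# (masses normalized to m1 + m2 = 1, lenses on the real axis with the center of
# mass at the origin) are assumptions and need not match the PSBL classes.
import numpy as np

def binary_lens_raytrace_sketch(q=0.8, sep=1.0, half_width=1.5, n_img=1200, n_src_bins=200):
    """Approximate magnification map for a binary lens with mass ratio q = m2/m1
    and separation sep (in Einstein radii). Returns (mag_map, extent)."""
    m1 = 1.0 / (1.0 + q)
    m2 = q / (1.0 + q)
    z1 = -m2 * sep                      # primary position (COM at the origin)
    z2 = m1 * sep                       # secondary position
    # Regular grid of rays in the image plane.
    x = np.linspace(-half_width, half_width, n_img)
    X, Y = np.meshgrid(x, x)
    z = X + 1j * Y
    # Complexified lens equation: map image positions back to the source plane.
    zeta = z - m1 / np.conj(z - z1) - m2 / np.conj(z - z2)
    # Ray counts per source-plane pixel, rescaled by the pixel-area ratio,
    # approximate the magnification.
    rng = [[-half_width, half_width], [-half_width, half_width]]
    counts, _, _ = np.histogram2d(zeta.real.ravel(), zeta.imag.ravel(),
                                  bins=n_src_bins, range=rng)
    pix_img = (2.0 * half_width / n_img) ** 2
    pix_src = (2.0 * half_width / n_src_bins) ** 2
    mag_map = counts * pix_img / pix_src
    extent = (-half_width, half_width, -half_width, half_width)
    return mag_map, extent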
def plot_PSBL_compare(psbl1, label1, psbl2, label2, t_obs):
"""
Make some standard comparison plots for two PSBL models.
"""
images1, amps1 = psbl1.get_all_arrays(t_obs)
images2, amps2 = psbl2.get_all_arrays(t_obs)
##########
# Photometry
##########
phot1 = psbl1.get_photometry(t_obs, amp_arr=amps1)
phot2 = psbl2.get_photometry(t_obs, amp_arr=amps2)
# Plot the photometry
plt.figure(1)
plt.clf()
plt.plot(t_obs, phot1, 'r-', label=label1)
plt.plot(t_obs, phot2, 'b-', label=label2)
plt.legend()
plt.ylabel('Photometry (mag)')
plt.xlabel('Time (MJD)')
plt.gca().invert_yaxis()
##########
# Astrometry
##########
if psbl1.astrometryFlag:
# Find the points closest to t0
t0idx1 = np.argmin(np.abs(t_obs - psbl1.t0))
t0idx2 = np.argmin(np.abs(t_obs - psbl2.t0))
xL1_1, xL2_1 = psbl1.get_resolved_lens_astrometry(t_obs)
xL1_1 *= 1e3
xL2_1 *= 1e3
xL1_2, xL2_2 = psbl2.get_resolved_lens_astrometry(t_obs)
xL1_2 *= 1e3
xL2_2 *= 1e3
xS_unlens1 = psbl1.get_astrometry_unlensed(t_obs) * 1e3
xS_unlens2 = psbl2.get_astrometry_unlensed(t_obs) * 1e3
xS_lensed1 = psbl1.get_astrometry(t_obs, image_arr=images1, amp_arr=amps1) * 1e3
xS_lensed2 = psbl2.get_astrometry(t_obs, image_arr=images2, amp_arr=amps2) * 1e3
dxS1 = (xS_lensed1 - xS_unlens1)
dxS2 = (xS_lensed2 - xS_unlens2)
# Plot the positions of everything
plt.figure(2)
plt.clf()
plt.plot(xS_unlens1[:, 0], xS_unlens1[:, 1],
'b--', mfc='blue', mec='blue')
plt.plot(xS_unlens2[:, 0], xS_unlens2[:, 1],
'b--', mfc='blue', mec='blue', alpha=0.2)
plt.plot(xS_lensed1[:, 0], xS_lensed1[:, 1], 'b-')
plt.plot(xS_lensed2[:, 0], xS_lensed2[:, 1], 'b-', alpha=0.2)
plt.plot(xL1_1[:, 0], xL1_1[:, 1], 'g--', mfc='none', mec='green')
plt.plot(xL1_2[:, 0], xL1_2[:, 1], 'g--', mfc='none', mec='green', alpha=0.2)
plt.plot(xL2_1[:, 0], xL2_1[:, 1], 'g--', mfc='none', mec='darkgreen')
plt.plot(xL2_2[:, 0], xL2_2[:, 1], 'g--', mfc='none', mec='darkgreen', alpha=0.2)
# Plot closest approach points.
plt.plot(xS_unlens1[t0idx1, 0], xS_unlens1[t0idx1, 1],
'bx', mfc='blue', mec='blue',
label='xS, unlensed, ' + label1)
plt.plot(xS_unlens2[t0idx2, 0], xS_unlens2[t0idx2, 1],
'bx', mfc='blue', mec='blue', alpha=0.2,
label='xS, unlensed, ' + label2)
plt.plot(xS_lensed1[t0idx1, 0], xS_lensed1[t0idx1, 1],
'bo',
label='xS, lensed, ' + label1)
plt.plot(xS_lensed2[t0idx2, 0], xS_lensed2[t0idx2, 1],
'bo', alpha=0.2,
label='xS, lensed, ' + label2)
plt.plot(xL1_1[t0idx1, 0], xL1_1[t0idx1, 1],
'gs', mfc='green', mec='green',
label='Primary lens, ' + label1)
plt.plot(xL1_2[t0idx2, 0], xL1_2[t0idx2, 1],
'gs', mfc='green', mec='green', alpha=0.2,
label='Primary lens, ' + label2)
plt.plot(xL2_1[t0idx1, 0], xL2_1[t0idx1, 1],
'gs', mfc='none', mec='green',
label='Secondary lens, ' + label1)
plt.plot(xL2_2[t0idx2, 0], xL2_2[t0idx2, 1],
'gs', mfc='none', mec='green', alpha=0.2,
label='Secondary lens, ' + label2)
plt.legend(fontsize=10)
plt.gca().invert_xaxis()
plt.xlabel('R.A. (mas)')
plt.ylabel('Dec. (mas)')
# Check just the astrometric shift part.
plt.figure(3)
plt.clf()
plt.plot(t_obs, dxS1[:, 0], 'r--', label='R.A., ' + label1)
plt.plot(t_obs, dxS1[:, 1], 'r-.', label='Dec., ' + label1)
plt.plot(t_obs, dxS2[:, 0], 'b--', label='R.A., ' + label2, alpha=0.2)
plt.plot(t_obs, dxS2[:, 1], 'b-.', label='Dec., ' + label2, alpha=0.2)
plt.legend(fontsize=10)
plt.ylabel('Astrometric Shift (mas)')
plt.xlabel('Time (MJD)')
plt.figure(4)
plt.clf()
plt.plot(dxS1[:, 0], dxS1[:, 1], 'r-', label=label1)
plt.plot(dxS2[:, 0], dxS2[:, 1], 'b-', label=label2, alpha=0.2)
plt.axhline(0, linestyle='--')
plt.axvline(0, linestyle='--')
plt.legend()
plt.gca().invert_xaxis()
plt.xlabel('Shift RA (mas)')
plt.ylabel('Shift Dec (mas)')
plt.axis('equal')
print('Einstein radius: ', psbl1.thetaE_amp, psbl2.thetaE_amp)
# Print some common stuff
print('tE: ', psbl1.tE, psbl2.tE)
print('u0_amp: ', psbl1.u0_amp, psbl2.u0_amp)
return
def test_PSBL_PhotAstrom_noPar_Param2():
"""
General testing of PSBL... caustic crossings.
"""
raL = 259.5
decL = -29.0
t0 = 57000
u0 = 0.3 # in units of Einstein radii
tE = 200.0
piE_E = 0.01
piE_N = -0.01
b_sff = np.array([1.0])
mag_src = np.array([18])
thetaE = 3.0 # in mas
xS0_E = 0.0
xS0_N = 0.01
muS_E = 3.0
muS_N = 0.0
piS = (1.0 / 8000.0) * 1e3 # mas
q = 0.8 # M2 / M1
sep = 3.0 # mas
alpha = 135.0
psbl = model.PSBL_PhotAstrom_noPar_Param2(t0, u0, tE,
thetaE, piS,
piE_E, piE_N,
xS0_E, xS0_N,
muS_E, muS_N,
q, sep, alpha,
b_sff, mag_src,
raL=raL, decL=decL,
root_tol=1e-4)
t_obs = np.arange(56000.0, 58000.0, 3)
plot_PSBL(psbl, t_obs)
# Check that we have some extreme magnifications since this
# is caustic crossing.
phot = psbl.get_photometry(t_obs)
assert phot.min() < 16
return
def test_PSBL_Phot_noPar_Param1():
"""
General testing of PSBL... caustic crossings.
"""
# NOTE this gives the same model as in test_PSBL_PhotAstrom_noPar_Param2()
raL = 259.5
decL = -29.0
t0 = 57000
u0 = 0.3 # in units of Einstein radii
tE = 200.0
piE_E = 0.01
piE_N = -0.01
b_sff = np.array([1.0])
mag_src = np.array([18])
q = 0.8 # M2 / M1
#sep = 3e-3 # in arcsec
sep = 1.0 # in Einstein radii
alpha = 135.0 # PA of binary on the sky
phi_piE = np.degrees(np.arctan2(piE_N, piE_E)) # PA of muRel on the sky
phi = alpha - phi_piE # relative angle between binary and muRel.
print('alpha = ', alpha, ' deg')
print('phi_piE = ', phi_piE, ' deg')
print('phi = ', phi, ' deg')
psbl = model.PSBL_Phot_noPar_Param1(t0, u0, tE,
piE_E, piE_N,
q, sep, phi,
b_sff, mag_src,
raL=raL, decL=decL,
root_tol=1e-4)
t_obs = np.arange(56000.0, 58000.0, 3)
plot_PSBL(psbl, t_obs)
# Check that we have some extreme magnifications since this
# is caustic crossing.
phot = psbl.get_photometry(t_obs)
assert phot.max() > 15
return
def test_PSBL_PhotAstrom_Par_Param2():
"""
General testing of PSBL... caustic crossings.
"""
raL = 259.5
decL = -29.0
t0 = 57000
u0 = 0.3 # in units of Einstein radii
tE = 200.0
piE_E = 0.01
piE_N = -0.01
b_sff = np.array([1.0])
mag_src = np.array([18])
thetaE = 3.0 # in mas
xS0_E = 0.0
xS0_N = 0.01
muS_E = 3.0
muS_N = 0.0
piS = (1.0 / 8000.0) * 1e3 # mas
q = 0.8 # M2 / M1
sep = 3.0 # mas
alpha = 135.0
psbl_n = model.PSBL_PhotAstrom_noPar_Param2(t0, u0, tE,
thetaE, piS,
piE_E, piE_N,
xS0_E, xS0_N,
muS_E, muS_N,
q, sep, alpha,
b_sff, mag_src,
raL=raL, decL=decL,
root_tol=1e-4)
psbl_p = model.PSBL_PhotAstrom_Par_Param2(t0, u0, tE,
thetaE, piS,
piE_E, piE_N,
xS0_E, xS0_N,
muS_E, muS_N,
q, sep, alpha,
b_sff, mag_src,
raL=raL, decL=decL,
root_tol=1e-4)
t_obs = np.arange(56000.0, 58000.0, 3)
plot_PSBL_compare(psbl_n, 'No Parallax', psbl_p, 'Parallax', t_obs)
# Check that we have some extreme magnifications since this
# is caustic crossing.
phot1 = psbl_n.get_photometry(t_obs)
phot2 = psbl_p.get_photometry(t_obs)
assert phot1.min() < 16
assert phot2.min() < 16
print('Sep (in thetaE, no par): ', psbl_n.sep / psbl_n.thetaE_amp)
print('Sep (in thetaE, with par): ', psbl_p.sep / psbl_p.thetaE_amp)
print('m1 (in thetaE**2, not mass): ', psbl_n.m1 / (psbl_n.thetaE_amp*1e-3)**2, psbl_p.m1 / (psbl_p.thetaE_amp*1e-3)**2)
print('m2 (in thetaE**2, not mass): ', psbl_n.m2 / (psbl_n.thetaE_amp*1e-3)**2, psbl_p.m2 / (psbl_p.thetaE_amp*1e-3)**2)
##########
# Recalculate u from complex_pos() to debug.
##########
# Calculate the position of the source w.r.t. lens (in Einstein radii)
# Distance along muRel direction
tau = (t_obs - psbl_p.t0) / psbl_p.tE
tau = tau.reshape(len(tau), 1)
# Distance along u0 direction -- always constant with time.
u0 = psbl_p.u0.reshape(1, len(psbl_p.u0))
thetaE_hat = psbl_p.thetaE_hat.reshape(1, len(psbl_p.thetaE_hat))
# Total distance
u = u0 + tau * thetaE_hat
# Incorporate parallax
parallax_vec = model.parallax_in_direction(psbl_p.raL, psbl_p.decL, t_obs)
u -= psbl_p.piE_amp * parallax_vec
t0dx = np.argmin(np.abs(tau))
print('u = ')
print(u[t0dx-5:t0dx+5, :])
w, z1, z2 = psbl_p.get_complex_pos(t_obs)
comp = psbl_p.get_complex_pos(t_obs)
images_p, amps_p = psbl_p.get_all_arrays(t_obs)
amp_arr_msk = np.ma.masked_invalid(amps_p)
amp = np.sum(amp_arr_msk, axis=1)
#print(images_p[t0dx-5:t0dx+5])
# print(amps_p[t0dx-5:t0dx+5])
# Get the astrometry in the lens rest frame in units of thetaE
xL = psbl_p.get_lens_astrometry(t_obs) # in arcsec
xL1, xL2 = psbl_p.get_resolved_lens_astrometry(t_obs) # in arcsec
xS_u = psbl_p.get_astrometry_unlensed(t_obs) # in arcsec
u2 = (xS_u - xL) / (psbl_p.thetaE_amp * 1e-3) # -- this should basically be u
w_new = u2
z1_new = (xL1 - xL) / (psbl_p.thetaE_amp * 1e-3)
z2_new = (xL2 - xL) / (psbl_p.thetaE_amp * 1e-3)
print('w: ')
print(w[t0dx-5:t0dx+5] / (psbl_p.thetaE_amp * 1e-3))
print(w_new[t0dx-5:t0dx+5])
print('z1: ')
print(z1[t0dx-5:t0dx+5] / (psbl_p.thetaE_amp * 1e-3))
print(z1_new[t0dx-5:t0dx+5])
print('z2: ')
print(z2[t0dx-5:t0dx+5] / (psbl_p.thetaE_amp * 1e-3))
print(z2_new[t0dx-5:t0dx+5])
print('u2 = ')
print(u2[t0dx-5:t0dx+5])
return
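# The debug block above rebuilds the normalized source and lens positions
# (w, z1, z2) that feed the image solver. Given image positions from such a
# solver, the standard binary-lens Jacobian gives each image's magnification.
# The sketch below writes that out, assuming the same complexified formulation
# and mass normalization as the w/z1/z2 variables above (an assumption, since
# the class internals are not shown here).
import numpy as np

def binary_image_magnification_sketch(z_images, z1, z2, m1, m2):
    """Magnification of each image at complex position(s) z_images for point
    lenses at complex positions z1, z2 with (normalized) masses m1, m2:
    A_i = 1 / |1 - |dzeta/dzbar|^2|,
    dzeta/dzbar = m1 / conj(z - z1)^2 + m2 / conj(z - z2)^2."""
    z = np.atleast_1d(np.asarray(z_images, dtype=complex))
    dzeta_dzbar = m1 / np.conj(z - z1) ** 2 + m2 / np.conj(z - z2) ** 2
    detJ = 1.0 - np.abs(dzeta_dzbar) ** 2
    return 1.0 / np.abs(detJ)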
def test_PSBL_Phot_Par_Param1():
"""
General testing of PSBL... caustic crossings.
"""
# NOTE this gives the same model as in test_PSBL_Phot_noPar_Param1()
raL = 259.5
decL = -29.0
t0 = 57000
u0 = 0.3 # in units of Einstein radii
tE = 200.0
piE_E = 0.01
piE_N = -0.01
b_sff = np.array([1.0])
mag_src = np.array([18])
q = 0.8 # M2 / M1
sep = 1.0 # in Einstein radii
alpha = 135.0 # PA of binary on the sky
phi_piE = np.degrees(np.arctan2(piE_N, piE_E)) # PA of muRel on the sky
phi = alpha - phi_piE # relative angle between binary and muRel.
print('alpha = ', alpha, ' deg')
print('phi_piE = ', phi_piE, ' deg')
print('phi = ', phi, ' deg')
psbl_n = model.PSBL_Phot_noPar_Param1(t0, u0, tE,
piE_E, piE_N,
q, sep, phi,
b_sff, mag_src,
raL=raL, decL=decL,
root_tol=1e-4)
psbl_p = model.PSBL_Phot_Par_Param1(t0, u0, tE,
piE_E, piE_N,
q, sep, phi,
b_sff, mag_src,
raL=raL, decL=decL,
root_tol=1e-4)
t_obs = np.arange(56000.0, 58000.0, 3)
plot_PSBL_compare(psbl_n, 'No Parallax', psbl_p, 'Parallax', t_obs)
# Check that we have some extreme magnifications since this
# is caustic crossing.
phot1 = psbl_n.get_photometry(t_obs)
phot2 = psbl_p.get_photometry(t_obs)
assert phot1.min() < 16
assert phot2.min() < 16
print('Sep (in thetaE, no par): ', psbl_n.sep)
print('Sep (in thetaE, with par): ', psbl_p.sep)
print('m1 (in thetaE**2, not mass): ', psbl_n.m1, psbl_p.m1)
print('m2 (in thetaE**2, not mass): ', psbl_n.m2, psbl_p.m2)
##########
# Recalculate u from complex_pos() to debug.
##########
# Calculate the position of the source w.r.t. lens (in Einstein radii)
# Distance along muRel direction
tau = (t_obs - psbl_p.t0) / psbl_p.tE
tau = tau.reshape(len(tau), 1)
# Distance along u0 direction -- always constant with time.
u0 = psbl_p.u0.reshape(1, len(psbl_p.u0))
thetaE_hat = psbl_p.thetaE_hat.reshape(1, len(psbl_p.thetaE_hat))
# Total distance
u = u0 + tau * thetaE_hat
# Incorporate parallax
parallax_vec = model.parallax_in_direction(psbl_p.raL, psbl_p.decL, t_obs)
u -= psbl_p.piE_amp * parallax_vec
t0dx = np.argmin(np.abs(tau))
print('u = ')
print(u[t0dx-5:t0dx+5, :])
w, z1, z2 = psbl_p.get_complex_pos(t_obs)
images_p, amps_p = psbl_p.get_all_arrays(t_obs)
amp_arr_msk = np.ma.masked_invalid(amps_p)
amp = np.sum(amp_arr_msk, axis=1)
print('w: ')
print(w[t0dx-5:t0dx+5])
print('z1: ')
print(z1[t0dx-5:t0dx+5])
print('z2: ')
print(z2[t0dx-5:t0dx+5])
return
def test_PSBL_phot_vs_pyLIMA():
from pyLIMA import microlmodels
from pyLIMA import event
from pyLIMA import telescopes
from pyLIMA import microltoolbox
# Parameters -- common to ours and pyLIMA
t0 = 55775.0
u0_amp = 0.5
tE = 60.0
mag_src = 16
b_sff = 0.5
q = 1.0
sep = 0.6
phi = 125.0
res = plot_compare_vs_pylima(t0, u0_amp, tE, mag_src, b_sff, q, sep, phi)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
# Change t0:
t0_new = 56000.0
res = plot_compare_vs_pylima(t0_new, u0_amp, tE, mag_src, b_sff, q, sep, phi)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
# Change u0_amp:
u0_amp_new = 0.4
res = plot_compare_vs_pylima(t0_new, u0_amp_new, tE, mag_src, b_sff, q, sep, phi)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
# Change tE:
tE_new = 120.0
res = plot_compare_vs_pylima(t0, u0_amp, tE_new, mag_src, b_sff, q, sep, phi)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
# Change sep:
sep_new = 0.3
res = plot_compare_vs_pylima(t0, u0_amp, tE, mag_src, b_sff, q, sep_new, phi)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
# Change phi:
phi_new = 0.3
res = plot_compare_vs_pylima(t0, u0_amp, tE, mag_src, b_sff, q, sep, phi_new)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
# Change mag_src:
mag_src_new = 18
res = plot_compare_vs_pylima(t0, u0_amp, tE, mag_src_new, b_sff, q, sep, phi)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
# Change b_sff:
b_sff_new = 0.3
res = plot_compare_vs_pylima(t0, u0_amp, tE, mag_src, b_sff_new, q, sep, phi)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
# Change q
q_new = 0.8
res = plot_compare_vs_pylima(t0, u0_amp, tE, mag_src, b_sff, q_new, sep, phi)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
return
def plot_compare_vs_pylima(t0, u0_amp, tE, mag_src, b_sff, q, sep, phi, piEE=0.1, piEN=0.1):
"""All input values are our model definitions.
Note piEE and piEN should be arbitrary for the no-parallax model, but it might be worth checking just in case.
"""
from pyLIMA import microlmodels
from pyLIMA import event
from pyLIMA import telescopes
from pyLIMA import microltoolbox
phi_rad = np.radians(phi)
# These are arbitrary in the Phot-noParallax model.
piEE = 0.1
piEN = 0.1
# Our --> PyLIMA conversions
pylima_q = 1.0 / q
q_prime = (1.0 - q) / (2.0 * (1 + q))
pylima_u0 = u0_amp + q_prime * sep * np.sin(phi_rad)
pylima_t0 = t0 + q_prime * sep * tE * np.cos(phi_rad)
# Note that pylima_phi = phi
log_q = np.log10(pylima_q)
log_s = np.log10(sep)
# Load up some artificial data for pyLIMA... need this for time array definition.
pylima_data = np.loadtxt(os.path.dirname(model.__file__) + '/tests/OB120169_phot.dat')
pylima_data[:, 1] = 1e5
time_jd = pylima_data[:, 0]
time_mjd = time_jd - 2400000.5
pylima_tel = telescopes.Telescope(name='OGLE', camera_filter='I', light_curve_flux=pylima_data)
pylima_ev = event.Event()
pylima_ev.name = 'Fubar'
pylima_ev.telescopes.append(pylima_tel)
pylima_mod = microlmodels.create_model('PSBL', pylima_ev)
pylima_mod.define_model_parameters()
pylima_mod.blend_flux_ratio = False
tmp_params = [pylima_t0 + 2400000.5, pylima_u0, tE, log_s, log_q, phi_rad]
pylima_par = pylima_mod.compute_pyLIMA_parameters(tmp_params)
pylima_par.fs_OGLE = microltoolbox.magnitude_to_flux(mag_src)
pylima_par.fb_OGLE = pylima_par.fs_OGLE * (1.0 - b_sff) / b_sff
pylima_amp = pylima_mod.model_magnification(pylima_tel, pylima_par)
pylima_lcurve, sf, bf = pylima_mod.compute_the_microlensing_model(pylima_tel, pylima_par)
pylima_lcurve_mag = microltoolbox.flux_to_magnitude(pylima_lcurve)
# Compute our model
psbl = model.PSBL_Phot_noPar_Param1(t0, u0_amp, tE, piEE, piEN, q, sep, phi,
[b_sff], [mag_src], root_tol=1e-8)
our_mag = psbl.get_photometry(time_mjd)
plt.figure(1, figsize=(11, 6))
plt.clf()
f1 = plt.gcf().add_axes([0.4, 0.35, 0.57, 0.6])
f2 = plt.gcf().add_axes([0.4, 0.15, 0.57, 0.2])
f1.get_shared_x_axes().join(f1, f2)
f1.set_xticklabels([])
f1.plot(time_mjd, pylima_lcurve_mag, 'ko', label='pyLIMA')
f1.plot(time_mjd, our_mag, 'r.', label='Ours')
f1.invert_yaxis()
f1.set_xlabel('MJD (day)')
f1.set_ylabel('I (mag)')
f1.legend()
f2.plot(time_mjd, pylima_lcurve_mag - our_mag, 'k.')
f2.set_xlabel('MJD (day)')
f2.set_ylabel('PyL-Ours')
tleft = 0.03
ttop = 0.7
ttstep = 0.05
fig = plt.gcf()
fig.text(tleft, ttop - 0*ttstep, 't0 = {0:.1f} (MJD)'.format(t0), fontsize=12)
fig.text(tleft, ttop - 1*ttstep, 't0_pyL = {0:.1f} (MJD)'.format(pylima_t0), fontsize=12)
fig.text(tleft, ttop - 2*ttstep, 'u0 = {0:.3f}'.format(u0_amp), fontsize=12)
fig.text(tleft, ttop - 3*ttstep, 'u0_pyL = {0:.3f}'.format(pylima_u0), fontsize=12)
fig.text(tleft, ttop - 4*ttstep, 'tE = {0:.1f} (day)'.format(tE), fontsize=12)
fig.text(tleft, ttop - 5*ttstep, 'q = {0:.5f}'.format(q), fontsize=12)
fig.text(tleft, ttop - 6*ttstep, 'q_pyL = {0:.5f}'.format(pylima_q), fontsize=12)
fig.text(tleft, ttop - 7*ttstep, 'sep = {0:.5f}'.format(sep), fontsize=12)
fig.text(tleft, ttop - 8*ttstep, 'phi = {0:.1f}'.format(phi), fontsize=12)
fig.text(tleft, ttop - 9*ttstep, 'b_sff = {0:.2f}'.format(b_sff), fontsize=12)
fig.text(tleft, ttop - 10*ttstep, 'mag_src = {0:.1f}'.format(mag_src), fontsize=12)
max_delta = np.max(np.abs(pylima_lcurve_mag - our_mag))
if max_delta > 1e-6:
fig.text(tleft, 0.05, '!!BAD!!', fontsize=16, color='red')
plt.savefig('PSBL_phot_vs_pyLIMA.png')
return (time_mjd, pylima_lcurve_mag, our_mag, max_delta)
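# The our-parameters -> pyLIMA conversion above is repeated in a few tests.
# The helper below just collects those same expressions in one place for
# reference; it implements exactly what plot_compare_vs_pylima() does above
# and nothing more.
import numpy as np

def ours_to_pylima_psbl_params_sketch(t0, u0_amp, tE, q, sep, phi_deg):
    """Convert our PSBL_Phot parameters to the pyLIMA PSBL parameterization."""
    phi_rad = np.radians(phi_deg)
    pylima_q = 1.0 / q
    q_prime = (1.0 - q) / (2.0 * (1.0 + q))
    pylima_u0 = u0_amp + q_prime * sep * np.sin(phi_rad)
    pylima_t0 = t0 + q_prime * sep * tE * np.cos(phi_rad)
    return dict(t0=pylima_t0, u0=pylima_u0, tE=tE,
                log_s=np.log10(sep), log_q=np.log10(pylima_q), phi_rad=phi_rad)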
def test_PSPL_phot_vs_pyLIMA():
from pyLIMA import microlmodels
from pyLIMA import event
from pyLIMA import telescopes
from pyLIMA import microltoolbox
# Parameters -- common to ours and pyLIMA
ra = 267.4640833333333
dec = -34.62555555555556
t0 = 55775.0
u0_amp = 0.5
tE = 200.0
piEE = 0.5
piEN = -0.1
mag_src = 16
b_sff = 0.5
# res = plot_compare_vs_pylima_pspl(ra, dec, t0, u0_amp, tE, piEE, piEN, mag_src, b_sff, parallax=False)
# t_mjd, mag_pyl, mag_our, max_delta = res
# assert max_delta < 1e-6
res = plot_compare_vs_pylima_pspl(ra, dec, t0, u0_amp, tE, piEE, piEN, mag_src, b_sff, parallax=True)
t_mjd, mag_pyl, mag_our, max_delta = res
assert max_delta < 1e-6
return
def plot_compare_vs_pylima_pspl(ra, dec, t0, u0_amp, tE, piEE, piEN, mag_src, b_sff, parallax=True):
"""
Compare pyLIMA to our models with some plots.
Input parameters are in our conventions and heliocentric coordinate system.
"""
from pyLIMA import microlmodels
from pyLIMA import event
from pyLIMA import telescopes
from pyLIMA import microltoolbox
# To convert between pyLIMA (geocentric) and Ours (heliocentric), we will
# need some info about the Sun's position and velocity.
# First define the reference time: We will use our t0 as the reference time.
t0_par = t0 + 2400000.5 # JD
t0_ref = t0_par
#t0_ref += 10.0
####
# Full Astropy Version
####
# Make a temporary muRel
from astropy.coordinates import SkyCoord, GCRS, FK5
from astropy.time import TimeJD, Time
from astropy.coordinates import solar_system_ephemeris
# muRel_helio = 3.0 * u.mas / u.yr
# dist = 6.0 * u.kpc
# targ = SkyCoord(ra * u.deg, dec * u.deg, distance=dist,
# pm_ra_cosdec = piEE*muRel_helio,
# pm_dec = piEN*muRel_helio,
# radial_velocity = 0 * u.km / u.s,
# frame=FK5(equinox=Time(2000, format='jyear')))
# solar_system_ephemeris.set('builtin')
# foo = targ.transform_to(GCRS(obstime=Time(t0_ref, format='jd', scale='tai')))
# print('targ = ', targ)
# print('foo = ', foo)
# Above is useless because it includes the whole velocity on the sky.
# Get the position and velocity of the Earth in the barycentric frame.
# with solar_system_ephemeris.set('jpl'):
with solar_system_ephemeris.set('builtin'):
time_jd_reference = Time(t0_ref, format='jd')
Earth_pos_t0_par = get_body_barycentric_posvel('Earth', time_jd_reference)
print('Earth pos (bary) = ', Earth_pos_t0_par[0])
print('Earth vel (bary) = ', Earth_pos_t0_par[1])
# This is the position of the Sun w.r.t. the Earth at time t0 (in the geocentric frame)
Sun_pos_t0_par = -Earth_pos_t0_par[0]
Sun_vel_t0_par = -Earth_pos_t0_par[1]
# Project sidereal RA and Dec into geocentric equatorial rectangular coordinates.
# See "Rectangular coordinates" section here:
# https://en.wikipedia.org/wiki/Equatorial_coordinate_system
# This code matches pyLIMA.
target_angles_in_the_sky = [ra * np.pi / 180, dec * np.pi / 180]
Target = np.array(
[np.cos(target_angles_in_the_sky[1]) * np.cos(target_angles_in_the_sky[0]),
np.cos(target_angles_in_the_sky[1]) * np.sin(target_angles_in_the_sky[0]),
np.sin(target_angles_in_the_sky[1])])
East = np.array([-np.sin(target_angles_in_the_sky[0]),
np.cos(target_angles_in_the_sky[0]),
0.0])
North = np.cross(Target, East)
print('East (geo) = ', East)
print('North (geo) = ', North)
Earth_vel_t0_par_proj = np.array([np.dot(Earth_pos_t0_par[1].xyz.to('AU/day').value, East),
np.dot(Earth_pos_t0_par[1].xyz.to('AU/day').value, North)])
print('Earth vel (EN projected, AU/day) = ', Earth_vel_t0_par_proj)
au_day_to_km_s = 1731.45683
print('Earth vel (EN projected, km/s) = ', Earth_vel_t0_par_proj * au_day_to_km_s)
# Unit vector pointing towards the Sun (projected onto the sky):
Sun_pos_t0_par_proj = np.array([np.dot(Sun_pos_t0_par.xyz.to('AU').value, East),
np.dot(Sun_pos_t0_par.xyz.to('AU').value, North)])
Sun_pos_t0_par_proj_unit = Sun_pos_t0_par_proj / np.linalg.norm( Sun_pos_t0_par_proj )
# Project velocity from RA and Dec into geocentric equatorial rectangular coordinates.
# Note units will be in AU / day.
Sun_vel_t0_par_proj = np.array([np.dot(Sun_vel_t0_par.xyz.to('AU/day').value, East),
np.dot(Sun_vel_t0_par.xyz.to('AU/day').value, North)])
print('Sun pos (geo) = ', Sun_pos_t0_par)
print('Sun pos (geo, EN projected) = ', Sun_pos_t0_par_proj)
print('Sun pos (geo, EN projected, unit) = ', Sun_pos_t0_par_proj_unit)
print()
# Compare this projected Sun vector to the parallax vector we use in our code.
par_vec = model.parallax_in_direction(ra, dec, np.array([t0]))
print('Parallax Vector from PSPL = ', par_vec)
print()
print('Sun vel (geo)= ', Sun_vel_t0_par)
print('Sun vel (geo, EN projected (AU/day)) = ', Sun_vel_t0_par_proj)
print('Sun vel (geo, EN projected (km/s)) = ', Sun_vel_t0_par_proj * au_day_to_km_s)
#####
# Load up some artificial data for pyLIMA... need this for time array definition.
#####
pylima_data = np.loadtxt(os.path.dirname(model.__file__) + '/tests/OB120169_phot.dat')
pylima_data[:, 1] = 1e5
time_jd = pylima_data[:, 0]
time_mjd = time_jd - 2400000.5
#####
# Compute our model
#####
print('Making our model')
if parallax:
pspl = model.PSPL_Phot_Par_Param1(t0, u0_amp, tE, piEE, piEN,
[b_sff], [mag_src], raL=ra, decL=dec)
else:
pspl = model.PSPL_Phot_noPar_Param1(t0, u0_amp, tE, piEE, piEN,
[b_sff], [mag_src], raL=ra, decL=dec)
our_mag = pspl.get_photometry(time_mjd)
# our_xL = pspl.get_lens_astrometry(time_mjd)
# our_xS = pspl.get_astrometry_unlensed(time_mjd)
# our_u = our_xS - our_xL
#####
# Compute pyLIMA model
#####
piE = pspl.piE_amp
# Sun_vel_t0_par_proj is in units of AU / days.
# Convert to unitless by multiplying:
# Sun_vel_t0_par_proj [AU / day] * tE [days] * piE^2 / 1 AU
# tmpEE_geo = piEE - (Sun_vel_t0_par_proj[0] * tE * piE**2) # unitless.
# tmpEN_geo = piEN - (Sun_vel_t0_par_proj[1] * tE * piE**2)
tmpEE_geo = piEE - (Sun_vel_t0_par_proj[0] * tE * piE) # unitless.
tmpEN_geo = piEN - (Sun_vel_t0_par_proj[1] * tE * piE)
tmp_geo = np.hypot(tmpEE_geo, tmpEN_geo)
print('tmp_geo = ', tmp_geo)
print('piE = ', piE)
piEE_geo = tmpEE_geo * piE / tmp_geo
piEN_geo = tmpEN_geo * piE / tmp_geo
print('piE_vec helio = ', piEE, piEN)
print('piE_vec geo = ', piEE_geo, piEN_geo)
tE_geo = tE * piE / tmp_geo
print()
# u0_vec_geo = pspl.u0 + (((t0_ref - t0_par)/tE)*pspl.thetaE_hat) - (piE * par_vec)
u0_vec_geo = pspl.u0 + (((t0_ref - t0_par)/tE)*pspl.thetaE_hat) - (piE * Sun_pos_t0_par_proj_unit)
u0_geo = np.linalg.norm(u0_vec_geo)
print('u0_vec_geo = ', u0_vec_geo)
print('pspl.u0 = ', pspl.u0)
print('u0_geo = ', u0_geo)
print('Making pyLIMA model')
pylima_u0 = u0_geo
pylima_t0 = t0_ref
pylima_piEE = -piEE_geo
pylima_piEN = -piEN_geo
pylima_tE = tE_geo
pylima_tel = telescopes.Telescope(name='OGLE', camera_filter='I', light_curve_flux=pylima_data)
pylima_ev = event.Event()
pylima_ev.name = 'Fubar'
pylima_ev.telescopes.append(pylima_tel)
pylima_ev.ra = ra
pylima_ev.dec = dec
if parallax:
pylima_mod = microlmodels.create_model('PSPL', pylima_ev, parallax=['Annual', pylima_t0 ])
tmp_params = [pylima_t0, pylima_u0, pylima_tE, pylima_piEN, pylima_piEE]
else:
pylima_mod = microlmodels.create_model('PSPL', pylima_ev)
tmp_params = [pylima_t0, pylima_u0, pylima_tE]
pylima_mod.define_model_parameters()
pylima_mod.blend_flux_ratio = False
pylima_par = pylima_mod.compute_pyLIMA_parameters(tmp_params)
pylima_par.fs_OGLE = microltoolbox.magnitude_to_flux(mag_src)
pylima_par.fb_OGLE = pylima_par.fs_OGLE * (1.0 - b_sff) / b_sff
print('piEE: pylima = ', pylima_par.piEE, ' ours = ', piEE)
print('piEN: pylima = ', pylima_par.piEN, ' ours = ', piEN)
pylima_amp = pylima_mod.model_magnification(pylima_tel, pylima_par)
pylima_lcurve, sf, bf = pylima_mod.compute_the_microlensing_model(pylima_tel, pylima_par)
pylima_lcurve_mag = microltoolbox.flux_to_magnitude(pylima_lcurve)
pylima_x, pylima_y, pylima_s = pylima_mod.source_trajectory(pylima_tel,
pylima_t0,
pylima_u0,
pylima_tE,
pylima_par)
plt.figure(1, figsize=(11, 6))
plt.clf()
f1 = plt.gcf().add_axes([0.4, 0.35, 0.57, 0.6])
f2 = plt.gcf().add_axes([0.4, 0.15, 0.57, 0.2])
f1.get_shared_x_axes().join(f1, f2)
f1.set_xticklabels([])
f1.plot(time_mjd, pylima_lcurve_mag, 'ko', label='pyLIMA')
f1.plot(time_mjd, our_mag, 'r.', label='Ours')
f1.invert_yaxis()
f1.set_xlabel('MJD (day)')
f1.set_ylabel('I (mag)')
f1.legend()
f2.plot(time_mjd, pylima_lcurve_mag - our_mag, 'k.')
f2.set_xlabel('MJD (day)')
f2.set_ylabel('PyL-Ours')
tleft = 0.03
ttop = 0.8
ttstep = 0.05
fig = plt.gcf()
fig.text(tleft, ttop - 0*ttstep, 'raL = {0:.2f} (deg)'.format(ra), fontsize=12)
fig.text(tleft, ttop - 1*ttstep, 'decL = {0:.2f} (deg)'.format(dec), fontsize=12)
fig.text(tleft, ttop - 2*ttstep, 't0 = {0:.1f} (MJD)'.format(t0), fontsize=12)
fig.text(tleft, ttop - 3*ttstep, 'u0 = {0:.3f}'.format(u0_amp), fontsize=12)
fig.text(tleft, ttop - 4*ttstep, 'u0_pyL = {0:.3f}'.format(pylima_u0), fontsize=12)
fig.text(tleft, ttop - 5*ttstep, 'tE = {0:.1f} (day)'.format(tE), fontsize=12)
fig.text(tleft, ttop - 6*ttstep, 'tE_pyL = {0:.1f} (day)'.format(pylima_tE), fontsize=12)
fig.text(tleft, ttop - 7*ttstep, 'piEE (S-L) = {0:.4f}'.format(piEE), fontsize=12)
fig.text(tleft, ttop - 8*ttstep, 'piEE_pyL (L-S) = {0:.4f}'.format(pylima_piEE), fontsize=12)
fig.text(tleft, ttop - 9*ttstep, 'piEN (S-L) = {0:.4f}'.format(piEN), fontsize=12)
fig.text(tleft, ttop - 10*ttstep, 'piEN_pyL (L-S) = {0:.4f}'.format(pylima_piEN), fontsize=12)
fig.text(tleft, ttop - 11*ttstep, 'b_sff = {0:.2f}'.format(b_sff), fontsize=12)
fig.text(tleft, ttop - 12*ttstep, 'mag_src = {0:.1f}'.format(mag_src), fontsize=12)
max_delta = np.max(np.abs(pylima_lcurve_mag - our_mag))
if max_delta > 1e-6:
fig.text(tleft, 0.05, '!!BAD!!', fontsize=16, color='red')
plt.savefig('PSPL_phot_vs_pyLIMA.png')
# plt.figure(2, figsize=(11, 6))
# plt.clf()
# f3 = plt.gcf().add_axes([0.4, 0.60, 0.57, 0.3])
# f4 = plt.gcf().add_axes([0.4, 0.15, 0.57, 0.3])
# f3.get_shared_x_axes().join(f3, f4)
# f3.plot(time_mjd, pylima_x, 'ko', label='pyLIMA')
# f3.plot(time_mjd, our_u[:, 0], 'r.', label='Ours')
# f4.plot(time_mjd, pylima_y, 'ko', label='pyLIMA')
# f4.plot(time_mjd, our_u[:, 1], 'r.', label='Ours')
return (time_mjd, pylima_lcurve_mag, our_mag, max_delta)
def test_u0_hat_thetaE_hat():
"""
Tests for:
u0_hat_from_thetaE_hat()
thetaE_hat_from_u0_hat()
"""
# Tests the current code implementation:
# Defines beta = u0_amp sign convention
# opposite of how Andy Gould (2004) does. Ours has:
# beta > 0 means u0_E > 0
# u0_amp > 0 mean u0_E > 0
#
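# Worked example of the sign convention (comments only):
# for u0_hat = [0.3, sqrt(0.91)] and N_hat = [0, 1],
#   np.cross(u0_hat, N_hat) = 0.3 * 1 - sqrt(0.91) * 0 = +0.3,
# so u0_amp carries the sign of the East component u0_E, as asserted in the tests below.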
E_hat = np.array([1.0, 0.0])
N_hat = np.array([0.0, 1.0])
##########
# Test 1:
# u0 sign: +, +
# muRel sign: +, -
##########
u0_hatE_in = 0.3
u0_hatN_in = (1.0 - u0_hatE_in**2)**0.5
u0_hat_in = np.array([u0_hatE_in, u0_hatN_in])
# direction of relative proper motion vector
# Same as thetaE_hat
muRel_hatE_in = (1.0 - u0_hatE_in**2)**0.5
muRel_hatN_in = -0.3
muRel_hat_in = np.array([muRel_hatE_in, muRel_hatN_in])
# Should be positive.
# in units of thetaE, opposite sign as beta???? NOT SURE ANYMORE.
u0amp_in = np.hypot(u0_hatE_in, u0_hatN_in) * np.cross(u0_hat_in, N_hat) * 1.0
# Test
u0_hat = model.u0_hat_from_thetaE_hat(muRel_hat_in, u0amp_in)
assert u0_hat[0] == u0_hat_in[0]
assert u0_hat[1] == u0_hat_in[1]
assert np.sign(u0_hat[0]) == np.sign(u0amp_in)
##########
# Test 2
# u0 sign: -, +
# muRel sign: +, +
##########
u0_hatE_in = -0.3
u0_hatN_in = (1.0 - u0_hatE_in**2)**0.5
u0_hat_in = np.array([u0_hatE_in, u0_hatN_in])
# direction of relative proper motion vector
# Same as thetaE_hat
muRel_hatE_in = (1.0 - u0_hatE_in**2)**0.5
muRel_hatN_in = 0.3
muRel_hat_in = np.array([muRel_hatE_in, muRel_hatN_in])
# Should be negative.
u0amp_in = np.hypot(u0_hatE_in, u0_hatN_in) * np.cross(u0_hat_in, N_hat) * 1.0
# Test
u0_hat = model.u0_hat_from_thetaE_hat(muRel_hat_in, u0amp_in)
assert u0_hat[0] == u0_hat_in[0]
assert u0_hat[1] == u0_hat_in[1]
assert np.sign(u0_hat[0]) == np.sign(u0amp_in)
##########
# Test 3
# u0 sign: -, -
# muRel sign: -, +
##########
u0_hatE_in = -0.3
u0_hatN_in = -(1.0 - u0_hatE_in**2)**0.5
u0_hat_in = np.array([u0_hatE_in, u0_hatN_in])
# direction of relative proper motion vector
# Same as thetaE_hat
muRel_hatE_in = -(1.0 - u0_hatE_in**2)**0.5
muRel_hatN_in = 0.3
muRel_hat_in = np.array([muRel_hatE_in, muRel_hatN_in])
# Should be negative.
u0amp_in = np.hypot(u0_hatE_in, u0_hatN_in) * np.cross(u0_hat_in, N_hat) * 1.0
# Test
u0_hat = model.u0_hat_from_thetaE_hat(muRel_hat_in, u0amp_in)
assert u0_hat[0] == u0_hat_in[0]
assert u0_hat[1] == u0_hat_in[1]
assert np.sign(u0_hat[0]) == np.sign(u0amp_in)
##########
# Test 4
# u0 sign: +, -
# muRel sign: +, +
##########
u0_hatE_in = 0.3
u0_hatN_in = -(1.0 - u0_hatE_in**2)**0.5
u0_hat_in = np.array([u0_hatE_in, u0_hatN_in])
# direction of relative proper motion vector
# Same as thetaE_hat
muRel_hatE_in = (1.0 - u0_hatE_in**2)**0.5
muRel_hatN_in = 0.3
muRel_hat_in = np.array([muRel_hatE_in, muRel_hatN_in])
# Should be positive.
u0amp_in = np.hypot(u0_hatE_in, u0_hatN_in) * np.cross(u0_hat_in, N_hat) * 1.0
# Test
u0_hat = model.u0_hat_from_thetaE_hat(muRel_hat_in, u0amp_in)
assert u0_hat[0] == u0_hat_in[0]
assert u0_hat[1] == u0_hat_in[1]
assert np.sign(u0_hat[0]) == np.sign(u0amp_in)
return
def test_PSBL_get_photometry_nans():
# This set of parameters reproduces the problem.
raL = 255.9785152922
decL = -26.7699679331
mL1 = 0.5449857890
mL2 = 0.2513479648
t0 = -445.5166414077
xS0 = [0, 0]
beta = 63.1888232379
muL = [-5.64504014, -5.63716286]
muS = [-4.60154964, -5.37112324]
dL = 6971.8480854741
dS = 13330.5805517047
sep = 205.1250722516
alpha = -496.2173351517
mag_src = 10.6950225830
b_sff = 0.0005696291
psbl = model.PSBL_PhotAstrom_Par_Param1(mL1, mL2, t0, xS0[0], xS0[1],
beta, muL[0], muL[1], muS[0], muS[1], dL, dS,
sep, alpha, [b_sff], [mag_src],
raL=raL, decL=decL, root_tol=1e-8)
print(f't0 = {psbl.t0:.1f} MJD')
print(f'tE = {psbl.tE:.1f} days')
duration = 100 # tE
time_steps = 5000
tmin = psbl.t0 - ((duration / 2.0) * psbl.tE)
tmax = psbl.t0 + ((duration / 2.0) * psbl.tE)
dt = np.linspace(tmin, tmax, time_steps)
dt = dt[1780:1788]
img, amp = psbl.get_all_arrays(dt)
phot = psbl.get_photometry(dt, amp_arr=amp)
# print('dt = ', dt)
# print('poht = ', phot)
# print('amp = ')
# print(amp)
# Check that we have both NaNs and valid values
# in our amplifications for testing.
idx_nan = np.where(np.isnan(amp).sum(axis=1) == 5)[0]
print(amp.shape)
assert len(idx_nan) > 0
assert len(idx_nan) != amp.shape[0]
# Check that the amp=nans are returned as masked.
assert np.sum(phot.mask[idx_nan]) == len(idx_nan)
# Check that the data itself has nan (not junk values)
# print(phot)
# print(phot.data)
# print(phot.mask)
# print(phot.data[idx_nan[0]])
assert np.isnan(phot.data[idx_nan]).sum() == len(idx_nan)
return
def test_PSBL_too_many_peaks3():
raL = 267.9828892936
decL = -26.4253612405
mL1 = 1.1694705685
mL2 = 6.8748978010
t0 = 272.1230025420
xS0 = [0, 0]
beta = 56.9742058606
muL = [-3.19572701, -5.71742749]
muS = [-3.50599981, -6.20537068]
dL = 3693.8211092591
dS = 8293.3433805508
sep = 0.9251665444
alpha = 147.6
mag_src = 5.1452097893
b_sff = 0.2092014345
psbl = model.PSBL_PhotAstrom_Par_Param1(mL1, mL2, t0, xS0[0], xS0[1],
beta, muL[0], muL[1], muS[0], muS[1], dL, dS,
sep, alpha, [b_sff], [mag_src],
raL=raL, decL=decL, root_tol=1e-8)
print(f't0 = {psbl.t0:.1f} MJD')
print(f'tE = {psbl.tE:.1f} days')
# Print out some angles to see when things might be a problem.
phi_muRel = np.degrees(np.arctan2(muS[1] - muL[1], muS[0] - muL[0]) - np.arctan2(1, 0))
phi = alpha - phi_muRel
print(f'phi_muRel = {phi_muRel} deg')
print(f'phi = {phi} deg')
print(f'alpha = {alpha} deg')
duration = 100 # tE
time_steps = 5000
tmin = psbl.t0 - 5000
tmax = psbl.t0 + 5000
dt = np.linspace(tmin, tmax, time_steps)
img_arr, amp_arr = psbl.get_all_arrays(dt)
phot = psbl.get_photometry(dt, amp_arr=amp_arr)
plt.figure(1)
plt.clf()
plt.plot(dt, phot)
ax = plt.gca()
ax.invert_yaxis()
ax.ticklabel_format(useOffset=False)
return
def test_PSBL_too_many_peaks_621():
raL = 255.9785152922
decL = -26.7699679331
mL1 = 0.5449857890
mL2 = 0.2513479648
t0 = -445.5166414077
xS0 = [0, 0]
beta = 63.1888232379
muL = [-5.64504014, -5.63716286]
muS = [-4.60154964, -5.37112324]
dL = 6971.8480854741
dS = 13330.5805517047
sep = 205.1250722516
alpha = -496.2173351517
mag_src = 10.6950225830
b_sff = 0.0005696291
psbl = model.PSBL_PhotAstrom_Par_Param1(mL1, mL2, t0, xS0[0], xS0[1],
beta, muL[0], muL[1], muS[0], muS[1], dL, dS,
sep, alpha, [b_sff], [mag_src],
raL=raL, decL=decL, root_tol=1e-8)
print(f't0 = {psbl.t0:.1f} MJD')
print(f'tE = {psbl.tE:.1f} days')
# Print out some angles to see when things might be a problem.
phi_muRel = np.degrees(np.arctan2(muS[1] - muL[1], muS[0] - muL[0]) - np.arctan2(1, 0))
phi = alpha - phi_muRel
print(f'phi_muRel = {phi_muRel} deg')
print(f'phi = {phi} deg')
print(f'alpha = {alpha} deg')
duration = 100 # tE
time_steps = 100
tmin = psbl.t0 - 2
tmax = psbl.t0 + 2
# time_steps = 10000
# tmin = psbl.t0 - 1000
# tmax = psbl.t0 + 1000
dt = np.linspace(tmin, tmax, time_steps)
img_arr, amp_arr = psbl.get_all_arrays(dt)
phot = psbl.get_photometry(dt, amp_arr=amp_arr)
print(amp_arr[0:5], img_arr[0:5])
plt.figure(1)
plt.clf()
plt.plot(dt, phot, 'k.-')
ax = plt.gca()
ax.invert_yaxis()
ax.ticklabel_format(useOffset=False)
return
# Testing GP classes.
def test_GP_classes(plot=False):
"""
Make sure can instantiate.
"""
t0 = 57000
u0_amp = 0.5
tE = 150
piE_E = 0.05
piE_N = 0.05
b_sff = 0.9
mag_src = 17.0
gp_log_sigma = 0.1
gp_log_rho = 1.0
gp_log_So = 0.1
gp_log_omegao = 0.1
raL = 17.30 * 15.
decL = -29.0
gp_rho = 1.0
gp_log_omegaofour_So = 0.1
np.random.seed(42)
data_stuff = fake_correlated_data(t0 = t0, u0_amp = u0_amp, tE = tE,
piE_E = piE_E, piE_N = piE_N,
b_sff = b_sff, mag_src = mag_src,
gp_log_sigma = gp_log_sigma, gp_log_rho = gp_log_rho,
gp_log_So = gp_log_So, gp_log_omegao = gp_log_omegao,
raL = raL, decL = decL)
pspl_model_in = data_stuff[0]
data23_uncorr = data_stuff[1]
data23 = data_stuff[2]
params = data_stuff[3]
model2 = model.PSPL_Phot_Par_GP_Param1(t0, u0_amp, tE,
piE_E, piE_N, b_sff, mag_src,
gp_log_sigma, gp_log_rho,
gp_log_So, gp_log_omegao,
raL=raL, decL=decL)
model3 = model.PSPL_Phot_noPar_GP_Param1(t0, u0_amp, tE,
piE_E, piE_N, b_sff, mag_src,
gp_log_sigma, gp_log_rho,
gp_log_So, gp_log_omegao,
raL=raL, decL=decL)
# Put in some assert statements to make sure things don't break in the future.
times = np.arange(56000, 58000, 100)
##########
# Note: Model 2 should match data (both with parallax) within errors.
##########
# Test that model 2 photometry is sensible (with no GP).
mod2_phot_good = np.array([16.88473035, 16.88425742, 16.88347207, 16.88227126, 16.87984510, 16.87418014,
16.86227960, 16.83498515, 16.74673373, 16.48463579, 16.11297016, 16.43088797,
16.75054887, 16.83643682, 16.86184533, 16.87410198, 16.87991725, 16.88227589,
16.88345499, 16.88426179])
mod2_phot_out = model2.get_photometry(times)
np.testing.assert_allclose(mod2_phot_out, mod2_phot_good, rtol=1e-5)
# Test the model 2 GP photometry... seed is fixed so this should remain identical. But I loosened tolerance
mod2_gpphot_good = np.array([16.88473035, 17.08646751, 16.74805916, 16.68253705, 16.87984509, 16.96989977,
16.6386355, 16.83498515, 16.85998419, 16.51997825, 16.12006682, 16.43088797,
16.61076884, 16.70502475, 16.93342688, 16.87410206, 16.87479723, 16.94838136,
16.88345499, 17.01714564])
mod2_gpphot_out, mod2_gpphot_std_out = model2.get_photometry_with_gp(data23['t_phot1'],
data23['mag1'], data23['mag_err1'],
t_pred=times)
np.testing.assert_allclose(mod2_gpphot_out, mod2_gpphot_good, rtol=1e-2)
# Test that we get the PSPL model out that we put in. (no GP)
mod2_phot_out_at_tobs = model2.get_photometry(data23_uncorr['t_phot1'])
np.testing.assert_allclose(mod2_phot_out_at_tobs, data23_uncorr['mag1'], rtol=0.3)
if plot:
plt.figure(1)
plt.clf()
plt.plot(times, mod2_gpphot_out, 'k-', label='With GP')
plt.plot(times, mod2_phot_out, 'r-', label='No GP')
plt.xlabel('MJD')
plt.ylabel('Mag')
plt.gca().invert_yaxis()
plt.legend()
plt.title('test_GP: model2')
##########
# Note: Model 3 should NOT match data (model without parallax, data with parallax)
##########
# Test that model 3 photometry is sensible (with no GP).
mod3_phot_good = np.array([16.88470908, 16.88426858, 16.88352731, 16.88220882, 16.87970123, 16.87452809,
16.86275283, 16.83268396, 16.74634087, 16.48738257, 16.09854878, 16.48738257,
16.74634087, 16.83268396, 16.86275283, 16.87452809, 16.87970123, 16.88220882,
16.88352731, 16.88426858])
mod3_phot_out = model3.get_photometry(times)
np.testing.assert_allclose(mod3_phot_out, mod3_phot_good, rtol=1e-5)
# Test the model 3 GP photometry... seed is fixed so this should remain identical. But I loosened the tolerance
mod3_gpphot_good = np.array([16.88470908, 17.08646752, 16.74805921, 16.68253668, 16.87970122, 16.96989985,
16.63863561, 16.83268396, 16.85998404, 16.51997894, 16.12006319, 16.48738257,
16.61076444, 16.70502078, 16.93406828, 16.87452817, 16.874797, 16.94838128,
16.88352731, 17.01714565])
mod3_gpphot_out, mod3_gpphot_std_out = model3.get_photometry_with_gp(data23['t_phot1'],
data23['mag1'], data23['mag_err1'],
t_pred=times)
np.testing.assert_allclose(mod3_gpphot_out, mod3_gpphot_good, rtol=1e-2)
return
def test_FSPL_classes(plot=False):
"""
Make sure can instantiate.
"""
mL = 10
t0 = 57000
beta = 2
dL = 4000
dS = 8000
xS0_E = 0.0
xS0_N = 0.08E-3
muS_E = -4.18
muS_N = -0.28
muL_E = 5.0
muL_N = -0.16
radius = 1e-3
b_sff = 0.9
mag_src = 19.0
raL = 17.30 * 15.
decL = -29.0
# Array of times we will sample on.
time_arr = np.linspace(t0-1000, t0+1000, 1000)
def test_fspl_once(r_in, n_in, phot_arr_good, mod='FSPL'):
# Make the model
tstart = time.time()
if mod == 'FSPL':
tmp_mod = model.FSPL_PhotAstrom_Par_Param1(mL, t0, beta, dL, dS,
xS0_E, xS0_N,
muL_E, muL_N,
muS_E, muS_N,
r_in,
b_sff, mag_src, n_in,
raL=raL, decL=decL)
else:
tmp_mod = model.PSPL_PhotAstrom_Par_Param1(mL, t0, beta, dL, dL/dS,
xS0_E, xS0_N,
muL_E, muL_N,
muS_E, muS_N,
b_sff, mag_src,
raL=raL, decL=decL)
# Generate photometry
phot_arr = tmp_mod.get_photometry(time_arr)
# Once the model is working, fetch 10 evenly spaced points and
# save them as the "right answer" for the individual tests below.
# print(repr(phot_arr[::100])) # GET GOOD
# Check against good data
|
np.testing.assert_allclose(phot_arr[::100], phot_arr_good, rtol=1e-6)
|
numpy.testing.assert_allclose
|
import cv2
from mapUtils import angleToCenter, distanceToCenter, fractalnoise, normalize
import numpy as np
import matplotlib.pyplot as plt
def get_image():
# image2 = normalize(angleToCenter((300, 300)))
image1 = 1 - normalize(distanceToCenter((300, 300)))
image3 = normalize(fractalnoise((300, 300), 2))
image = image1 * image3
thres = 0.3
image =
|
np.clip((image - thres) * 100.0 + thres, 0, 1)
|
numpy.clip
|
import pandas as pd
import numpy as np
from pandas_datareader import data
import matplotlib.pyplot as plt
import yaml
import sys
import math
plt.style.use('ggplot')
def LoadConfig(
yamlpath: str)-> dict:
config = yaml.load(
open(yamlpath, 'r'),
Loader=yaml.FullLoader)
return config
def GetData(
ticker : str,
start_date : str,
end_date : str)-> pd.DataFrame:
"""Getting historic price data from yahoo finance.
Arguments:
ticker {str}
start_date {str}
end_date {str}
Returns:
pd.DataFrame --> the output price dataframe
"""
return data.DataReader(ticker,'yahoo', start_date, end_date)
def PlotOptimalSharpeRatio(Agent):
plt.plot(range(len(Agent.epoch_training)),Agent.epoch_training, color ="navy")
plt.title("Sharpe ratio optimization")
plt.xlabel("Number of Iterations")
plt.ylabel("Sharpe Ratio")
plt.grid(True)
plt.savefig("outputfiles/graphs/Sharpe ratio optimization {} SMA noFeatures.png".format(str(Agent.input_size)), dpi=300)
plt.close()
def PlotTraining(Agent):
fig, ax = plt.subplots(nrows=3, figsize=(20, 10))
t =
|
np.linspace(1, Agent.trading_periods, Agent.trading_periods)
|
numpy.linspace
|
import argparse
import math
import os
import random
import sys
import time
import cv2
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from skimage.morphology import skeletonize
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
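# Usage sketch (hypothetical flag name; assumes an argparse.ArgumentParser called `parser`):
#   parser.add_argument("--use-gpu", type=str2bool, nargs="?", const=True, default=False)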
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def setSeed(config):
if config["seed"] is None:
manualSeed = np.random.randint(1, 10000)
else:
manualSeed = config["seed"]
print("Random Seed: ", manualSeed)
np.random.seed(manualSeed)
torch.manual_seed(manualSeed)
random.seed(manualSeed)
torch.cuda.manual_seed_all(manualSeed)
def getParllelNetworkStateDict(state_dict):
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
return new_state_dict
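# Usage sketch (hypothetical checkpoint path; a `model` instance is assumed):
# when a network was saved wrapped in nn.DataParallel, strip the "module." prefix
# before loading into a plain model:
#   state = torch.load("checkpoint.pth")  # assumed to hold the raw state dict
#   model.load_state_dict(getParllelNetworkStateDict(state))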
def to_variable(tensor, volatile=False, requires_grad=True):
return Variable(tensor.long().cuda(), requires_grad=requires_grad)
def weights_init(model, manual_seed=7):
np.random.seed(manual_seed)
torch.manual_seed(manual_seed)
random.seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def weights_normal_init(model, manual_seed=7):
np.random.seed(manual_seed)
torch.manual_seed(manual_seed)
random.seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
m.weight.data.normal_(0.0, 0.02)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def performAngleMetrics(
train_loss_angle_file, val_loss_angle_file, epoch, hist, is_train=True, write=False
):
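# Note: `hist` is assumed to be a square confusion matrix (rows = ground truth,
# columns = predictions); the metrics below are the standard segmentation scores
# (pixel accuracy, mean per-class accuracy, mean IoU, frequency-weighted accuracy).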
pixel_accuracy = np.diag(hist).sum() / hist.sum()
mean_accuracy = np.diag(hist) / hist.sum(1)
mean_iou = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
freq = hist.sum(1) / hist.sum()
fwavacc = (freq[freq > 0] * mean_iou[freq > 0]).sum()
if write and is_train:
train_loss_angle_file.write(
"[%d], Pixel Accuracy:%.3f, Mean Accuracy:%.3f, Mean IoU:%.3f, Freq.Weighted Accuray:%.3f \n"
% (
epoch,
100 * pixel_accuracy,
100 * np.nanmean(mean_accuracy),
100 * np.nanmean(mean_iou),
100 * fwavacc,
)
)
elif write and not is_train:
val_loss_angle_file.write(
"[%d], Pixel Accuracy:%.3f, Mean Accuracy:%.3f, Mean IoU:%.3f, Freq.Weighted Accuray:%.3f \n"
% (
epoch,
100 * pixel_accuracy,
100 * np.nanmean(mean_accuracy),
100 * np.nanmean(mean_iou),
100 * fwavacc,
)
)
return 100 * pixel_accuracy, 100 *
|
np.nanmean(mean_iou)
|
numpy.nanmean
|
# run octave from python (IO code )
# This code runs the IO code, produces the RMSE plots, and saves the output data
# Created by <NAME>
# <EMAIL>
# -----------------------------------------------------------------------------------
# Do not change any line here; it will be changed automatically by io_scrpt.sh!!
# -----------------------------------------------------------------------------------
from oct2py import octave
import pandas as pd
import csv
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import numpy as np
from RMSE_MAPS_INGO import read_data_from_mistral as rdfm
from CCLM_OUTS import Plot_CCLM
import cartopy.crs as ccrs
#from Plot_RMSE_SPREAD_main import plot_rmse_spread as prs
# ============================================= NAMELIST ==========================================
SEAS='DJF'
NN=1000  # number of observations; should be read from previous functions!
month_length=20
SEAS='DJF'
Vari = 'T_2M'
buffer=20
timesteps=10 # number of the seasons (years)
start_time=0
#name_2 = 'member_relax_3_big_00_' + Vari + '_ts_splitseas_1979_2015_' + SEAS + '.nc'
name_2 = 'tg_0.44deg_rot_v15.0_' + SEAS + '_1979_2015_remapbil.nc'
member=0
DIR="path_oi"
DIR_exp="path_dir"+"/"
# =================================================================================================
octave.run(DIR+"run_IO.m") # Running the Octave Interface from python!
# -------------------------------------------------------------------------------------------------
LAT = pd.read_csv(DIR_exp+"Trash/LAT.csv", header=None)
LON = pd.read_csv(DIR_exp+"Trash/LON.csv", header=None)
Forecast_3 = np.array(pd.read_csv(DIR_exp+'Trash/SEASON_MEAN1' + '_' + SEAS + '.csv', header=None))#Reading the Forecast values
t_f = np.zeros((month_length,Forecast_3.shape[0],Forecast_3.shape[1]))
for month in range(0, month_length):# Reading the ensemble forecast for each month!
t_f[month,:,:] = pd.read_csv(DIR_exp+'Trash/SEASON_MEAN' + str(month) + '_' + SEAS + '.csv', header=None)
t_f = np.array(t_f)
## add correction to forecast :
# declare zero matrix which will be filled
result_IO = np.zeros((month_length,Forecast_3.shape[0],Forecast_3.shape[1]))
result = np.zeros((Forecast_3.shape[0],Forecast_3.shape[1]))
for i in range(0,month_length):
fil=DIR + 'fi' + str(member) + str(i) +'.csv'
result=np.array(list(csv.reader(open(fil, "r"), delimiter=','))).astype('float')
result_IO[i,:,:] = np.squeeze(t_f[i,:,:]) + result
# plot differences
pdf_name= 'last_m100_l20_'+str(member)+'.pdf'
#t_o, lat_o, lon_o, rlat_o, rlon_o =rdfm(dir='/work/bb1029/b324045/work5/03/member_relax_3_big_00/post/', # the observation (default run without shifting)
# name=name_2,
# var=Vari)
#t_o, lat_o, lon_o, rlat_o, rlon_o =rdfm(dir='NETCDFS_CCLM/03/member_relax_3_big_00/post/', # the observation (default run without shifting)
# name=name_2,
# var=Vari)
t_o, lat_o, lon_o, rlat_o, rlon_o =rdfm(dir='/NETCDFS_CCLM/eobs/', # the observation (default run without shifting)
name=name_2,
var=Vari)
dext_lon = t_o.shape[2] - (2 * buffer)
dext_lat = t_o.shape[1] - (2 * buffer)
start_lon=(buffer+4)
start_lat=(buffer-4)
##TODO: make it a function:
#def f(x):
# if x==-9999:
# return float('NaN')
# else:
# return x
#f2 = np.vectorize(f)
#t_o= f2(t_o)
#t_o=t_o.squeeze()
#t_o = t_o.data
#t_o[np.isnan(t_o)] = np.nanmean(t_o)
##end todo
#t_o[t_o<-900]=float('NaN')
#t_o[np.isnan(t_o)]=float(0.0)
forecast = result_IO
obs = t_o[0:month_length, buffer:buffer + dext_lat, buffer:buffer + dext_lon]
RMSE=np.zeros((forecast.shape[1],forecast.shape[2]))
RMSE_TIME_SERIES=
|
np.zeros(forecast.shape[0])
|
numpy.zeros
|
import PIL
import io
import os
import itertools
import cv2
import uuid
import tensorflow as tf
import numpy as np
import pandas as pd
from pathlib import Path
from object_detection.utils import dataset_util
from jigsaw.model_utils.base.mask import LabeledImageMask
from jigsaw.model_utils.types import BoundingBox
class InstanceImageMask(LabeledImageMask):
training_type = "Instance Segmentation"
verbose_write = False
def __init__(self, image_id, image_path, image_type, label_boxes, mask_path, label_masks, binary_masks,
xdim, ydim):
super().__init__(image_id, image_path, image_type, mask_path, label_masks, xdim, ydim)
self.label_boxes = label_boxes
self.binary_masks = binary_masks
@classmethod
def construct(cls, image_id, **kwargs):
"""Constructs a LabeledImageMask object from a set of standard files
Args:
image_id (str): the unique ID for this image
Returns:
LabeledImageMask: the object representative of this semantically-
labeled image
"""
try:
skip_background = kwargs["skip_background"]
except KeyError:
skip_background = True
if cls.temp_dir is None:
cwd = Path.cwd()
data_dir = cwd / 'data'
else:
data_dir = cls.temp_dir
mask_filepath = data_dir / f'mask_{image_id}.png'
mask_filepath = str(
mask_filepath.absolute()) # cv2.imread doesn't like Path objects.
labels_filepath = data_dir / f'labels_{image_id}.csv'
image_filepath = None
image_type = None
file_extensions = [".png", ".jpg", ".jpeg"]
for extension in file_extensions:
temp_filepath = data_dir / f'image_{image_id}{extension}'
if os.path.exists(data_dir / f'image_{image_id}{extension}'):
image_filepath = data_dir / f'image_{image_id}{extension}'
image_type = extension
break
if image_filepath is None:
raise ValueError("Hmm, there doesn't seem to be a valid image filepath.")
labels_df = pd.read_csv(labels_filepath, index_col="label")
image_mask = cv2.imread(mask_filepath)
ydim, xdim, _ = image_mask.shape
label_masks = {}
for label, color in labels_df.iterrows():
if label == "background" and skip_background:
continue
color_bgr = np.array([color["B"], color["G"], color["R"]])
label_masks[label] = color_bgr
label_boxes = []
binary_masks = []
image_mask = cv2.imread(mask_filepath)
r = 2
b = 0
g = 1
for label, color in label_masks.items():
if label == 'panel_left' or label == 'panel_right':
label = 'solar_panel'
matched = False
x = [-2, -1, 0, 1, 2]
iters = [p for p in itertools.product(x, repeat=3)]
mask = np.zeros(image_mask.shape, dtype=np.uint8)
for i in iters:
c = np.add(color, np.array(i))
match = np.where((image_mask == c).all(axis=2))
y, x = match
if len(y) != 0 and len(x) != 0:
mask[match] = [255, 255, 255]
matched = True
cls.add_label_int(label)
if not matched:
continue
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
ymin, ymax =
|
np.where(rows)
|
numpy.where
|
from __future__ import print_function
'''
This module should be organized as follows:
Main function:
chi_estimate() = returns chi_n, chi_b
- calls:
wealth.get_wealth_data() - returns data moments on wealth distribution
labor.labor_data_moments() - returns data moments on labor supply
minstat() - returns min of statistical objective function
model_moments() - returns model moments
SS.run_SS() - return SS distributions
'''
'''
------------------------------------------------------------------------
Last updated: 7/27/2016
Uses a simulated method of moments to calibrate the chi_n and chi_b
parameters of OG-USA.
This py-file calls the following other file(s):
wealth.get_wealth_data()
labor.labor_data_moments()
SS.run_SS
This py-file creates the following other file(s): None
------------------------------------------------------------------------
'''
import numpy as np
import scipy.optimize as opt
import pandas as pd
import os
try:
import cPickle as pickle
except ImportError:
import pickle
from . import wealth
from . import labor
from . import SS
from . import utils
def chi_n_func(s, a0, a1, a2, a3, a4):
chi_n = a0 + a1 * s + a2 * s ** 2 + a3 * s ** 3 + a4 * s ** 4
return chi_n
def chebyshev_func(x, a0, a1, a2, a3, a4):
func = np.polynomial.chebyshev.chebval(x, [a0, a1, a2, a3, a4])
return func
def chi_estimate(p, client=None):
'''
--------------------------------------------------------------------
This function calls others to obtain the data moments and then
runs the simulated method of moments estimation by calling the
minimization routine.
INPUTS:
income_tax_parameters = length 4 tuple, (analytical_mtrs, etr_params, mtrx_params, mtry_params)
ss_parameters = length 21 tuple, (J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\
g_n_ss, tau_payroll, retire, mean_income_data,\
h_wealth, p_wealth, m_wealth, b_ellipse, upsilon)
iterative_params = [2,] vector, vector with max iterations and tolerance
for SS solution
chi_guesses = [J+S,] vector, initial guesses of chi_b and chi_n stacked together
baseline_dir = string, path where baseline results located
OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
wealth.compute_wealth_moments()
labor.labor_data_moments()
minstat()
OBJECTS CREATED WITHIN FUNCTION:
wealth_moments = [J+2,] array, wealth moments from data
labor_moments = [S,] array, labor moments from data
data_moments = [J+2+S,] array, wealth and labor moments stacked
bnds = [S+J,] array, bounds for parameter estimates
chi_guesses_flat = [J+S,] vector, initial guesses of chi_b and chi_n stacked
min_arg = length 6 tuple, variables needed for minimizer
est_output = dictionary, output from minimizer
chi_params = [J+S,] vector, parameters estimates for chi_b and chi_n stacked
objective_func_min = scalar, minimum of statistical objective function
OUTPUT:
./baseline_dir/Calibration/chi_estimation.pkl
RETURNS: chi_params
--------------------------------------------------------------------
'''
baseline_dir="./OUTPUT"
#chi_b_guess = np.ones(80)
# a0 = 5.38312524e+01
# a1 = -1.55746248e+00
# a2 = 1.77689237e-02
# a3 = -8.04751667e-06
# a4 = 5.65432019e-08
""" Kei's Vals
a0 = 170
a1 = -2.19154735e+00
a2 = -2.22817460e-02
a3 = 4.49993507e-04
a4 = -1.34197054e-06
"""
""" Adam's Vals 1
a0 = 2.59572155e+02
a1 = -2.35122641e+01
a2 = 4.27581467e-01
a3 = -3.40808933e-03
a4 = 1.00404321e-05
"""
a0 = 1.16807470e+03#5.19144310e+02
a1 = -1.05805189e+02#-4.70245283e+01
a2 = 1.92411660e+00#8.55162933e-01
a3 = -1.53364020e-02#-6.81617866e-03
a4 = 4.51819445e-05#2.00808642e-05
sixty_plus_chi = 10000
params_init =
|
np.array([a0, a1, a2, a3, a4])
|
numpy.array
|
#!
#
# REFERENCES
# util_ManualOverlapGrid.py
# 20160906-Pankow-ProgressPortingNRVariantToMaster/util_ManualOverlapGrid.py
#
import scipy.linalg as linalg
import numpy as np
def fit_quadratic(x,y,x0=None,variable_symmetry_list=None,gamma_x=None,prior_x_gamma=None,prior_quadratic_gamma=None,verbose=False,n_digits=None,hard_regularize_negative=False,hard_regularize_scale=1):
"""
Simple least squares to a quadratic.
Written out in long form so I can impose priors as needed on (a) the fit coefficients [=regularization] and (b) the 'x' coordinates.
INPUT:
x = array so x[0] , x[1], x[2] are points.
y = array of y values
OUTPUT
peak_val_est,
best_val_est,
my_fisher_est,
linear_term_est,
fit_here : python function providing best fit. Usually what you want, but cannot be pickled/saved to text
OPTIONAL
variable_symmetry_list = list of length x, indicating symmetry under ONE discrete symmetry (so far)
"""
x0_val = np.zeros(len(x[0]))
if not (x0 is None):
if verbose:
print(" Fisher: Using reference point ", x0)
x0_val = x0
dim = len(x[0])
npts = len(x)
if verbose:
print(" Fisher : dimension, npts = " ,dim, npts)
# Constant, linear, quadratic functions.
# Beware of lambda: f_list = [(lambda x: k) for k in range(5)] does not work, but this does
# f_list = [(lambda x,k=k: k) for k in range(5)]
f0 = [lambda z: np.ones(len(z),dtype=np.float128)]
# indx_lookup_linear = {} # protect against packing errors
# indx_here = len(f0)
# f_linear = []
# for k in np.arange(dim):
# f_linear.append( (lambda z,k=k,x0V=x0_val: z.T[k] - x0V[k]))
# indx_lookup_linear[k] =indx_here
# indx_here+=1
f_linear = [(lambda z,k=k,x0V=x0_val: z.T[k] - x0V[k]) for k in np.arange(dim)]
f_quad = []
indx_lookup = {}
indx_here =len(f0)+len(f_linear)
for k in np.arange(dim):
for q in range(k,dim):
if variable_symmetry_list:
if variable_symmetry_list[k]*variable_symmetry_list[q] <0:
if verbose:
print(" Not including quadratic term because of symmetry", (k,q))
continue # skip the remaining part
f_quad.append( (lambda z,k=k,q=q: (z.T[k] - x0_val[k])*(z.T[q]-x0_val[q])) )
indx_lookup[(k,q)] = indx_here
indx_here+=1
f_list=f0+f_linear + f_quad
n_params_model = len(f_list)
if verbose:
print(" ---- Dimension: --- ", n_params_model)
print(" ---- index pattern (paired only; for manual identification of quadratic terms) --- ")
print(indx_lookup)
# if verbose:
# print " ---- check quadratic --- "
# for pair in indx_lookup:
# fn_now = f_list[indx_lookup[pair]]
# print " Grid test " , pair, fn_now(np.array([1,0])), fn_now(np.array([0,1])), fn_now(np.array([1,1])) ,fn_now(np.array([1,-1]))
F = np.matrix(np.zeros((len(x), n_params_model),dtype=np.float128))
for q in np.arange(n_params_model):
fval = f_list[q](np.array(x,dtype=np.float128))
F[:,q] = np.reshape(fval, (len(x),1))
gamma = np.matrix( np.diag(np.ones(npts,dtype=np.float128)))
if not(gamma_x is None):
gamma = np.matrix(gamma_x)
Gamma = F.T * gamma * F # Fisher matrix for the fit
Sigma = linalg.inv(Gamma) # Covariance matrix for the fit. WHICH CODE YOU USE HERE IS VERY IMPORTANT.
# if verbose:
# print " -- should be identity (error here is measure of overall error) --- "
# print " Fisher: Matrix inversion/manipulation error ", np.linalg.norm(Sigma*Gamma - np.eye(len(Sigma))) , " which can be large if the fit coordinates are not centered near the peak"
# print " -- --- "
lambdaHat = np.array((Sigma* F.T*gamma* np.matrix(y).T))[:,0] # point estimate for the fit parameters (i.e., fisher matrix and best fit point)
if n_digits:
lambdaHat = np.array(list(map(lambda z: round(z, n_digits), lambdaHat)))
if verbose:
print(" Fisher: LambdaHat = ", lambdaHat)
if verbose:
print(" Generating predictive function ")
def fit_here(x):
return np.sum(list(map(lambda z: z[1]*z[0](x), zip(f_list,lambdaHat) )),axis=0)
if verbose:
my_resid = y - fit_here(x)
print(" Fisher: Residuals ", np.std(my_resid))
###
### Reconstructing quadratic terms: a bonus item
###
constant_term_est = lambdaHat[0] # Constant term
linear_term_est = lambdaHat[1:dim+1] # Coefficient of linear terms
my_fisher_est = np.zeros((dim,dim),dtype=np.float64) # A SIGNIFICANT LIMITATION...
for pair in indx_lookup:
k = pair[0]; q=pair[1];
indx_here = indx_lookup[pair]
my_fisher_est[k,q] += -lambdaHat[indx_here]
my_fisher_est[q,k] += -lambdaHat[indx_here] # this will produce a factor of 2 if the two terms are identical
if not(prior_x_gamma is None) and (prior_x_gamma.shape == my_fisher_est.shape):
my_fisher_est += prior_x_gamma
if verbose:
print(" Fisher: ", my_fisher_est)
print(" Fisher: Sanity check (-0.5)*Fisher matrix vs components (diagonal only) : ", -0.5*my_fisher_est, "versus", lambdaHat)
my_fisher_est_inv = linalg.inv(my_fisher_est) # SEE INVERSE DISCUSSION
if verbose:
print(" Fisher: Matrix inversion/manipulation error test 2", np.linalg.norm(np.dot(my_fisher_est,my_fisher_est_inv) - np.eye(len(my_fisher_est))))
peak_val_est = float(constant_term_est) +np.dot(linear_term_est, np.dot(my_fisher_est_inv,linear_term_est))/2
best_val_est = x0_val + np.dot(my_fisher_est_inv,linear_term_est) # estimated peak location, including correction for reference point
if verbose:
print(" Fisher : Sanity check: peak value estimate = ", peak_val_est, " which arises as a delicate balance between ", constant_term_est, " and ", np.dot(linear_term_est, np.dot(my_fisher_est_inv,linear_term_est))/2)
print(" Fisher : Best coordinate estimate = ", best_val_est)
print(" Fisher : eigenvalues (original) ", np.linalg.eig(my_fisher_est))
if hard_regularize_negative:
w,v = np.linalg.eig(my_fisher_est)
indx_neg = w<0
# usually we are regularizing placements in spin ... this provides us with error in that dimension
w[indx_neg] = hard_regularize_scale # 1./np.min( np.std(x,axis=0))**2 # use scatterplot of input points to set scale of this dimension
my_fisher_est = np.dot(v.T,np.dot(np.diag(w),v)) # reconstruct matrix, after regularization
return [peak_val_est, best_val_est, my_fisher_est, linear_term_est,fit_here]
def fit_quadratic_and_resample(x,y,npts,rho_fac=1,x0=None,gamma_x=None,prior_x_gamma=None,prior_quadratic_gamma=None,verbose=False,n_digits=None,hard_regularize_negative=False,hard_regularize_scale=1):
"""
Simple least squares to a quadratic, *and* resamples from the quadratic derived from the fit.
Critical for iterative evaluation of
- Fisher matrix
- lnLmarg (ILE)
TO DO:
- implement non-stochastic placement as option (e.g., like effectiveFisher.py)
"""
# Find the fit
the_quadratic_results = fit_quadratic(x,y,x0=x0,gamma_x=gamma_x,prior_x_gamma=prior_x_gamma,prior_quadratic_gamma=prior_quadratic_gamma,n_digits=n_digits,hard_regularize_negative=hard_regularize_negative,hard_regularize_scale=hard_regularize_scale)
peak_val_est, best_val_est, my_fisher_est, linear_term_est,fit_here = the_quadratic_results
# Use the inverse covariance matrix
my_fisher_est_inv = linalg.pinv(my_fisher_est) # SEE INVERSE DISCUSSION
x_new = np.random.multivariate_normal(best_val_est,my_fisher_est_inv/(rho_fac*rho_fac),size=npts)
return x_new
if __name__ == "__main__":
import argparse
import sys
import numpy as np
print(" Testing quadratic fit code ")
print(" Two dimensions ")
x1 = np.linspace(-5,5,40)
x2 =
|
np.linspace(-1,1,10)
|
numpy.linspace
|
import numpy as np
import time
class SVM():
def __init__(self, max_iter=10000, kernel='rbf', C=1.0, gamma=0.001):
self.max_iter = max_iter
if kernel == 'linear':
self.kernel = self.linear_kernel
elif kernel == 'sigmoid':
self.kernel = self.sigmoid_kernel
else:
self.kernel = self.rbf_kernel
self.C = C
self.gamma = gamma
def fit(self, X, y):
t1 = time.time()
# Initialization
n, d = X.shape[0], X.shape[1]
alpha = np.zeros((n))
kernel = self.kernel
count = 0
while count < self.max_iter:
count += 1
alpha_prev = np.copy(alpha)
for j in range(0, n):
i = j
while i == j:
i = np.random.randint(0, n)
x_i, x_j, y_i, y_j = X[i,:], X[j,:], y[i], y[j]
k_ij = kernel(x_i, x_i, self.gamma) + kernel(x_j, x_j, self.gamma) - 2 * kernel(x_i, x_j, self.gamma)
if k_ij == 0:
continue
alpha_prime_j, alpha_prime_i = alpha[j], alpha[i]
(L, H) = self.compute_L_H(self.C, alpha_prime_j, alpha_prime_i, y_j, y_i)
# Update the parameters
self.w = np.dot(alpha * y, X)
self.b = np.mean(y - np.dot(self.w.T, X.T))
# Prediction errors
E_i = self.E(x_i, y_i, self.w, self.b)
E_j = self.E(x_j, y_j, self.w, self.b)
# Update alpha
alpha[j] = alpha_prime_j + float(y_j * (E_i - E_j))/k_ij
alpha[j] = max(alpha[j], L)
alpha[j] = min(alpha[j], H)
alpha[i] = alpha_prime_i + y_i*y_j * (alpha_prime_j - alpha[j])
# Convergence check
diff = np.linalg.norm(alpha - alpha_prev)
if diff < 0.001:
break
# Finalize the parameters
self.b = np.mean(y - np.dot(self.w.T, X.T))
if self.kernel == self.linear_kernel:
self.w = np.dot(alpha * y, X)
# Elapsed time
t2 = time.time()
self.elapsed_time = t2 - t1
def predict(self, X):
return self.h(X, self.w, self.b)
# Prediction
def h(self, X, w, b):
return np.sign(np.dot(w.T, X.T) + b).astype(int)
# Prediction error
def E(self, x_k, y_k, w, b):
return self.h(x_k, w, b) - y_k
def compute_L_H(self, C, alpha_prime_j, alpha_prime_i, y_j, y_i):
if(y_i != y_j):
return (max(0, alpha_prime_j - alpha_prime_i), min(C, C - alpha_prime_i + alpha_prime_j))
else:
return (max(0, alpha_prime_i + alpha_prime_j - C), min(C, alpha_prime_i + alpha_prime_j))
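# Note: compute_L_H returns the standard SMO box bounds (L, H) that keep
# 0 <= alpha_j <= C while preserving the equality constraint linking alpha_i and alpha_j.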
# Kernel definitions
def linear_kernel(self, x1, x2, gamma=None):  # gamma is accepted but ignored for the linear kernel
return np.dot(x1, x2.T)
def sigmoid_kernel(self, x1, x2, gamma):
return 1 / (1 + np.exp(-gamma * np.dot(x1, x2.T)))
def rbf_kernel(self, x1, x2, gamma):
return (np.exp(-gamma *
|
np.linalg.norm(x1 - x2)
|
numpy.linalg.norm
|
from math import isclose
import numpy as np
import scipy.spatial
class PointData:
def __init__(self, left, point, right):
self.left = left
self.point = point
self.right = right
self.between = between_neighbors(left, point, right)
self.offset = midpoint_projection_offset(left, point, right)
def __eq__(self, other):
if isinstance(self, other.__class__):
return all([
np.array_equal(self.left, other.left),
np.array_equal(self.point, other.point),
np.array_equal(self.right, other.right),
(self.between == other.between),
isclose(self.offset, other.offset),
])
return NotImplemented
def less_or_close(a, b, *args, **kwargs):
# Use isclose for handling effective equivalence
return a < b or isclose(a, b, *args, **kwargs)
def neighbor_window(seq, index, count=1):
if len(seq) < (count + 2):
raise ValueError("seq must have at least 3 elements to have neighbors")
if index < 1 or index > (len(seq) - (count + 1)):
raise IndexError(f"Index must fall between 1 and len(seq) - 2 to have neighbors: (index={index}, seq={seq})")
return seq[index - 1:index + count + 1]
def modified_point_list(seq):
if len(seq) < 3:
raise ValueError("seq must have at least 3 elements to have neighbors")
if not np.array_equal(seq[0], seq[-1]):
raise ValueError("First and last element must match")
return_seq = []
for pnt in tuple(seq) + (seq[1],):
try:
if len(pnt) == 2:
return_seq.append(np.asarray(pnt))
continue
except TypeError:
raise ValueError("each element in seq must have len(2)")
return return_seq
def point_window_iter(seq):
# Iterates over groups of three points, where the input seq
# has first and last the same, then add a final group with the
# first/last element in the middle
elem_wrapped_seq = seq + (seq[1],)
for i in range(1, len(elem_wrapped_seq) - 1):
yield neighbor_window(elem_wrapped_seq, i)
def within_tolerance(value, within, float_tol=1e-9):
if (within < 0):
raise ValueError('Argument "within" cannot be negative')
abs_value = abs(value)
return less_or_close(abs_value, within, rel_tol=float_tol)
def midpoint_projection_offset(pnt1, pnt2, pnt3):
outer_vec = pnt3 - pnt1
norm_outer = np.linalg.norm(outer_vec)
return abs(np.cross(outer_vec, pnt1 - pnt2) / norm_outer)
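# Worked example (comments only): for pnt1=(0, 0), pnt2=(1, 1), pnt3=(2, 0),
#   outer_vec = (2, 0), np.cross((2, 0), (-1, -1)) = 2*(-1) - 0*(-1) = -2,
# so the offset is |-2| / 2 = 1, i.e. the perpendicular distance of pnt2
# from the line through pnt1 and pnt3.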
def between_neighbors(pnt1, pnt2, pnt3):
"""Midpoint projected onto neighboring points line is contained in segment"""
# Make sure the projection of the midpoint lies between the outer points
outer_vec = pnt3 - pnt1
norm_outer = np.linalg.norm(outer_vec)
scalar_proj = np.dot(pnt2 - pnt1, outer_vec / norm_outer)
return (
less_or_close(0, scalar_proj) and less_or_close(scalar_proj, norm_outer)
)
def points_inline(pnt1, pnt2, pnt3, tolerance, float_tol=1e-9):
"""Check if the middle point lies on the line between 1 and 2 withing tolerance"""
mid_offset = midpoint_projection_offset(pnt1, pnt2, pnt3)
# First check that the point is inline within tolerance
is_inline = within_tolerance(mid_offset, tolerance, float_tol)
# Make sure the projection of the midpoint lies between the outer points
is_between = between_neighbors(pnt1, pnt2, pnt3)
return is_inline and is_between
def get_radians(pnt1, pnt2, pnt3):
v1 = pnt1 - pnt2
v2 = pnt3 - pnt2
return np.arccos(np.dot(v1, v2) / (
|
np.linalg.norm(v1)
|
numpy.linalg.norm
|
import os
import glob
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, average_precision_score, precision_recall_curve, roc_curve
from sklearn.utils.fixes import signature
from skimage.measure import compare_ssim as ssim
from scipy.misc import imread
from scipy.io import loadmat, savemat
from ROC import assessment
from ProgressBar import ProgressBar
import cv2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def check_path(dataset, cube_size):
cube_str = '%d_%d_%d' % tuple(cube_size)
assert cube_str in dataset['cube_dir']
# [SECTION] IMAGE PROCESSING
# important: load as gray image (i.e. 1 channel)
def resize(datum, size):
assert len(datum.shape) == 2
ret = cv2.resize(datum.astype(float), tuple(size))
return ret
def load_images_and_resize(dataset, new_size=[120, 160], train=True, force_recalc=False, return_entire_data=False):
img_dir = dataset['path_train' if train else 'path_test']
n_images = np.sum(count_sequence_n_frame(dataset, test=not train))
print('number of images: ', n_images)
n_clip = dataset['n_clip_train' if train else 'n_clip_test']
#
if return_entire_data:
resized_image_data = np.empty((n_images, new_size[0], new_size[1], 1), dtype=np.float32)
idx = 0
#
for i in range(n_clip):
clip_path = '%s/%s%s/' % (img_dir, 'Train' if train else 'Test', str(i+1).zfill(3))
print(clip_path)
# image
img_files = sorted(glob.glob(clip_path + '*.tif'))
saved_image_file = '%s/%s_image_clip_%d.npz' % (dataset['cube_dir'], 'training' if train else 'test', i+1)
if os.path.isfile(saved_image_file) and not force_recalc:
image_data = np.load(saved_image_file)['image']
else:
image_data = np.array([resize(imread(img_file, 'L')/255., (new_size[1], new_size[0])) for img_file in img_files]).astype(np.float32)
np.savez_compressed(saved_image_file, image=image_data)
print('clip', i+1, image_data.shape)
if return_entire_data:
resized_image_data[idx:idx+len(image_data)] = image_data
idx += len(image_data)
#
if return_entire_data:
return resized_image_data
def load_images_single_clip(dataset, clip_idx, indices, train=True):
assert clip_idx in np.arange(dataset['n_clip_train' if train else 'n_clip_test'])
img_dir = dataset['path_train' if train else 'path_test']
n_images = count_sequence_n_frame(dataset, test=not train)[clip_idx]
print('number of images: ', n_images)
#
clip_path = '%s/%s%s/' % (img_dir, 'Train' if train else 'Test', str(clip_idx+1).zfill(3))
print(clip_path)
# image
img_files = sorted(glob.glob(clip_path + '*.tif'))
image_data = np.array([imread(img_files[idx])/255. for idx in indices]).astype(np.float32)
print('clip', clip_idx+1, image_data.shape)
return image_data
# [SECTION] CUBE PROCESSING
def split_cubes(dataset, clip_idx, cube_size, training_set=True, force_recalc=False, dist_thresh=None):
check_path(dataset, cube_size)
n_clip = dataset['n_clip_train' if training_set else 'n_clip_test']
assert clip_idx in range(n_clip)
print('clip %2d/%2d' % (clip_idx + 1, n_clip))
# load from file if existed
saved_cube_file = '%s/%s_cubes_clip_%d_size_%d_%d_%d.npz' % \
(dataset['cube_dir'], 'training' if training_set else 'test', clip_idx + 1, cube_size[0], cube_size[1], cube_size[2])
if os.path.isfile(saved_cube_file) and not force_recalc:
loader = np.load(saved_cube_file)
cubes = loader['data']
mapping = loader['mapping']
return cubes, mapping
# first load image data from file
saved_image_file = '%s/%s_image_clip_%d.npz' % (dataset['cube_dir'], 'training' if training_set else 'test', clip_idx + 1)
if not os.path.isfile(saved_image_file):
print('image file not found! (%s)' % saved_image_file)
return None, None
image_data = np.load(saved_image_file)['image']
h, w = image_data.shape[1:3]
assert h % cube_size[0] == 0
assert w % cube_size[1] == 0
h_grid, w_grid = np.array([h, w])//cube_size[:2]
# split images to cubes
d_grid = len(image_data) + 1 - cube_size[2]
cubes = np.zeros(np.concatenate(([h_grid * w_grid * d_grid], cube_size), axis=0), dtype=np.float32)
mapping = np.zeros((h_grid * w_grid * d_grid, 4), dtype=int)
print(cubes.shape, image_data.shape)
for j in range(d_grid):
for k in range(h_grid):
for l in range(w_grid):
cubes[j*h_grid*w_grid+k*w_grid+l] = np.moveaxis(image_data[j:j+cube_size[2],
k*cube_size[0]:(k+1)*cube_size[0],
l*cube_size[1]:(l+1)*cube_size[1]], 0, -1)
mapping[j*h_grid*w_grid+k*w_grid+l] = [clip_idx, j, k, l]
if dist_thresh is not None and training_set:
successive_dist = np.array([np.mean(abs(cubes[i]-cubes[i+1])) for i in range(len(cubes)-1)])
idx = np.where(successive_dist >= dist_thresh)[0]
cubes, mapping = cubes[idx], mapping[idx]
print('new shape:', cubes.shape, image_data.shape)
np.savez_compressed(saved_cube_file, data=cubes, mapping=mapping)
return cubes, mapping
def calc_n_cube_in_set(dataset, h, w, cube_size, training_set=True):
check_path(dataset, cube_size)
assert h % cube_size[0] == 0
assert w % cube_size[1] == 0
h_grid, w_grid = np.array([h, w])//cube_size[:2]
sequence_n_frame = count_sequence_n_frame(dataset, test=not training_set)
n_cube = np.sum([((n_frame + 1 - cube_size[2]) * h_grid * w_grid) for n_frame in sequence_n_frame])
return n_cube
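# Example (comments only, hypothetical sizes): with 120x160 frames and
# cube_size = [8, 8, 5], h_grid = 15 and w_grid = 20, so a 100-frame clip
# contributes (100 + 1 - 5) * 15 * 20 cubes to the set.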
def load_all_cubes_in_set(dataset, h, w, cube_size, training_set=True):
check_path(dataset, cube_size)
n_cube_in_set = calc_n_cube_in_set(dataset, h, w, cube_size, training_set=training_set)
n_clip = dataset['n_clip_train' if training_set else 'n_clip_test']
#
cubes = np.zeros(np.concatenate(([n_cube_in_set], cube_size), axis=0), dtype=np.float32)
mapping = np.zeros((n_cube_in_set, 4), dtype=int)
idx = 0
for clip_idx in range(n_clip):
tmp_cubes, tmp_mapping = split_cubes(dataset, clip_idx, cube_size, training_set=training_set)
assert len(tmp_cubes) == len(tmp_mapping)
cubes[idx:idx+len(tmp_cubes)] = tmp_cubes
mapping[idx:idx+len(tmp_mapping)] = tmp_mapping
idx += len(tmp_mapping)
# to work with thresholding motion in training samples
item_sum = np.array([np.sum(item) for item in cubes])
idx = np.where(item_sum == 0.0)[0]
cubes = np.delete(cubes, idx, axis=0)
mapping = np.delete(mapping, idx, axis=0)
print(cubes.shape, mapping.shape)
#
return cubes, mapping
# get sequence of number of clip's frames
def count_sequence_n_frame(dataset, test=True):
sequence_n_frame = np.zeros(dataset['n_clip_test' if test else 'n_clip_train'], dtype=int)
for i in range(len(sequence_n_frame)):
clip_path = '%s/%s%s/' % (dataset['path_test' if test else 'path_train'], 'Test' if test else 'Train', str(i+1).zfill(3))
sequence_n_frame[i] = len(sorted(glob.glob(clip_path + '*.tif')))
return sequence_n_frame
# 1: abnormal, 0: normal
def get_test_frame_labels(dataset, sequence_n_frame, cube_size, is_subway=False):
ground_truth = dataset['ground_truth']
assert len(ground_truth) == len(sequence_n_frame)
labels_select_last = np.zeros(0, dtype=int)
labels_select_first = np.zeros(0, dtype=int)
labels_select_mid = np.zeros(0, dtype=int)
labels_full = np.zeros(0, dtype=int)
for i in range(len(sequence_n_frame)):
if not is_subway:
seg = ground_truth[i]
# label of full frames
tmp_labels =
|
np.zeros(sequence_n_frame[i])
|
numpy.zeros
|
from .pa_rb_env import (
PAEnv,
Node
)
import numpy as np
from pathlib import Path
log2 = np.log2
cues = {
0: Node(0.1, 0, 'cue'),
1: Node(-0.1, 0, 'cue'),
}
devices = {
0: {
't_device': Node(0, 0.5, 't_device'),
'r_devices': {
0: Node(0, 0.6, 'r_device')
}
},
1: {
't_device': Node(0, -0.5, 't_device'),
'r_devices': {
0: Node(0, -0.6, 'r_device')
}
}
}
def equal(unit, target):
tolerance = 1e-6 * np.ones_like(target)
return (np.abs(unit - target) < tolerance).all()
def test_init_pos():
"""test position constraint"""
env = PAEnv(n_level=4)
def dis(node, target):
return np.sqrt(
(node.x - target.x) ** 2 +
(node.y - target.y) ** 2
)
# test bs cues
assert all(
env.r_bs <= dis(usr, env.station) <= env.R_bs
for usr in env.cues.values()
)
# test devices
for cluster in env.devices.values():
t_device, r_devices = cluster['t_device'], cluster['r_devices']
assert env.r_bs <= dis(t_device, env.station) <= (
env.R_bs - env.R_dev)
assert all(
env.r_dev <= dis(r_device, t_device) <= env.R_dev
for r_device in r_devices.values()
)
def test_jakes():
# TODO test stastic features of jakes
# target_std, target_mean = 0.429, 1.253 # Rayleigh Distribution
# x_len, y_len, Ns = H_set.shape
# h_std = np.mean([
# H_set[x, y, :].std()
# for x in range(x_len)
# for y in range(y_len)
# ])
# assert (h_std - target_std) / target_std < 0.1
# h_mean = np.mean([
# H_set[x, y, :].mean()
# for x in range(x_len)
# for y in range(y_len)
# ])
# assert (h_mean - target_mean) / target_mean < 0.05
pass
def test_init_path_loss():
"""test distance, since lognormal is random"""
env = PAEnv(n_level=4, n_pair=2, m_cue=2)
env.cues = cues
env.devices = devices
env.init_path_loss()
distance_matrix = env.distance_matrix
target_dis = np.array(
[
[0.1, 1.1, np.sqrt(0.26), np.sqrt(0.26), 0.5],
[1.1, 0.1, np.sqrt(0.26), np.sqrt(0.26), 0.5],
[0.6, 0.6, 0.1, 0.1, 0.503],
[np.sqrt(0.37), np.sqrt(0.37), 0.503, 0.2, 0.1],
[np.sqrt(0.37), np.sqrt(0.37), 0.2, 0.503, 0.1],
]
)
assert equal(distance_matrix, target_dis)
def test_get_recv_powers():
"""test get_recv_powers"""
env = PAEnv(n_level=4, n_pair=2, m_cue=1)
power = np.array([
[0.01, 0],
[0, 0.01],
[0.1, 0],
[0, 0.1],
])
emit_powers = np.tile(np.expand_dims(power, axis=1),
(1, env.n_channel, 1))
fading = np.array([
[1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],
[2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],
[3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],
[4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],
])
recv_powers = env.get_recv_powers(emit_powers, fading)
target_recv_powers = np.array([
[[1.1e-4, 0], [1.2e-4, 0], [1.3e-4, 0], [1.4e-4, 0]],
[[0, 2.1e-4], [0, 2.2e-4], [0, 2.3e-4], [0, 2.4e-4]],
[[3.1e-3, 0], [3.2e-3, 0], [3.3e-3, 0], [3.4e-3, 0]],
[[0, 4.1e-3], [0, 4.2e-3], [0, 4.3e-3], [0, 4.4e-3]],
])
assert equal(recv_powers, target_recv_powers)
def test_get_rates():
"""test get_rates"""
env = PAEnv(n_level=4, n_pair=2, m_cue=1)
recv_powers = np.array([
[[1.1e-4, 0], [1.2e-4, 0], [1.3e-4, 0], [1.4e-4, 0]],
[[0, 2.1e-4], [0, 2.2e-4], [0, 2.3e-4], [0, 2.4e-4]],
[[3.1e-3, 0], [3.2e-3, 0], [3.3e-3, 0], [3.4e-3, 0]],
[[0, 4.1e-3], [0, 4.2e-3], [0, 4.3e-3], [0, 4.4e-3]],
])
rates = env.get_rates(recv_powers)
_rate = np.array([
log2(1+1.1/31), log2(1+2.2/42), log2(1+33/1.3), log2(1+44/2.4)
])
target_rates = (_rate * np.ones((env.n_channel, env.n_channel))).T
assert equal(rates, target_rates)
def test_get_indices():
"""test get_indices"""
env = PAEnv(n_level=4, n_pair=2, m_cue=1, sorter="recv_power",
m_state=2)
power = np.array([
[0.01, 0],
[0, 0.01],
[0.1, 0],
[0, 0.1],
])
emit_powers = np.tile(np.expand_dims(power, axis=1),
(1, env.n_channel, 1))
fading = np.array([
[1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],
[2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],
[3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],
[4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],
])
recv_powers = env.get_recv_powers(emit_powers, fading)
rates = env.get_rates(recv_powers)
metrics = emit_powers, recv_powers, rates, fading
# rx_indice don't need test
tx_indice, rx_indice = env.get_indices(*metrics)
target_tx_indice = np.array([
[3, 3, 3, 2],
[0, 1, 2, 3]
])
assert equal(tx_indice, target_tx_indice)
def test_get_rewards():
env = PAEnv(n_level=4, n_pair=2, m_cue=1, sorter="recv_power",
m_state=2)
power = np.array([
[0.01, 0],
[0, 0.01],
[0.1, 0],
[0, 0.1],
])
emit_powers = np.tile(np.expand_dims(power, axis=1),
(1, env.n_channel, 1))
fading = np.array([
[1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],
[2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],
[3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],
[4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],
])
recv_powers = env.get_recv_powers(emit_powers, fading)
rates = env.get_rates(recv_powers)
metrics = emit_powers, recv_powers, rates, fading
indices = env.get_indices(*metrics)
rewards = env.get_rewards(rates, indices)
target_rewards = np.array([
log2(1+1.1/31) + log2(1+44/2.4),
log2(1+2.2/42) + log2(1+44/2.4),
log2(1+33/1.3) + log2(1+44/2.4),
log2(1+44/2.4) + log2(1+33/1.3),
])[:2]
assert equal(rewards, target_rewards)
def test_get_states():
# test m_state
env = PAEnv(n_level=4, n_pair=2, m_cue=1,
m_state=8, metrics=['emit_power', 'recv_power', 'rate'],
sorter='recv_power')
assert env.m_state == 4
env = PAEnv(n_level=4, n_pair=2, m_cue=1,
m_state=2, metrics=['emit_power', 'recv_power', 'rate'],
sorter='recv_power')
power = np.array([
[0.01, 0],
[0, 0.01],
[0.1, 0],
[0, 0.1],
])
emit_powers = np.tile(np.expand_dims(power, axis=1),
(1, env.n_channel, 1))
fading = np.array([
[1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],
[2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],
[3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],
[4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],
])
recv_powers = env.get_recv_powers(emit_powers, fading)
rates = env.get_rates(recv_powers)
metrics = emit_powers, recv_powers, rates, fading
indices = env.get_indices(*metrics)
states = env.get_states(*metrics, indices=indices)
_recv = np.array([
[[1.1e-4, 0], [1.2e-4, 0], [1.3e-4, 0], [1.4e-4, 0]],
[[0, 2.1e-4], [0, 2.2e-4], [0, 2.3e-4], [0, 2.4e-4]],
[[3.1e-3, 0], [3.2e-3, 0], [3.3e-3, 0], [3.4e-3, 0]],
[[0, 4.1e-3], [0, 4.2e-3], [0, 4.3e-3], [0, 4.4e-3]],
])
_rate = np.array([
log2(1+1.1/31), log2(1+2.2/42), log2(1+33/1.3), log2(1+44/2.4)
])
target_states = np.array([
np.concatenate([power[3],power[0],_recv[3][0],_recv[0][0],[_rate[3], _rate[0]]]),
np.concatenate([power[3],power[1],_recv[3][1],_recv[1][1],[_rate[3], _rate[1]]]),
np.concatenate([power[3],power[2],_recv[3][2],_recv[2][2],[_rate[3], _rate[2]]]),
np.concatenate([power[2],power[3],_recv[2][3],_recv[3][3],[_rate[2], _rate[3]]]),
])[:2]
assert equal(states, target_states)
def test_sorter():
# now only recv_power can be sorter
pass
def test_seed():
env = PAEnv(n_level=4, m_cue=1, seed=123)
# this is func in PAEnv to random pos
def random_point(min_r, radius, ox=0, oy=0):
theta = np.random.random() * 2 * np.pi
r = np.random.uniform(min_r, radius**2)
x, y = np.cos(theta) * np.sqrt(r),
|
np.sin(theta)
|
numpy.sin
|
"""
Worlds and bodies for agents whose habitats are ordered sequences of vectors.
"""
import os
from configuration import config as cfg
from micropsi_core.world.world import World
from micropsi_core.world.worldadapter import WorldAdapter, ArrayWorldAdapter
import numpy as np
from datetime import datetime
class TimeSeries(World):
""" A world that cycles through a fixed time series loaded from a file.
The file should be a numpy archive with the following fields:
'data': numpy array of shape (nr of ids) x (nr of timestamps)
'timestamps', a list of timestamps - the legend for the data's second axis
'ids': a list of IDs - the legend for data's first axis.
"""
supported_worldadapters = ['TimeSeriesRunner']
assets = {
'js': "timeseries/timeseries.js",
'template': 'timeseries/timeseries.tpl'
}
def __init__(self, filename, world_type="TimeSeries", name="", owner="", engine=None, uid=None, version=1, config={}):
World.__init__(self, filename, world_type=world_type, name=name, owner=owner, uid=uid, version=version, config=config)
self.data['assets'] = self.assets
filename = config.get('time_series_data_file', "timeseries.npz")
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(cfg['micropsi2']['data_directory'], filename)
self.logger.info("loading timeseries from %s for world %s" % (path, uid))
self.realtime_per_entry = int(config['realtime_per_entry'])
self.last_realtime_step = datetime.utcnow().timestamp() * 1000
try:
with np.load(path) as f:
self.timeseries = f['data']
self.ids = f['ids']
self.timestamps = f['timestamps']
except IOError as error:
self.logger.error("Could not load data file %s, error was: %s" % (path, str(error)))
self.ids = [0]
self.timeseries = [[0, 0, 0]]
self.timestamps = [0]
self.len_ts = 1
return
# todo use the new configurable world options.
dummydata = config['dummy_data'] == "True"
z_transform = config['z_transform'] == "True"
clip_and_scale = config['clip_and_scale'] == "True"
sigmoid = config['sigmoid'] == "True"
self.shuffle = config['shuffle'] == "True"
if clip_and_scale and sigmoid:
self.logger.warn("clip_and_scale and sigmoid cannot both be configured, choosing sigmoid")
clip_and_scale = False
def sigm(X):
""" sigmoid that avoids float overflows for very small inputs.
expects a numpy float array.
"""
cutoff = np.log(np.finfo(X.dtype).max) - 1
X[np.nan_to_num(X) <= -cutoff] = -cutoff
return 1. / (1. + np.exp(-X))
if (z_transform or clip_and_scale or sigmoid) and not dummydata:
data_z = np.empty_like(self.timeseries)
data_z[:] = np.nan
pstds = []
for i, row in enumerate(self.timeseries):
if not np.all(np.isnan(row)):
std = np.sqrt(np.nanvar(row))
if std > 0:
if not clip_and_scale:
row_z = (row - np.nanmean(row)) / std
if clip_and_scale:
row_z = row - np.nanmean(row)
pstd = std * 4
row_z[np.nan_to_num(row_z) > pstd] = pstd
row_z[np.nan_to_num(row_z) < -pstd] = -pstd
row_z = ((row_z / pstd) + 1) * 0.5
data_z[i,:] = row_z
self.timeseries = data_z if not sigmoid else sigm(data_z)
if dummydata:
self.logger.warn("! Using dummy data")
n_ids = self.timeseries.shape[0]
self.timeseries = np.tile(
|
np.random.rand(n_ids,1)
|
numpy.random.rand
|
import numpy as np
from scipy.linalg import logm
from sklearn.base import TransformerMixin
from pyriemann.tangentspace import TangentSpace
class Riemann(TransformerMixin):
def __init__(self, n_fb=9, metric='riemann'):
self.n_fb = n_fb
self.ts = [TangentSpace(metric=metric) for fb in range(n_fb)] # Tangent Space Learning
def fit(self, X, y):
for fb in range(self.n_fb):
self.ts[fb].fit(X[:, fb, :, :])
return self
def transform(self, X):
n_sub, n_fb, p, _ = X.shape
Xout = np.empty((n_sub, n_fb, p*(p+1)//2))
for fb in range(n_fb):
Xout[:, fb, :] = self.ts[fb].transform(X[:, fb, :, :])
return Xout.reshape(n_sub, -1) # (sub, fb * c*(c+1)/2)
class Diag(TransformerMixin):
def __init__(self):
return None
def fit(self, X, y):
return self
def transform(self, X):
n_sub, n_fb, n_compo, _ = X.shape
Xout = np.empty((n_sub, n_fb, n_compo))
for sub in range(n_sub):
for fb in range(n_fb):
Xout[sub, fb] = np.diag(X[sub, fb])
return Xout.reshape(n_sub, -1) # (sub, fb * n_compo)
class LogDiag(TransformerMixin):
def __init__(self):
return None
def fit(self, X, y):
return self
def transform(self, X):
n_sub, n_fb, n_compo, _ = X.shape
Xout = np.empty((n_sub, n_fb, n_compo))
for sub in range(n_sub):
for fb in range(n_fb):
Xout[sub, fb] = np.log10(np.diag(X[sub, fb]))
return Xout.reshape(n_sub, -1) # (sub, fb * n_compo)
class NaiveVec(TransformerMixin):
def __init__(self, method):
self.method = method
return None
def fit(self, X, y):
return self
def transform(self, X):
n_sub, n_fb, n_compo, _ = X.shape
q = int(n_compo * (n_compo+1) / 2)
Xout = np.empty((n_sub, n_fb, q))
for sub in range(n_sub):
for fb in range(n_fb):
if self.method == 'upper':
Xout[sub, fb] = X[sub, fb][np.triu_indices(n_compo)]
elif self.method == 'upperlog':
logmat = logm(X[sub, fb])
Xout[sub, fb] = logmat[np.triu_indices(n_compo)]
elif self.method == 'logdiag+upper':
logdiag = np.log10(np.diag(X[sub, fb]))
upper = X[sub, fb][np.triu_indices(n_compo, k=1)]
Xout[sub, fb] = np.concatenate((logdiag, upper), axis=None)
return Xout.reshape(n_sub, -1) # (sub, fb * c*(c+1)/2)
def to_quotient(C, rank):
d, U = np.linalg.eigh(C)
U = U[:, -rank:]
d = d[-rank:]
Y = U * np.sqrt(d)
return Y
def distance2(S1, S2, rank=None):
Sq = sqrtm(S1, rank)
P = sqrtm(np.dot(Sq,
|
np.dot(S2, Sq)
|
numpy.dot
|
import tempfile
import numpy as np
from numpy.testing import assert_array_equal
import pisces.io.readwaveform as rwf
def test_read_s3():
# 100 big-endian 3-byte samples from an s3 file
BYTS = (b'\x00\x01\x02\x00\x00\xc4\x00\x00}\x00\x00P\xff\xff\x08\xff\xfe\xfd\x00\x00\xa3\x00'
b'\x00\xa6\xff\xfe\xca\x00\x00\xca\x00\x01\x9d\xff\xff\x10\xff\xff5\xff\xffT\xff\xffV'
b'\xff\xff\xce\x00\x00J\x00\x00\xf4\x00\x01g\x00\x01\x7f\x00\x00\x01\xff\xff!\x00\x00='
b'\x00\x00\xb3\x00\x00,\x00\x00c\x00\x00\xb6\x00\x00z\xff\xff\x98\xff\xff.\x00\x00@\x00'
b'\x01\x1f\x00\x00\xc0\x00\x00\x11\x00\x01\x18\x00\x01b\xff\xff\xc9\x00\x00\x14\x00'
b'\x01A\x00\x01q\x00\x00u\xff\xff\xa8\x00\x00\x8a\xff\xff\xe2\xff\xff\x11\x00\x01\r'
b'\x00\x00\x86\xff\xfe\xf3\x00\x00\xd9\x00\x00I\xff\xff1\x00\x01\xdd\x00\x01s\x00\x00Q'
b'\x00\x01\xab\x00\x00\xb0\x00\x00\x1b\x00\x00\xb0\xff\xff\x07\xff\xff\xd8\x00\x01\x8c'
b'\xff\xffh\xff\xffo\x00\x00#\xff\xff\xae\xff\xff\xc5\xff\xff`\x00\x00.\x00\x01\x13'
b'\x00\x00\xb9\xff\xff\xf9\x00\x00(\xff\xff\xc2\xff\xfe\xe9\xff\xff\xca\x00\x01%\x00'
b'\x00\xe3\xff\xffO\xff\xfe\x82\x00\x00\x00\x00\x01\x17\xff\xff\xab\x00\x00A\x00\x00e'
b'\xff\xff\xb4\x00\x00,\x00\x00Q\x00\x01\x80\xff\xfe\xfd\xff\xfd\xb0\xff\xff\xad\x00'
b'\x00\x10\x00\x01\xe8\x00\x01\x15\xff\xff\xcf\x00\x00\xde\x00\x00\x9f\xff\xff\x85\xff'
b'\xff5\x00\x01\x8f')
# 100 corresponding values from the s3 file
data = np.array([258, 196, 125, 80, -248, -259, 163, 166, -310, 202, 413, -240, -203, -172,
-170, -50, 74, 244, 359, 383, 1, -223, 61, 179, 44, 99, 182, 122, -104, -210,
64, 287, 192, 17, 280, 354, -55, 20, 321, 369, 117, -88, 138, -30, -239, 269,
134, -269, 217, 73, -207, 477, 371, 81, 427, 176, 27, 176, -249, -40, 396,
-152, -145, 35, -82, -59, -160, 46, 275, 185, -7, 40, -62, -279, -54, 293,
227, -177, -382, 0, 279, -85, 65, 101, -76, 44, 81, 384, -259, -592, -83, 16,
488, 277, -49, 222, 159, -123, -203, 399], dtype=np.int32)
with tempfile.SpooledTemporaryFile() as f:
f.write(BYTS)
f.seek(0)
s3 = rwf.read_s3(f, 0, 100)
|
assert_array_equal(s3, data)
|
numpy.testing.assert_array_equal
|
import numpy as np
from sklearn.datasets import *
import matplotlib.pyplot as plt
import seaborn as sns
seapal = sns.color_palette('Paired')
sns.set_style('ticks')
from IPython import embed
def f(x, w):
if len(w) == 2:
y = -w[0]/w[1] *
|
np.array(x)
|
numpy.array
|
#!/usr/bin/env python
# coding: utf-8
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
from matplotlib import pyplot as plt
from scipy.linalg import block_diag
from iam.all_three_layers import IAM, features_binary, alphabet, corpus, FeatureLayer, feature_count, Connection
iam = IAM()
# # Present the ambiguous input
# We need to present features consistent with both 'WORK' and 'WORR'
# Features that are both in K and R
rk_common_features = [min(K_f, K_r) for (K_f, K_r) in zip(features_binary['K'], features_binary['R'])]
rk_common_features
# All features
ambiguous_input = np.array([
features_binary['W'],
features_binary['O'],
features_binary['R'],
rk_common_features
])
ambiguous_input
def present_ambiguous_input():
iam.feature_layer.activations = ambiguous_input.astype(float)
# # Test
def get_letter_activation(position, letter):
return iam.letter_layer.activations[position, alphabet.index(letter)]
def get_word_activation(word):
word_index = corpus.word.tolist().index(word.lower())
return iam.word_layer.activations[word_index]
def take_snapshot():
for letter, activation_list in letter_activations_history.items():
activation_list.append(get_letter_activation(position=3, letter=letter))
for word, activation_list in word_activations_history.items():
activation_list.append(get_word_activation(word))
letter_activations_history = dict(K=[], R = [], D = [])
word_activations_history = dict(WORK=[], WORD=[], WEAK=[], WEAR=[])
iam.reset_nodes()
present_ambiguous_input()
take_snapshot()
for _ in range(40):
iam.run_cycle()
take_snapshot()
plt.figure(figsize=(10, 6))
plt.plot(np.array(list(letter_activations_history.values())).T)
plt.legend(list(letter_activations_history.keys()), loc='upper left')
plt.grid()
plt.yticks(np.arange(-0.2, 1.1, 0.1));
# 
# - In our simulation, "D" gets uninhibited, in the article, "D" stayst at -0.2
# - In our simulation, "R" gets a bit activated (~0.1) and then decays towards 0, in the article, "R" grows steadily towards ~0.35
plt.figure(figsize=(10, 6))
plt.plot(np.array(list(word_activations_history.values())).T)
plt.legend(list(word_activations_history.keys()), loc='upper left')
plt.grid()
plt.yticks(np.arange(-0.2, 1.1, 0.1));
# 
# - "WORD" get less activate at the peak (~0.03) than in the article (~0.1).
# - "WEAK" and "WEAR" plateau later in our simulation.
# # Absence detectors
# There are two main differences between our implementation and the one from the article:
# - we don't have absence detectors,
# - word resting levels do not depend on the frequency.
#
# More likely, it is the absence detectors that drive the difference in letter activations.
# There are fewer feature nodes to excite "R" (absence detectors of features absent in both "R" and "K") and inhibit "D" (features absent in "R" and "K" but present in "D").
class AbsenceDetectorLayer(FeatureLayer):
def present_word(self, word):
"""Show a word to the model"""
features_absent = 1 -
|
np.array([features_binary[letter] for letter in word])
|
numpy.array
|
import torch
import numpy
import bionetwork
import plotting
import pandas
import saveSimulations
import matplotlib.pyplot as plt
import copy
import seaborn as sns
#Load network
networkList, nodeNames, modeOfAction = bionetwork.loadNetwork('data/KEGGnet-Model.tsv')
annotation = pandas.read_csv('data/KEGGnet-Annotation.tsv', sep='\t')
uniprot2gene = dict(zip(annotation['code'], annotation['name']))
bionetParams = bionetwork.trainingParameters(iterations = 60, clipping=5, leak=0.01, spectralTarget=0.9)
spectralCapacity = numpy.exp(numpy.log(1e-2)/bionetParams['iterations'])
inputAmplitude = 3
projectionAmplitude = 1.2
inName = annotation.loc[annotation['ligand'],'code'].values
outName = annotation.loc[annotation['TF'],'code'].values
inName = numpy.intersect1d(nodeNames, inName)
outName = numpy.intersect1d(nodeNames, outName)
outNameGene = [uniprot2gene[x] for x in outName]
nodeNameGene = [uniprot2gene[x] for x in nodeNames]
parameterizedModel = bionetwork.model(networkList, nodeNames, modeOfAction, inputAmplitude, projectionAmplitude, inName, outName, bionetParams)
parameterizedModel = bionetwork.loadParam('synthNetScreen/equationParams.txt', parameterizedModel, nodeNames)
#Generate data
def generateData(parameterizedModel, N = 500, simultaniousInput = 5):
numberOfInputs = parameterizedModel.inputLayer.weights.shape[0]
X = torch.zeros(N, len(inName), dtype=torch.double)
for i in range(1, N): #skip 0 to include a ctrl sample i.e. zero input
X[i, (i-1) % numberOfInputs] = torch.rand(1, dtype=torch.double) #stimulate each receptor at least once
X[i, numpy.random.randint(0, numberOfInputs, simultaniousInput-1)] = torch.rand(simultaniousInput-1, dtype=torch.double)
Y, YfullRef = parameterizedModel(X)
Y = Y.detach()
X = X.detach()
YfullRef = YfullRef.detach()
return X, Y, YfullRef
criterion1 = torch.nn.MSELoss(reduction='mean')
# X, Y, YfullRef = generateData(parameterizedModel, 500)
# importantWeights = numpy.zeros(parameterizedModel.network.weights.shape[0])
# referenceWeights = parameterizedModel.network.weights.detach().clone()
# for i in range(len(importantWeights)):
# #zero out weight
# parameterizedModel.network.weights.data[i] = 0
# Yhat, Yfull = parameterizedModel(X)
# importantWeights[i] = criterion1(Yhat, Y).item()
# #reset weight
# parameterizedModel.network.weights.data[i] = referenceWeights[i]
# print(i, importantWeights[i])
#importantWeights = importantWeights[0:10]
#selectedWeights = numpy.flip(numpy.argsort(importantWeights))
#selectedWeights = selectedWeights[0:10]
selectedWeights = numpy.array([163, 607, 29, 80, 375, 793, 760, 370, 274, 276], dtype=int)
groundTruth = networkList[:, selectedWeights[0]]
print(numpy.array(nodeNameGene)[groundTruth])
#Identify consistently important interactions
# def getImportantNodes(parameterizedModel, networkList):
# totalSamples = 100
# totalN = 500
# countWeights = numpy.zeros(parameterizedModel.network.weights.shape[0])
# topWeights = 10
# for i in range(totalSamples):
# X, Y, YfullRef = generateData(parameterizedModel, totalN)
# variabilityOfProteins = torch.std(YfullRef, axis=0)
# jointVariability = variabilityOfProteins[networkList[0]] * variabilityOfProteins[networkList[1]]
# variabilityAndWeight = jointVariability * model.network.weights
# selectedWeights = numpy.flip(numpy.argsort(numpy.abs(variabilityAndWeight.detach().numpy())))
# selectedWeights = selectedWeights[0:topWeights]
# countWeights[selectedWeights] += 1
# return countWeights
# weightImportance = getImportantNodes(parameterizedModel, networkList)
#print(weightImportance)
N = 400
X, Y, YfullRef = generateData(parameterizedModel, N)
Xtest, Ytest, YfullRef = generateData(parameterizedModel, N)
#%%
def executeErrorModel(model2, errorModel, dataIn, stateIn):
dataError = errorModel(stateIn)
#dataError = errorModel(dataIn)
Yin = model2.inputLayer(dataIn)
Yin = Yin + dataError
YhatFull = model2.network(Yin)
Yhat = model2.projectionLayer(YhatFull)
return Yhat, YhatFull, dataError
model2 = copy.deepcopy(parameterizedModel)
model2.network.weights.data[selectedWeights[0]] = 0
latentSize = 200
batchSize = 50
maxIter = 2000
L1 = 1e-4
Yhat, referenceState = model2(Xtest)
Yhat, referenceStateTest = model2(Xtest)
referenceState = referenceState.detach()
referenceStateTest = referenceStateTest.detach()
baseLine = criterion1(Yhat, Ytest)
#Define network:s
# errorModel = torch.nn.Sequential(*[torch.nn.Linear(len(inName), latentSize, bias=True),
# torch.nn.LeakyReLU(),
# torch.nn.Linear(latentSize, latentSize, bias=True),
# torch.nn.LeakyReLU(),
# torch.nn.Linear(latentSize, latentSize, bias=True),
# torch.nn.LeakyReLU(),
# torch.nn.Linear(latentSize, latentSize, bias=True),
# torch.nn.LeakyReLU(),
# torch.nn.Linear(latentSize, len(nodeNames), bias=True)])
# errorModel = errorModel.double()
errorModel = torch.nn.Sequential(*[torch.nn.Linear(referenceState.shape[1], latentSize, bias=True),
torch.nn.LeakyReLU(),
torch.nn.Linear(latentSize, latentSize, bias=True),
torch.nn.LeakyReLU(),
torch.nn.Linear(latentSize, latentSize, bias=True),
torch.nn.LeakyReLU(),
torch.nn.Linear(latentSize, latentSize, bias=True),
torch.nn.LeakyReLU(),
torch.nn.Linear(latentSize, len(nodeNames), bias=True)])
errorModel = errorModel.double()
#Setup optimizer
optimizer = torch.optim.Adam(errorModel.parameters(), lr=1, weight_decay=1e-3)
resetState = optimizer.state.copy()
#Evaluate network
stats = plotting.initProgressObject(maxIter)
e=0
for e in range(e, maxIter):
curLr = bionetwork.oneCycle(e, maxIter, maxHeight = 1e-4, minHeight = 1e-6, peak = 500)
optimizer.param_groups[0]['lr'] = curLr
curLoss = []
trainloader = bionetwork.getSamples(N, batchSize)
for dataIndex in trainloader:
optimizer.zero_grad()
dataIn = X[dataIndex, :].view(len(dataIndex), X.shape[1])
dataOut = Y[dataIndex, :].view(len(dataIndex), Y.shape[1])
stateIn = referenceState[dataIndex, :].view(len(dataIndex), referenceState.shape[1])
Yhat, YhatFull, dataError = executeErrorModel(model2, errorModel, dataIn, stateIn)
errorSparsity = L1 * torch.sum(torch.abs(dataError))
fitLoss = criterion1(Yhat, dataOut)
loss = fitLoss + errorSparsity
loss.backward()
optimizer.step()
curLoss.append(fitLoss.item())
stats = plotting.storeProgress(stats, e, loss=curLoss, lr=curLr)
if (e % 200 == 0 and e > 0 and e < maxIter*0.5):
optimizer.state = resetState.copy()
if e % 50 == 0:
Yhat, YhatFull, dataError = executeErrorModel(model2, errorModel, Xtest, referenceStateTest)
fitLoss = criterion1(Ytest, Yhat)
stats['test'][e] = fitLoss.item()
plotting.printStats(e, stats)
stats = plotting.finishProgress(stats)
#%%
plt.rcParams["figure.figsize"] = (4,4)
plt.figure()
T = numpy.array(range(stats['loss'].shape[0]))
plotting.shadePlot(T, stats['loss'], stats['lossSTD'])
nanFilter = numpy.isnan(stats['test'])==False
plt.plot(T[nanFilter], plotting.movingaverage(stats['test'][nanFilter], 5))
#plt.plot([0, len(T)], numpy.array([1, 1])*mLoss.item(), 'black', linestyle='--')
plt.plot([0, len(T)], numpy.array([1, 1])*baseLine.item(), 'red', linestyle='--')
plt.xlim([0, len(T)])
plt.ylim(bottom=1e-6)
plt.yscale('log')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(numpy.array(['Train', 'Test', 'Mean']), frameon=False)
plt.figure()
Yhat, YhatFull, dataError = executeErrorModel(model2, errorModel, X, referenceState)
YtestHat, YtestHatFull, dataError = executeErrorModel(model2, errorModel, Xtest, referenceStateTest)
plotting.plotComparison(Yhat, Y, YtestHat, Ytest)
plt.figure()
nodeNameGeneArray = numpy.array(nodeNameGene, dtype=object)
dataErrorValues = dataError.detach().numpy()
modelValues = YhatFull.detach().numpy()
# jointValues = numpy.concatenate((modelValues, dataErrorValues), axis=1)
# dataNames = 'Error_' + nodeNameGeneArray
# modelNames = 'Model_' + nodeNameGeneArray
# jointNames = numpy.concatenate((modelNames, dataNames), axis=0)
topN = 5
meanAbsActivity = numpy.mean(numpy.abs(dataErrorValues), axis=0)
candidatesTarget = numpy.flip(numpy.argsort(meanAbsActivity))[0:topN]
df = pandas.DataFrame(meanAbsActivity, index=nodeNameGeneArray, columns=['Mean activity'])
df = df.iloc[candidatesTarget,:]
sns.barplot(data=df.T)
plt.ylabel('Mean abs activity')
plt.savefig("figures/missingInteractions/posthocSource.svg")
#%%
def sensitivityAnalysis(model, errorModel, dataIn, referenceState, selectedNode):
upValue = 0.1
dataError = errorModel(referenceState)
Yin = model.inputLayer(dataIn)
Yin = Yin + dataError
YinUp = Yin.clone()
YinUp[:,selectedNode] = YinUp[:,selectedNode] + upValue
ctrl = model.network(Yin)
up = model.network(YinUp)
sensitivity = (up-ctrl)/(1 + upValue)
return sensitivity
correlatedNodes = numpy.corrcoef(dataErrorValues[:,candidatesTarget[0]], y=modelValues.T)
correlatedNodes = correlatedNodes[0,1:]
correlatedNodes[candidatesTarget[0]] = 0
candidatesSources = numpy.flip(numpy.argsort(numpy.abs(correlatedNodes)))[0:20]
# plt.figure()
# df = pandas.DataFrame(numpy.abs(correlatedNodes[candidatesSources]), index=nodeNameGeneArray[candidatesSources], columns=['Correlation'])
# sns.barplot(data=df.T, orient='h')
# plt.xlabel('abs correlation')
plt.figure()
#sensitivityAnalysis
sensitivity = sensitivityAnalysis(model2, errorModel, X, referenceState, candidatesTarget[0])
meanAbsSensitivity = numpy.mean(numpy.abs(sensitivity.detach().numpy()), axis=0)
sensitiveNodes = numpy.argwhere(meanAbsSensitivity>1e-2).flatten()
insensitiveCandididates = candidatesSources[numpy.isin(candidatesSources, sensitiveNodes)==False]
plt.figure()
df = pandas.DataFrame(numpy.abs(correlatedNodes), index=nodeNameGeneArray, columns=['Correlation'])
df = df.iloc[insensitiveCandididates,:]
sns.barplot(data=df.T, orient='h')
plt.xlabel('abs correlation')
plt.savefig("figures/missingInteractions/posthocTarget.svg")
# correlationStructure[numpy.isnan(correlationStructure)] = 0
# numpy.fill_diagonal(correlationStructure, 0)
# #correlationStructure[0:len(dataNames), 0:len(dataNames)] = 0 #ignore model-model correlations
# df = pandas.DataFrame(correlationStructure[0:len(dataNames), :], index=dataNames, columns=jointNames)
# valueRange= numpy.max(numpy.abs(df.values))
# #tresh =
# tresh = 1e-4 * valueRange
# df = df.loc[numpy.mean(numpy.abs(df))>tresh,:]
# df = df.loc[:,numpy.mean(numpy.abs(df), axis=0)>tresh]
# sns.clustermap(df, cmap='RdBu_r', vmin=-valueRange, vmax=valueRange)
# correlationStructure = numpy.cov(jointValues.T)
# correlationStructure[numpy.isnan(correlationStructure)] = 0
# numpy.fill_diagonal(correlationStructure, 0)
# #correlationStructure[0:len(dataNames), 0:len(dataNames)] = 0 #ignore model-model correlations
# df = pandas.DataFrame(correlationStructure[0:len(dataNames), :], index=dataNames, columns=jointNames)
# valueRange= numpy.max(numpy.abs(df.values))
# #tresh =
# tresh = 1e-4 * valueRange
# df = df.loc[numpy.mean(numpy.abs(df))>tresh,:]
# df = df.loc[:,numpy.mean(numpy.abs(df), axis=0)>tresh]
# sns.clustermap(df, cmap='RdBu_r', vmin=-valueRange, vmax=valueRange)
# groundTruth = networkList[:, selectedWeights[0]]
# print(nodeNameGeneArray[groundTruth])
#df = pandas.DataFrame(dataErrorValues, columns=nodeNameGene)
#sns.clustermap(df, cmap='RdBu_r', vmin=0, vmax=1)
# correlationStructure = numpy.cov(dataErrorValues.T)
# correlationStructure[numpy.isnan(correlationStructure)] = 0
# numpy.fill_diagonal(correlationStructure, 0)
# df = pandas.DataFrame(correlationStructure, columns=nodeNameGene, index=nodeNameGene)
# valueRange= numpy.max(numpy.abs(correlationStructure))
# tresh = valueRange
# df = df.loc[numpy.sum(numpy.abs(df))>tresh,:]
# df = df.loc[:,numpy.sum(numpy.abs(df), axis=0)>tresh]
# sns.clustermap(df, cmap='RdBu_r', vmin=-valueRange, vmax=valueRange)
#%%
def executeErrorModel(model2, errorModel, dataIn, noiseLevel):
dataError = errorModel(dataIn)
Yin = model2.inputLayer(dataIn)
Yin = Yin + dataError
Yin = Yin + noiseLevel * torch.randn(Yin.shape)
YhatFull = model2.network(Yin)
Yhat = model2.projectionLayer(YhatFull)
return Yhat, YhatFull, dataError
#Setup optimizer
batchSize = 50
MoAFactor = 0.1
maxIter = 3000
L1 = 1e-4 #for the sparsity of signal inputation from the fully conected layer
L2 =1e-8 #for model
spectralFactor = 1e-3
latentSize = 200
noiseLevel = 1e-3
model = bionetwork.model(networkList, nodeNames, modeOfAction, inputAmplitude, projectionAmplitude, inName, outName, bionetParams, 'MML', torch.double)
model.inputLayer.weights.requires_grad = False
model.projectionLayer.weights.requires_grad = False
#model.network.balanceWeights()
model.network.preScaleWeights(0.7)
errorModel = torch.nn.Sequential(*[torch.nn.Linear(X.shape[1], latentSize, bias=True),
torch.nn.LeakyReLU(),
torch.nn.Linear(latentSize, latentSize, bias=True),
torch.nn.LeakyReLU(),
torch.nn.Linear(latentSize, latentSize, bias=True),
torch.nn.LeakyReLU(),
torch.nn.Linear(latentSize, latentSize, bias=True),
torch.nn.LeakyReLU(),
torch.nn.Linear(latentSize, len(nodeNames), bias=True)])
errorModel = errorModel.double()
nodeDegreOut = numpy.sum(numpy.array(model.network.A.todense() != 0), axis=0)
nodeDegreOut = torch.tensor(nodeDegreOut) + 1
criterion1 = torch.nn.MSELoss(reduction='mean')
optimizer1 = torch.optim.Adam(model.parameters(), lr=1, weight_decay=0)
optimizer2 = torch.optim.Adam(errorModel.parameters(), lr=1e-4, weight_decay=1e-3)
resetState1 = optimizer1.state.copy()
resetState2 = optimizer2.state.copy()
mLoss = criterion1(torch.mean(Y, dim=0)*torch.ones(Y.shape), Y)
print(mLoss)
stats = plotting.initProgressObject(maxIter)
curState = torch.rand((N, model.network.bias.shape[0]), dtype=torch.double, requires_grad=False)
e = 0
for e in range(e, maxIter):
curLr = bionetwork.oneCycle(e, maxIter, maxHeight = 1e-3, startHeight=1e-4, endHeight=1e-6, peak = 1000)
optimizer1.param_groups[0]['lr'] = curLr
curLoss = []
curEig = []
trainloader = bionetwork.getSamples(N, batchSize) #max(10, round(N * e/maxIter)
for dataIndex in trainloader:
model.network.weights.data[selectedWeights[0]] = 0 #simulate missing interaction
dataIn = X[dataIndex, :].view(len(dataIndex), X.shape[1])
dataOut = Y[dataIndex, :].view(len(dataIndex), Y.shape[1])
optimizer1.zero_grad()
optimizer2.zero_grad()
Yhat, YhatFull, dataError = executeErrorModel(model, errorModel, dataIn, noiseLevel)
curState[dataIndex, :] = YhatFull.detach()
fitLoss = criterion1(dataOut, Yhat)
signConstraint = MoAFactor * torch.sum(torch.abs(model.network.weights[model.network.getViolations(model.network.weights)]))
errorSparsity = L1 * torch.sum(nodeDegreOut * torch.sum(torch.abs(dataError), axis=0))
#stateLoss = 1e-5 * bionetwork.uniformLoss(curState, dataIndex, YhatFull, maxConstraintFactor = 1, targetMax = 1/projectionAmplitude)
stateLoss = 1e-5 * bionetwork.uniformLossBatch(YhatFull, maxConstraintFactor = 1, targetMax = 1/projectionAmplitude)
biasLoss = L2 * torch.sum(torch.square(model.network.bias))
#absFilter = torch.abs(model.network.weights.detach())>0.001
#weightLoss = L2 * torch.sum(torch.square(model.network.weights[absFilter]))
#weightLoss = L2 * (torch.sum(torch.square(model.network.weights)) + torch.sum(1/(torch.square(model.network.weights) + 0.5)))
weightLoss = L2 * torch.sum(torch.square(model.network.weights))
spectralRadiusLoss, spectralRadius = bionetwork.spectralLoss(model.network, YhatFull.detach(), model.network.weights, expFactor = 10)
spectralRadiusLoss = spectralFactor * spectralRadiusLoss
ligandConstraint = 1e-4 * torch.sum(torch.square(model.network.bias[model.inputLayer.nodeOrder,:]))
loss = fitLoss + signConstraint + biasLoss + weightLoss + stateLoss + spectralRadiusLoss + ligandConstraint + errorSparsity
loss.backward()
optimizer1.step()
optimizer2.step()
model.network.weights.data[selectedWeights[0]] = 0 #simulate missing interaction
curEig.append(spectralRadius.item())
curLoss.append(fitLoss.item())
stats = plotting.storeProgress(stats, e, curLoss, curEig, curLr, violations=torch.sum(model.network.getViolations(model.network.weights)).item())
if e % 50 == 0:
model.eval()
Yhat, YhatFull, dataError = executeErrorModel(model, errorModel, Xtest, 0)
Yhat, YhatFull = model(Xtest)
fitLoss = criterion1(Ytest, Yhat)
stats['test'][e] = fitLoss.item()
plotting.printStats(e, stats)
if e % 200 == 0 and e > 0:
optimizer1.state = resetState1.copy()
optimizer2.state = resetState2.copy()
stats = plotting.finishProgress(stats)
#%%
plt.rcParams["figure.figsize"] = (5,5)
plt.figure()
T = numpy.array(range(stats['loss'].shape[0]))
plotting.shadePlot(T, stats['loss'], stats['lossSTD'])
nanFilter = numpy.isnan(stats['test'])==False
plt.plot(T[nanFilter], stats['test'][nanFilter])
plt.plot([0, len(T)], numpy.array([1, 1])*mLoss.item(), 'black', linestyle='--')
plt.xlim([0, len(T)])
plt.ylim(bottom=1e-6)
plt.yscale('log')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(numpy.array(['Train', 'Test', 'Mean']), frameon=False)
Yhat, _ = model(X)
modelPerformance = criterion1(Yhat, Y)
deltaWeight = model.network.weights - parameterizedModel.network.weights
deltaWeight[selectedWeights[0]] = 0 #Ignore the weight we changed
deltaBias= model.network.bias - parameterizedModel.network.bias
treshWeight = 1e-1
treshBias = 4e-2
differentialWeights = numpy.abs(deltaWeight.detach().numpy())>treshWeight
differentialBias = numpy.abs(deltaBias.detach().numpy().flatten())>treshBias
print(
|
numpy.array(nodeNameGene)
|
numpy.array
|
#
# Copyright (c) 2020 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Solver module, that provides customized solvers for the global problem
"""
from amfeti.config_base import ConfigBase
import logging
import numpy as np
import time
from copy import copy
from scipy.sparse import csr_matrix, hstack, vstack
from scipy.sparse.linalg import spsolve
__all__ = ['PCPGsolver',
'GMRESsolver',
'ORTHOMINsolver']
class GlobalSolverBase(ConfigBase):
def __init__(self):
super().__init__()
def solve(self, *args):
return None
class PCPGsolver(GlobalSolverBase):
"""
Preconditioned Conjugate Projected Gradient-iterative solver, that is usually used to solve the linear global
problem. This solver is an extension of the well-known iterative Conjugate Gradient methods by a preconditioner and
a nullspace-projection for singular problems. Moreover, this solver supports full reorthogonalization, which is able
to improve convergence, if F-orthogonality degrades during the iterations.
References
----------
[1] <NAME> and <NAME> (1999): A simple and efficient extension of a class of substructure based
preconditioners to heterogeneous structural mechanics problems. International Journal for Numerical Methods in
Engineering 44 489--516.
[2] <NAME> and <NAME> (1994): Implicit parallel processing in structural mechanics. Computational Mechanics
Advances 2 1--124.
[3] <NAME>, <NAME>, <NAME> (2018): Recycling of solution spaces in multipreconditioned FETI methods
applied to structural dynamics. International Journal of Numerical Methods in Engineering 116 141--160
doi:10.1002/nme.5918.
Attributes
----------
_config_dict : dict
configuration dictionary
"""
def __init__(self):
"""
Parameters
----------
None
"""
super().__init__()
self._config_dict = {'tolerance': 1e-7,
'max_iter': None,
'projection': None,
'precondition': None,
'energy_norm': False,
'save_history': False,
'full_reorthogonalization': False}
def solve(self, F_callback, residual_callback, lambda_init):
"""
Solve-method of the PCPG-method
Parameters
----------
F_callback : callable
method, that applies the solution-vector on the system-matrix F and returns the result
residual_callback : callable
method, that calculates and return the system's residual from the solution-vector
lambda_init : ndarray
initial guess for the solution
Returns
-------
lambda_sol : ndarray
calculated solution
info_dict : dict
general information on the solution-process
"""
logger = logging.getLogger(__name__)
interface_size = len(lambda_init)
if self._config_dict['max_iter'] is None:
self._config_dict['max_iter'] = int(1 * interface_size)
logger.info('Setting PCPG tolerance = %4.2e' % self._config_dict['tolerance'])
logger.info('Setting PCPG max number of iterations = %i' % self._config_dict['max_iter'])
# initialize variables
info_dict = {}
global_start_time = time.time()
residual_hist = np.array([])
lambda_hist = np.array([])
if self._config_dict['full_reorthogonalization']:
Y = dict()
Q = dict()
lambda_sol = np.zeros_like(lambda_init)
rk = residual_callback(lambda_init)
k = 0
for k in range(self._config_dict['max_iter']):
wk = self._project(rk)
zk = self._precondition(wk)
yk = self._project(zk)
if self._config_dict['full_reorthogonalization']:
yk1 = yk
for i in range(k):
yki = Y[i]
qki = Q[i]
yk -= np.dot(qki, yk1) / np.dot(qki, yki) * yki
elif k > 0:
yk -= np.dot(qk_1, yk) /
|
np.dot(qk_1, yk_1)
|
numpy.dot
|
import numpy as np
from collections import OrderedDict
import random
from ..utils.sfr_renumber import Topology
from ..utils import gsflow_io
from . import Defaults
from flopy.modflow import ModflowSfr2
import shapefile
import inspect
class FlowAccumulation(object):
"""
Class to perform flow accumulation on a DEM (raster) or resampled to
a model grid
Parameters
----------
data : np.ndarray
two dimensional numpy array of dem data
xcenters : np.ndarray
a two dimensional array of x coordinate cell centers
ycenters : np.ndarray
two dimensional array of y coordinate cell centers
acc_type : str
flow accumulation type, currently supported options are "d8"
hru_type : np.array
optional numpy array of hru type numbers that can be used as
a masking array to exclude lakes, swales, etc...
0 == inactive
1 == land (included in flow accumulation)
2 == lake (excluded from flow accumulation)
3 == swale (excluded from flow accumulation)
closed_basin : bool
method to indicate that basin is closed without considering lake
flow. If true hru_type 2 is used in the flow direction calculations.
False ignores hru_type 2.
flow_dir_array : np.ndarray
previously calculated flow direction array of dimension nrow, ncol
that can be supplied to FlowAccumulation
verbose : bool
flag to print verbose output
"""
def __init__(
self,
data,
xcenters,
ycenters,
acc_type="d8",
hru_type=None,
closed_basin=False,
flow_dir_array=None,
verbose=False,
):
self._defaults = Defaults().to_dict()
# set the fa type
self._acc_type = acc_type
# flow directions vectors set up as top to bot, left to right
self._d8_vectors = np.array([32, 64, 128, 16, 1, 8, 4, 2], dtype=int)
self._quiver_u = {
32: -1,
64: 0,
128: 1,
16: -1,
1: 1,
8: -1,
4: 0,
2: 1,
-1: np.nan,
-2: np.nan,
}
self._quiver_v = {
32: 1,
64: 1,
128: 1,
16: 0,
1: 0,
8: -1,
4: -1,
2: -1,
-1: np.nan,
-2: np.nan,
}
self._inner_idx = []
self._graph = OrderedDict()
self._visited = []
self._stack = []
self._dest = None
self._solved_flats = {}
# adds an extra row and col of data
data, xcenters, ycenters, hru_type = self._buffer_data(
data, xcenters, ycenters, hru_type
)
self._offset = data.shape[1]
self._offsets = np.array(
[
(-1 * self._offset) - 1,
(-1 * self._offset),
(-1 * self._offset) + 1,
-1,
1,
self._offset - 1,
self._offset,
self._offset + 1,
]
)
self._shape = data.shape
self._data = data.ravel()
self._xcenters = xcenters.ravel()
self._ycenters = ycenters.ravel()
self._hru_type = hru_type.ravel()
self._flow_directions = np.ones((self._data.size,)) * -1
self._closed_basin = closed_basin
if flow_dir_array is None:
self._flow_directions = np.ones((self._data.size,)) * -1
self._closed_basin = closed_basin
if self._closed_basin:
self._cb_inner_idx = []
self._cb_flow_directions = np.ones((self._data.size,)) * -1
else:
if self._closed_basin:
err = (
"Supplying flow direction array not supported "
"for closed basin."
)
raise Exception(err)
self._flow_directions = np.pad(
flow_dir_array, 1, "constant", constant_values=-1
).ravel()
self._set_inners()
# for flow accumulation
self._dir_coords = {
32: (-1, -1),
64: (-1, 0),
128: (-1, 1),
16: (0, -1),
1: (0, 1),
8: (1, -1),
4: (1, 0),
2: (1, 1),
}
# for watersheds
self._d8_vectors_r = np.array(
list(reversed(self._d8_vectors)), dtype=int
)
self._offset_dict = {
32: self._offsets[0],
64: self._offsets[1],
128: self._offsets[2],
16: self._offsets[3],
1: self._offsets[4],
8: self._offsets[5],
4: self._offsets[6],
2: self._offsets[7],
}
self._size = self._data.size
self._dijkstra = False
self.verbose = verbose
self._wpp = None
def get_dem_data(self):
"""
Method to get processed DEM data after flow accumulation
Returns
-------
np.ndarray
"""
return np.reshape(self._data, self._shape)[1:-1, 1:-1]
def get_hru_type(self):
return np.reshape(self._hru_type, self._shape)[1:-1, 1:-1]
def flow_directions(self, dijkstra=False, breach=0.0):
"""
Method to get flow directions array
Parameters
----------
dijkstra : bool
method to use a modified dijkstra algorithmn to solve
tricky flat areas. Default is False and uses a distance based
topology method to solve flat areas.
breach : float
absolute value of breaching tolerance for digital dams. Use
caution while applying breaching values. These should be small
numbers.
Returns
-------
np.ndarray of flow directions
"""
self._dijkstra = dijkstra
if np.all(self._flow_directions == -1):
# builds inner indices
if self._closed_basin:
self._set_inners(cb=True)
else:
self._set_inners()
# fills pits in DEM
self._fill_pits()
self._d8_flow_directions(breach)
# reset the inners after performing the closed basin calculation
if self._closed_basin:
self._cb_inner_idx = list(np.copy(self._inner_idx))
self._inner_idx = []
self._set_inners()
if self._closed_basin:
self._cb_flow_directions = np.copy(self._flow_directions)
self._flow_directions[self._hru_type == 2] = -1
flow_directions = np.copy(self._flow_directions)
flow_directions.shape = self._shape
return flow_directions[1:-1, 1:-1]
@property
def get_vectors(self):
"""
Method to get flow vectors array
Returns
-------
(u, v): tuple (np.ndarray of flow vectors)
"""
flow_direction = self.flow_directions()
u =
|
np.zeros(flow_direction.shape)
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 11:00:37 2018
Script to fit SHG interference data.
The procedure fits the data twice, first with the period free, and then with
the period fixed at the average of the first runs.
@author: <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import math
#name for file
fileout = '101618fitsHemi2.txt'
#names of each fit
names = ['hemi2pure1a','hemi2pure1b','hemi2salt1a','hemi2salt1b',\
'hemi2pure2a','hemi2pure2b','hemi2salt2a','hemi2salt2b',\
'hemi2pure3a','hemi2pure3b']
#indices to import if you have multiple files named phaseMeasureX where x is
#some number, enter the first and last indices that you want to import.
startNum = 16
endNum = 25
#open file for writing to
f = open(fileout,'w+')
#initialize data frames to hold data
countsA = pd.DataFrame()
countsB = pd.DataFrame()
pos = pd.DataFrame()
#go through each file
for i in range(endNum-startNum+1):
#names of each file
filename = 'phaseMeasure' + str(i+startNum) + '.txt'
#import countsA (signal),countsB (dark counts), and pos (stage position)
countsA[names[i]] = pd.read_csv(filename,sep='\t')['countsA']
countsB[names[i]] = pd.read_csv(filename,sep='\t')['countsB']
pos[names[i]] = pd.read_csv(filename,sep='\t')['stage']
del endNum,startNum
#function to find the av
def findAverage(series):
#set number of points per position here
interval = 20
reshaped = np.reshape(series.values,(int(len(series.values)/interval),interval))
return pd.Series(np.mean(reshaped,1))
#apply function to raw data to get average data
aveCountsA = countsA.apply(findAverage,axis=0)
aveCountsB = countsB.apply(findAverage,axis=0)
pos = pos.apply(findAverage,axis=0)
del countsA,countsB
#sort each
for column in pos.columns:
#create temp dataframe
df = pd.DataFrame()
#import data frome one run into temp data frame
df['countsA'] = aveCountsA[column]
df['countsB'] = aveCountsB[column]
df['pos'] = pos[column]
#sort this dataframe
sdf = df.sort_values('pos')
#put the sorted data back
aveCountsA[column] = sdf['countsA'].values
aveCountsB[column] = sdf['countsB'].values
pos[column] = sdf['pos'].values
del df,sdf, column
#dataframe with actual counts, corrected for dark counts
counts = aveCountsA.sub(aveCountsB)
del aveCountsA,aveCountsB
#define fit func, same as IGOR
def sinFunc(x, y0, A, f, phi):
return y0 + A*np.sin(f*x + phi)
#DO INITIAL FITTING WITH PERIODS FREE
#x values from which to plot fit function
xvalues = np.linspace(0,99.7,1000)
#write header for initial fits to file
f.write('Initial Fits\n')
f.write('Name\ty0\tA\tf\tphi\n')
#array to store the frequencies from each fit, in order to then find the average
fVals = np.array([])
#go through each column in dataframe
for column in counts.columns:
#calculate guesses for fit func
y0guess = np.mean(counts[column])
Aguess = (np.amax(counts[column])-np.amin(counts[column]))/2
fguess = 0.05;
phiguess = 0;
guesses = [y0guess,Aguess,fguess,phiguess]
#fit it
popt, pcov = curve_fit(sinFunc,pos[column],
counts[column],p0=guesses)
#calculate standard error
pstd = np.sqrt(np.diag(pcov))
# plt.figure()
# plt.plot(pos[column],counts[column],'.')
# yvalues = sinFunc(xvalues,popt[0],popt[1],popt[2],popt[3])
# plt.plot(xvalues,yvalues)
# plt.title('First' + column)
#write to file
f.write(column + '\t' +'%.1f'%popt[0] +'+-'+'%.1f'%pstd[0]+
'\t'+'%.1f'%popt[1]+'+-'+'%.1f'%pstd[1]+
'\t'+'%.4f'%popt[2]+'+-'+'%.4f'%pstd[2]+
'\t'+'%.3f'%popt[3]+'+-'+'%.3f'%pstd[3]+'\n')
fVals = np.append(fVals,popt[2])
#calculate average of f values, write to file
fAve = np.mean(fVals)
fStd = np.std(fVals)
f.write('\n')
f.write('f = '+'%.4f'%fAve+'+-'+'%.4f'%fStd+
'('+'%.0f'%(fStd/fAve*100)+'% error)'+'\n')
f.write('lambda ='+'%.2f'%(2*np.pi/fAve)+'+-'+
'%.2f'%(2*np.pi/fAve*fStd/fAve)+'\n')
f.write('\n')
#SECOND ROUND OF FITTING WITH PERIOD FIXED AT AVERAGE OF PREVIOUS
#write header
f.write('Fits with f fixed\n')
f.write('Name\ty0\tA\tf\tphi\tphi(degrees)\n')
#array to store the y0s to normalize
y0s = {}
#x values from which to plot fit function
xvalues = np.linspace(0,99.7,1000)
fits = pd.DataFrame()
#go through each column
for column in counts.columns:
#calculate guesses
y0guess = np.mean(counts[column])
Aguess = (np.amax(counts[column])-
|
np.amin(counts[column])
|
numpy.amin
|
""" Unit tests for the problem interface."""
import sys
import unittest
import itertools
from io import StringIO
import numpy as np
import openmdao.api as om
from openmdao.core.driver import Driver
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.test_suite.components.sellar import SellarDerivatives, SellarDerivativesConnected
from openmdao.utils.assert_utils import assert_near_equal, assert_warning
import openmdao.utils.hooks as hooks
from openmdao.utils.units import convert_units
from openmdao.utils.om_warnings import DerivativesWarning
try:
from parameterized import parameterized
except ImportError:
from openmdao.utils.assert_utils import SkipParameterized as parameterized
class SellarOneComp(om.ImplicitComponent):
def initialize(self):
self.options.declare('solve_y1', types=bool, default=True)
self.options.declare('solve_y2', types=bool, default=True)
def setup(self):
# Global Design Variable
self.add_input('z', val=np.array([-1., -1.]))
# Local Design Variable
self.add_input('x', val=2.)
self.add_output('y1', val=1.0)
self.add_output('y2', val=1.0)
self.add_output('R_y1')
self.add_output('R_y2')
if self.options['solve_y1']:
self.declare_partials('y1', ['x', 'z', 'y1', 'y2'])
else:
self.declare_partials('y1', 'y1')
if self.options['solve_y2']:
self.declare_partials('y2', ['z', 'y1', 'y2'])
else:
self.declare_partials('y2', 'y2')
self.declare_partials('R_y1', ['R_y1', 'x', 'z', 'y1', 'y2'])
self.declare_partials('R_y2', ['R_y2','z', 'y1', 'y2'])
def apply_nonlinear(self, inputs, outputs, residuals):
z0 = inputs['z'][0]
z1 = inputs['z'][1]
x = inputs['x']
y1 = outputs['y1']
y2 = outputs['y2']
if self.options['solve_y1']:
residuals['y1'] = (z0**2 + z1 + x - 0.2*y2) - y1
else:
residuals['y1'] = 0
if self.options['solve_y2']:
residuals['y2'] = (y1**.5 + z0 + z1) - y2
else:
residuals['y2'] = 0
residuals['R_y1'] = (z0**2 + z1 + x - 0.2*y2) - y1 - outputs['R_y1']
residuals['R_y2'] = (y1**.5 + z0 + z1) - y2 - outputs['R_y2']
def linearize(self, inputs, outputs, J):
# this will look wrong in check_partials if solve_y2 = False, but its not: R['y1'] = y1^* - y1
J['y1', 'y1'] = -1.
J['R_y1','R_y1'] = -1
if self.options['solve_y1']:
J['y1', 'x'] = [1]
J['y1', 'z'] = [2*inputs['z'][0], 1]
J['y1', 'y2'] = -0.2
J['R_y1', 'x'] = [1]
J['R_y1', 'z'] = [2*inputs['z'][0], 1]
J['R_y1', 'y1'] = -1.
J['R_y1', 'y2'] = -0.2
# this will look wrong in check_partials if solve_y2 = False, but its not" R['y1'] = y2^* - y2
J['y2','y2'] = -1
J['R_y2','R_y2'] = -1
if self.options['solve_y2']:
J['y2','z'] = [1, 1]
J['y2','y1'] = 0.5*outputs['y1']**-0.5
J['R_y2','y2'] = -1
J['R_y2','z'] = [1, 1]
J['R_y2','y1'] = 0.5*outputs['y1']**-0.5
def solve_nonlinear(self, inputs, outputs):
z0 = inputs['z'][0]
z1 = inputs['z'][1]
x = inputs['x']
y1 = outputs['y1']
y2 = outputs['y2']
outputs['R_y1'] = (z0**2 + z1 + x - 0.2*y2) - y1
outputs['R_y2'] = (y1**.5 + z0 + z1) - y2
class TestProblem(unittest.TestCase):
def test_simple_component_model_with_units(self):
class TestComp(om.ExplicitComponent):
def setup(self):
self.add_input('foo', units='N')
self.add_output('bar', units='N')
self.declare_partials('bar', 'foo')
def compute(self, inputs, outputs):
outputs['bar'] = inputs['foo']
def compute_partials(self, inputs, J):
J['bar', 'foo'] = 1.
p = om.Problem(model=TestComp())
p.setup()
p.set_val('foo', 5, units='lbf')
p.run_model()
lbf_val = convert_units(5, 'lbf', 'N')
self.assertEqual(p.get_val('foo'), lbf_val)
self.assertEqual(p.get_val('bar'), lbf_val)
def test_feature_simple_run_once_no_promote(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid())
model.set_input_defaults('comp.x', 3.0)
model.set_input_defaults('comp.y', -4.0)
prob.setup()
prob.run_model()
assert_near_equal(prob['comp.f_xy'], -15.0)
def test_feature_simple_run_once_input_input(self):
prob = om.Problem()
model = prob.model
# promote the two inputs to the same name
model.add_subsystem('comp1', Paraboloid(), promotes_inputs=['x'])
model.add_subsystem('comp2', Paraboloid(), promotes_inputs=['x'])
model.set_input_defaults('x', 3.0)
prob.setup()
prob.run_model()
assert_near_equal(prob['comp1.f_xy'], 13.0)
assert_near_equal(prob['comp2.f_xy'], 13.0)
def test_feature_simple_run_once_compute_totals(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid())
model.set_input_defaults('comp.x', 3.0)
model.set_input_defaults('comp.y', -4.0)
prob.setup()
prob.run_model()
totals = prob.compute_totals(of=['comp.f_xy'], wrt=['comp.x', 'comp.y'])
assert_near_equal(totals[('comp.f_xy', 'comp.x')][0][0], -4.0)
assert_near_equal(totals[('comp.f_xy', 'comp.y')][0][0], 3.0)
totals = prob.compute_totals(of=['comp.f_xy'], wrt=['comp.x', 'comp.y'], return_format='dict')
assert_near_equal(totals['comp.f_xy']['comp.x'][0][0], -4.0)
assert_near_equal(totals['comp.f_xy']['comp.y'][0][0], 3.0)
def test_feature_simple_run_once_compute_totals_scaled(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid())
model.set_input_defaults('comp.x', 3.0)
model.set_input_defaults('comp.y', -4.0)
model.add_design_var('comp.x', 3.0, ref0=50.0)
model.add_design_var('comp.y', -4.0)
model.add_objective('comp.f_xy')
prob.setup()
prob.run_model()
totals = prob.compute_totals(of=['comp.f_xy'], wrt=['comp.x', 'comp.y'], driver_scaling=True)
assert_near_equal(totals[('comp.f_xy', 'comp.x')][0][0], 196.0)
assert_near_equal(totals[('comp.f_xy', 'comp.y')][0][0], 3.0)
def test_feature_simple_run_once_set_deriv_mode(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid())
model.set_input_defaults('comp.x', 3.0)
model.set_input_defaults('comp.y', -4.0)
prob.setup(mode='rev')
# prob.setup(mode='fwd')
prob.run_model()
assert_near_equal(prob['comp.f_xy'], -15.0)
prob.compute_totals(of=['comp.f_xy'], wrt=['comp.x', 'comp.y'])
def test_single_string_wrt_of(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 3.0))
model.add_subsystem('p2', om.IndepVarComp('y', -4.0))
model.add_subsystem('comp', Paraboloid())
model.connect('p1.x', 'comp.x')
model.connect('p2.y', 'comp.y')
prob.setup()
prob.run_model()
totals = prob.compute_totals(of='comp.f_xy', wrt='p1.x')
assert_near_equal(totals[('comp.f_xy', 'p1.x')][0][0], -4.0)
def test_two_var_single_string_error(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 3.0))
model.add_subsystem('p2', om.IndepVarComp('y', -4.0))
model.add_subsystem('comp', Paraboloid())
model.connect('p1.x', 'comp.x')
model.connect('p2.y', 'comp.y')
prob.setup()
prob.run_model()
with self.assertRaises(KeyError) as cm:
totals = prob.compute_totals(of='comp.f_xy', wrt="p1.x, p2.y")
self.assertEqual(str(cm.exception), "'p1.x, p2.y'")
def test_compute_totals_cleanup(self):
p = om.Problem()
model = p.model
model.add_subsystem('indeps1', om.IndepVarComp('x', np.ones(5)))
model.add_subsystem('indeps2', om.IndepVarComp('x', np.ones(3)))
model.add_subsystem('MP1', om.ExecComp('y=7*x', x=np.zeros(5), y=np.zeros(5)))
model.add_subsystem('MP2', om.ExecComp('y=-3*x', x=np.zeros(3), y=np.zeros(3)))
model.add_design_var('indeps1.x')
model.add_design_var('indeps2.x')
model.add_constraint('MP1.y')
model.add_constraint('MP2.y')
model.connect('indeps1.x', 'MP1.x')
model.connect('indeps2.x', 'MP2.x')
p.setup(mode='rev')
p.run_model()
J = p.compute_totals()
assert_near_equal(J[('MP1.y', 'indeps1.x')], np.eye(5)*7., 1e-10)
assert_near_equal(J[('MP2.y', 'indeps2.x')], np.eye(3)*-3., 1e-10)
# before the bug fix, the following two derivs contained nonzero values even
# though the variables involved were not dependent on each other.
assert_near_equal(J[('MP2.y', 'indeps1.x')], np.zeros((3, 5)), 1e-10)
assert_near_equal(J[('MP1.y', 'indeps2.x')], np.zeros((5, 3)), 1e-10)
def test_set_2d_array(self):
prob = om.Problem()
model = prob.model
model.add_subsystem(name='indeps',
subsys=om.IndepVarComp(name='X_c', shape=(3, 1)))
prob.setup()
new_val = -5*np.ones((3, 1))
prob['indeps.X_c'] = new_val
prob.final_setup()
assert_near_equal(prob['indeps.X_c'], new_val, 1e-10)
new_val = 2.5*np.ones(3)
prob['indeps.X_c'][:, 0] = new_val
prob.final_setup()
assert_near_equal(prob['indeps.X_c'], new_val.reshape((3, 1)), 1e-10)
assert_near_equal(prob['indeps.X_c'][:, 0], new_val, 1e-10)
def test_set_checks_shape(self):
model = om.Group()
indep = model.add_subsystem('indep', om.IndepVarComp())
indep.add_output('num')
indep.add_output('arr', shape=(10, 1))
prob = om.Problem(model)
prob.setup()
msg = "Incompatible shape for '.*': Expected (.*) but got (.*)"
# check valid scalar value
new_val = -10.
prob['indep.num'] = new_val
assert_near_equal(prob['indep.num'], new_val, 1e-10)
# check bad scalar value
bad_val = -10*np.ones((10))
prob['indep.num'] = bad_val
with self.assertRaisesRegex(ValueError,
"<model> <class Group>: Failed to set value of '.*': could not broadcast input array from shape (.*) into shape (.*)."):
prob.final_setup()
prob._initial_condition_cache = {}
# check assign scalar to array
arr_val = new_val*np.ones((10, 1))
prob['indep.arr'] = new_val
prob.final_setup()
assert_near_equal(prob['indep.arr'], arr_val, 1e-10)
# check valid array value
new_val = -10*np.ones((10, 1))
prob['indep.arr'] = new_val
assert_near_equal(prob['indep.arr'], new_val, 1e-10)
msg = "<model> <class Group>: Failed to set value of '.*': could not broadcast input array from shape (.*) into shape (.*)."
# check bad array value
bad_val = -10*np.ones((9,1))
with self.assertRaisesRegex(ValueError, msg):
prob['indep.arr'] = bad_val
# check valid list value
new_val = new_val.tolist()
prob['indep.arr'] = new_val
assert_near_equal(prob['indep.arr'], new_val, 1e-10)
# check bad list value
bad_val = bad_val.tolist()
with self.assertRaisesRegex(ValueError, msg):
prob['indep.arr'] = bad_val
def test_compute_totals_basic(self):
# Basic test for the method using default solvers on simple model.
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
prob.setup(check=False, mode='fwd')
prob.set_solver_print(level=0)
prob.run_model()
of = ['f_xy']
wrt = ['x', 'y']
derivs = prob.compute_totals(of=of, wrt=wrt)
assert_near_equal(derivs['f_xy', 'x'], [[-6.0]], 1e-6)
assert_near_equal(derivs['f_xy', 'y'], [[8.0]], 1e-6)
prob.setup(check=False, mode='rev')
prob.run_model()
of = ['f_xy']
wrt = ['x', 'y']
derivs = prob.compute_totals(of=of, wrt=wrt)
assert_near_equal(derivs['f_xy', 'x'], [[-6.0]], 1e-6)
assert_near_equal(derivs['f_xy', 'y'], [[8.0]], 1e-6)
def test_compute_totals_basic_return_dict(self):
# Make sure 'dict' return_format works.
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
prob.setup(check=False, mode='fwd')
prob.set_solver_print(level=0)
prob.run_model()
of = ['f_xy']
wrt = ['x', 'y']
derivs = prob.compute_totals(of=of, wrt=wrt, return_format='dict')
assert_near_equal(derivs['f_xy']['x'], [[-6.0]], 1e-6)
assert_near_equal(derivs['f_xy']['y'], [[8.0]], 1e-6)
prob.setup(check=False, mode='rev')
prob.run_model()
of = ['f_xy']
wrt = ['x', 'y']
derivs = prob.compute_totals(of=of, wrt=wrt, return_format='dict')
assert_near_equal(derivs['f_xy']['x'], [[-6.0]], 1e-6)
assert_near_equal(derivs['f_xy']['y'], [[8.0]], 1e-6)
def test_compute_totals_no_args_no_desvar(self):
p = om.Problem()
dv = p.model.add_subsystem('des_vars', om.IndepVarComp())
dv.add_output('x', val=2.)
p.model.add_subsystem('calc', om.ExecComp('y=2*x'))
p.model.connect('des_vars.x', 'calc.x')
p.model.add_objective('calc.y')
p.setup()
p.run_model()
with self.assertRaises(RuntimeError) as cm:
p.compute_totals()
self.assertEqual(str(cm.exception),
"Driver is not providing any design variables for compute_totals.")
def test_compute_totals_no_args_no_response(self):
p = om.Problem()
dv = p.model.add_subsystem('des_vars', om.IndepVarComp())
dv.add_output('x', val=2.)
p.model.add_subsystem('calc', om.ExecComp('y=2*x'))
p.model.connect('des_vars.x', 'calc.x')
p.model.add_design_var('des_vars.x')
p.setup()
p.run_model()
with self.assertRaises(RuntimeError) as cm:
p.compute_totals()
self.assertEqual(str(cm.exception),
"Driver is not providing any response variables for compute_totals.")
def test_compute_totals_no_args(self):
p = om.Problem()
dv = p.model.add_subsystem('des_vars', om.IndepVarComp())
dv.add_output('x', val=2.)
p.model.add_subsystem('calc', om.ExecComp('y=2*x'))
p.model.connect('des_vars.x', 'calc.x')
p.model.add_design_var('des_vars.x')
p.model.add_objective('calc.y')
p.setup()
p.run_model()
derivs = p.compute_totals()
assert_near_equal(derivs['calc.y', 'des_vars.x'], [[2.0]], 1e-6)
def test_compute_totals_no_args_promoted(self):
p = om.Problem()
dv = p.model.add_subsystem('des_vars', om.IndepVarComp(), promotes=['*'])
dv.add_output('x', val=2.)
p.model.add_subsystem('calc', om.ExecComp('y=2*x'), promotes=['*'])
p.model.add_design_var('x')
p.model.add_objective('y')
p.setup()
p.run_model()
derivs = p.compute_totals()
assert_near_equal(derivs['calc.y', 'des_vars.x'], [[2.0]], 1e-6)
@parameterized.expand(itertools.product(['fwd', 'rev']))
def test_compute_jacvec_product(self, mode):
prob = om.Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.setup(mode=mode)
prob.run_model()
of = ['obj', 'con1']
wrt = ['_auto_ivc.v1', '_auto_ivc.v0']
if mode == 'fwd':
seed_names = wrt
result_names = of
rvec = prob.model._vectors['output']['linear']
lvec = prob.model._vectors['residual']['linear']
else:
seed_names = of
result_names = wrt
rvec = prob.model._vectors['residual']['linear']
lvec = prob.model._vectors['output']['linear']
J = prob.compute_totals(of, wrt, return_format='array')
seed = []
for name in seed_names:
seed.append(np.random.random(rvec[name].size))
resdict = prob.compute_jacvec_product(of, wrt, mode, seed)
result = []
for name in result_names:
result.append(resdict[name].flat)
result = np.hstack(result)
testvec = np.hstack(seed)
if mode == 'fwd':
checkvec = J.dot(testvec)
else:
checkvec = J.T.dot(testvec)
np.testing.assert_allclose(checkvec, result)
def test_feature_set_indeps(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
prob.setup()
prob['x'] = 2.
prob['y'] = 10.
prob.run_model()
assert_near_equal(prob['f_xy'], 214.0, 1e-6)
def test_feature_set_indeps_auto(self):
prob = om.Problem()
prob.model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
prob.setup()
prob.set_val('x', 2.)
prob.set_val('y', 10.)
prob.run_model()
assert_near_equal(prob.get_val('f_xy'), 214.0, 1e-6)
def test_feature_basic_setup(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
prob.setup()
prob.set_val('x', 2.)
prob.set_val('y', 10.)
prob.run_model()
assert_near_equal(prob.get_val('f_xy'), 214.0, 1e-6)
prob.set_val('x', 0.)
prob.set_val('y', 0.)
prob.run_model()
assert_near_equal(prob.get_val('f_xy'), 22.0, 1e-6)
prob.setup()
prob.set_val('x', 4.)
prob.set_val('y', 8.)
prob.run_model()
assert_near_equal(prob.get_val('f_xy'), 174.0, 1e-6)
def test_feature_petsc_setup(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
# PETScVectors will be used automatically where needed. No need to set manually.
prob.setup()
prob['x'] = 2.
prob['y'] = 10.
prob.run_model()
assert_near_equal(prob['f_xy'], 214.0, 1e-6)
def test_feature_check_totals_manual(self):
prob = om.Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.setup()
prob.run_model()
# manually specify which derivatives to check
prob.check_totals(of=['obj', 'con1'], wrt=['x', 'z'])
def test_feature_check_totals_from_driver_compact(self):
prob = om.Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', upper=0.0)
prob.model.add_constraint('con2', upper=0.0)
prob.setup()
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
# check derivatives of all obj+constraints w.r.t all design variables
prob.check_totals(compact_print=True)
def test_feature_check_totals_from_driver(self):
prob = om.Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', upper=0.0)
prob.model.add_constraint('con2', upper=0.0)
prob.setup()
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
# check derivatives of all obj+constraints w.r.t all design variables
prob.check_totals()
def test_feature_check_totals_from_driver_scaled(self):
prob = om.Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100, ref=100.0, ref0=-100.0)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', upper=0.0, ref=3.0)
prob.model.add_constraint('con2', upper=0.0, ref=20.0)
prob.setup()
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
# check derivatives of all driver vars using the declared scaling
prob.check_totals(driver_scaling=True)
def test_feature_check_totals_suppress(self):
prob = om.Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', upper=0.0)
prob.model.add_constraint('con2', upper=0.0)
prob.setup()
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
# check derivatives of all obj+constraints w.r.t all design variables
totals = prob.check_totals(out_stream=None)
print(totals)
def test_feature_check_totals_cs(self):
prob = om.Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', upper=0.0)
prob.model.add_constraint('con2', upper=0.0)
prob.setup(force_alloc_complex=True)
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
prob.model.nonlinear_solver.options['atol'] = 1e-15
prob.model.nonlinear_solver.options['rtol'] = 1e-15
# check derivatives with complex step
prob.check_totals(method='cs')
def test_check_totals_user_detect(self):
class SimpleComp(om.ExplicitComponent):
def setup(self):
self.add_input('x', val=1.0)
self.add_output('y', val=1.0)
self.declare_partials(of='y', wrt='x')
if not self._force_alloc_complex:
raise RuntimeError('force_alloc_complex not set in component.')
def compute(self, inputs, outputs):
outputs['y'] = 3.0*inputs['x']
if inputs.iscomplex() and not self.under_complex_step:
raise RuntimeError('under_complex_step not set in component.')
def compute_partials(self, inputs, partials):
partials['y', 'x'] = 3.
prob = om.Problem()
prob.model.add_subsystem('px', om.IndepVarComp('x', 2.0))
prob.model.add_subsystem('comp', SimpleComp())
prob.model.connect('px.x', 'comp.x')
prob.model.add_design_var('px.x', lower=-100, upper=100)
prob.model.add_objective('comp.y')
prob.setup(force_alloc_complex=True)
prob.run_model()
# check derivatives with complex step
prob.check_totals(method='cs', out_stream=None)
self.assertFalse(prob.model.under_complex_step,
msg="The under_complex_step flag should be reset.")
def test_feature_check_totals_user_detect_forced(self):
class SimpleComp(om.ExplicitComponent):
def setup(self):
self.add_input('x', val=1.0)
self.add_output('y', val=1.0)
self.declare_partials(of='y', wrt='x')
if self._force_alloc_complex:
print("Vectors allocated for complex step.")
def compute(self, inputs, outputs):
outputs['y'] = 3.0*inputs['x']
def compute_partials(self, inputs, partials):
partials['y', 'x'] = 3.
prob = om.Problem()
prob.model.add_subsystem('comp', SimpleComp())
prob.model.add_design_var('comp.x', lower=-100, upper=100)
prob.model.add_objective('comp.y')
prob.setup(force_alloc_complex=True)
prob.run_model()
prob.check_totals(method='cs')
def test_set_cs_error_messages(self):
prob = om.Problem()
prob.model.add_subsystem('comp', Paraboloid())
prob.setup()
prob.run_model()
with self.assertRaises(RuntimeError) as cm:
prob.set_complex_step_mode(True)
msg = "Problem: To enable complex step, specify 'force_alloc_complex=True' when calling " + \
"setup on the problem, e.g. 'problem.setup(force_alloc_complex=True)'"
self.assertEqual(cm.exception.args[0], msg)
prob = om.Problem()
prob.model.add_subsystem('comp', Paraboloid())
with self.assertRaises(RuntimeError) as cm:
prob.set_complex_step_mode(True)
msg = "Problem: set_complex_step_mode cannot be called before `Problem.run_model()`, " + \
"`Problem.run_driver()`, or `Problem.final_setup()`."
self.assertEqual(cm.exception.args[0], msg)
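# Hedged sketch (editor addition, not part of the original test): the two error messages
# above imply the valid call order -- request complex allocation at setup time and run
# the model once before toggling complex-step mode.
prob = om.Problem()
prob.model.add_subsystem('comp', Paraboloid())
prob.setup(force_alloc_complex=True)
prob.run_model()
prob.set_complex_step_mode(True)
# ... complex-valued gets/sets would go here ...
prob.set_complex_step_mode(False)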
def test_feature_run_driver(self):
prob = om.Problem(model=SellarDerivatives())
model = prob.model
model.nonlinear_solver = om.NonlinearBlockGS()
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['tol'] = 1e-9
model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', upper=0.0)
model.add_constraint('con2', upper=0.0)
prob.setup()
prob.run_driver()
assert_near_equal(prob.get_val('x'), 0.0, 1e-5)
assert_near_equal(prob.get_val('y1'), 3.160000, 1e-2)
assert_near_equal(prob.get_val('y2'), 3.755278, 1e-2)
assert_near_equal(prob.get_val('z'), [1.977639, 0.000000], 1e-2)
assert_near_equal(prob.get_val('obj'), 3.18339395, 1e-2)
def test_feature_promoted_sellar_set_get_outputs(self):
prob = om.Problem(model=SellarDerivatives())
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.setup()
prob.set_val('x', 2.75)
prob.run_model()
assert_near_equal(prob.get_val('y1'), 27.3049178437, 1e-6)
def test_feature_not_promoted_sellar_set_get_outputs(self):
prob = om.Problem(model=SellarDerivativesConnected())
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.setup()
prob.set_val('x', 2.75)
prob.run_model()
assert_near_equal(prob.get_val('x'), 2.75, 1e-6)
assert_near_equal(prob.get_val('d1.y1'), 27.3049178437, 1e-6)
def test_feature_promoted_sellar_set_get_inputs(self):
prob = om.Problem(model=SellarDerivatives())
prob.model.nonlinear_solver = om.NonlinearBlockGS()
prob.setup()
prob['x'] = 2.75
prob.run_model()
assert_near_equal(prob['x'], 2.75, 1e-6)
# the output variable, referenced by the promoted name
assert_near_equal(prob['y1'], 27.3049178437, 1e-6)
# the connected input variable, referenced by the absolute path
assert_near_equal(prob['d2.y1'], 27.3049178437, 1e-6)
def test_get_set_with_units_exhaustive(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x-25.',
x={'val': 77.0, 'units': 'degF'},
y={'val': 0.0, 'units': 'degC'}))
prob.model.add_subsystem('prom', om.ExecComp('yy=xx-25.',
xx={'val': 77.0, 'units': 'degF'},
yy={'val': 0.0, 'units': 'degC'}),
promotes=['xx', 'yy'])
prob.model.add_subsystem('acomp', om.ExecComp('y=x-25.',
x={'val': np.array([77.0, 95.0]), 'units': 'degF'},
y={'val': 0.0, 'units': 'degC'}))
prob.model.add_subsystem('aprom', om.ExecComp('ayy=axx-25.',
axx={'val': np.array([77.0, 95.0]), 'units': 'degF'},
ayy={'val': 0.0, 'units': 'degC'}),
promotes=['axx', 'ayy'])
prob.setup()
# Make sure everything works before final_setup, which relies on the value-caching system.
# Gets
assert_near_equal(prob.get_val('comp.x'), 77.0, 1e-6)
assert_near_equal(prob.get_val('comp.x', 'degC'), 25.0, 1e-6)
assert_near_equal(prob.get_val('comp.y'), 0.0, 1e-6)
assert_near_equal(prob.get_val('comp.y', 'degF'), 32.0, 1e-6)
assert_near_equal(prob.get_val('xx'), 77.0, 1e-6)
assert_near_equal(prob.get_val('xx', 'degC'), 25.0, 1e-6)
assert_near_equal(prob.get_val('yy'), 0.0, 1e-6)
assert_near_equal(prob.get_val('yy', 'degF'), 32.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', indices=0), 77.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', indices=[1]), 95.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', 'degC', indices=[0]), 25.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', 'degC', indices=1), 35.0, 1e-6)
assert_near_equal(prob.get_val('acomp.y', indices=0), 0.0, 1e-6)
assert_near_equal(prob.get_val('acomp.y', 'degF', indices=0), 32.0, 1e-6)
assert_near_equal(prob.get_val('axx', indices=0), 77.0, 1e-6)
assert_near_equal(prob.get_val('axx', indices=1), 95.0, 1e-6)
assert_near_equal(prob.get_val('axx', 'degC', indices=0), 25.0, 1e-6)
assert_near_equal(prob.get_val('axx', 'degC', indices=np.array([1])), 35.0, 1e-6)
assert_near_equal(prob.get_val('ayy', indices=0), 0.0, 1e-6)
assert_near_equal(prob.get_val('ayy', 'degF', indices=0), 32.0, 1e-6)
# Sets
prob.set_val('comp.x', 30.0, 'degC')
assert_near_equal(prob['comp.x'], 86.0, 1e-6)
assert_near_equal(prob.get_val('comp.x', 'degC'), 30.0, 1e-6)
prob.set_val('xx', 30.0, 'degC')
assert_near_equal(prob['xx'], 86.0, 1e-6)
assert_near_equal(prob.get_val('xx', 'degC'), 30.0, 1e-6)
prob.set_val('acomp.x', 30.0, 'degC', indices=[0])
assert_near_equal(prob['acomp.x'][0], 86.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', 'degC', indices=0), 30.0, 1e-6)
prob.set_val('axx', 30.0, 'degC', indices=0)
assert_near_equal(prob['axx'][0], 86.0, 1e-6)
assert_near_equal(prob.get_val('axx', 'degC', indices=np.array([0])), 30.0, 1e-6)
prob.final_setup()
# Now we do it all over again for coverage.
# Gets
assert_near_equal(prob.get_val('comp.x'), 86.0, 1e-6)
assert_near_equal(prob.get_val('comp.x', 'degC'), 30.0, 1e-6)
assert_near_equal(prob.get_val('comp.y'), 0.0, 1e-6)
assert_near_equal(prob.get_val('comp.y', 'degF'), 32.0, 1e-6)
assert_near_equal(prob.get_val('xx'), 86.0, 1e-6)
assert_near_equal(prob.get_val('xx', 'degC'), 30.0, 1e-6)
assert_near_equal(prob.get_val('yy'), 0.0, 1e-6)
assert_near_equal(prob.get_val('yy', 'degF'), 32.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', indices=0), 86.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', indices=[1]), 95.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', 'degC', indices=[0]), 30.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', 'degC', indices=1), 35.0, 1e-6)
assert_near_equal(prob.get_val('acomp.y', indices=0), 0.0, 1e-6)
assert_near_equal(prob.get_val('acomp.y', 'degF', indices=0), 32.0, 1e-6)
assert_near_equal(prob.get_val('axx', indices=0), 86.0, 1e-6)
assert_near_equal(prob.get_val('axx', indices=1), 95.0, 1e-6)
assert_near_equal(prob.get_val('axx', 'degC', indices=0), 30.0, 1e-6)
assert_near_equal(prob.get_val('axx', 'degC', indices=np.array([1])), 35.0, 1e-6)
assert_near_equal(prob.get_val('ayy', indices=0), 0.0, 1e-6)
assert_near_equal(prob.get_val('ayy', 'degF', indices=0), 32.0, 1e-6)
# Sets
prob.set_val('comp.x', 35.0, 'degC')
assert_near_equal(prob['comp.x'], 95.0, 1e-6)
assert_near_equal(prob.get_val('comp.x', 'degC'), 35.0, 1e-6)
prob.set_val('xx', 35.0, 'degC')
assert_near_equal(prob['xx'], 95.0, 1e-6)
assert_near_equal(prob.get_val('xx', 'degC'), 35.0, 1e-6)
prob.set_val('acomp.x', 35.0, 'degC', indices=[0])
assert_near_equal(prob['acomp.x'][0], 95.0, 1e-6)
assert_near_equal(prob.get_val('acomp.x', 'degC', indices=0), 35.0, 1e-6)
prob.set_val('axx', 35.0, 'degC', indices=0)
assert_near_equal(prob['axx'][0], 95.0, 1e-6)
assert_near_equal(prob.get_val('axx', 'degC', indices=np.array([0])), 35.0, 1e-6)
def test_feature_get_set_with_units_diff_err(self):
prob = om.Problem()
prob.model.add_subsystem('C1', om.ExecComp('y=x*2.',
x={'val': 1.0, 'units': 'ft'},
y={'val': 0.0, 'units': 'ft'}),
promotes=['x'])
prob.model.add_subsystem('C2', om.ExecComp('y=x*3.',
x={'val': 1.0, 'units': 'inch'},
y={'val': 0.0, 'units': 'inch'}),
promotes=['x'])
try:
prob.setup()
except RuntimeError as err:
self.assertEqual(str(err), "<model> <class Group>: The following inputs, ['C1.x', 'C2.x'], promoted to 'x', are connected but their metadata entries ['units', 'val'] differ. Call <group>.set_input_defaults('x', units=?, val=?), where <group> is the model to remove the ambiguity.")
else:
self.fail("Exception expected.")
def test_feature_get_set_with_units_diff(self):
prob = om.Problem()
G1 = prob.model.add_subsystem('G1', om.Group())
G1.add_subsystem('C1', om.ExecComp('y=x*2.',
x={'val': 1.0, 'units': 'cm'},
y={'val': 0.0, 'units': 'cm'}),
promotes=['x'])
G1.add_subsystem('C2', om.ExecComp('y=x*3.',
x={'val': 1.0, 'units': 'mm'},
y={'val': 0.0, 'units': 'mm'}),
promotes=['x'])
# units and value to use for the _auto_ivc output are ambiguous. This fixes that.
G1.set_input_defaults('x', units='m', val=1.0)
prob.setup()
# set G1.x to 2.0 m, based on the units we gave in the set_input_defaults call
prob.set_val('G1.x', 2.)
prob.run_model()
# we gave 'G1.x' units of 'm' in the set_input_defaults call
assert_near_equal(prob.get_val('G1.x'), 2.0, 1e-6)
# using the absolute name will give us the value of the input C1.x, in its units of 'cm'
assert_near_equal(prob.get_val('G1.C1.x'), 200.0, 1e-6)
# using the absolute name will give us the value of the input C2.x, in its units of 'mm'
assert_near_equal(prob.get_val('G1.C2.x'), 2000.0, 1e-6)
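# Hedged addition (editor note): the promoted value can also be read back in any other
# compatible unit; 2.0 m of 'G1.x' corresponds to 200.0 cm.
assert_near_equal(prob.get_val('G1.x', 'cm'), 200.0, 1e-6)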
def test_feature_get_set_with_src_indices_diff(self):
prob = om.Problem()
G1 = prob.model.add_subsystem('G1', om.Group())
G1.add_subsystem('C1', om.ExecComp('y=x*2.',
x={'val': 1.0, 'units': 'cm', 'src_indices': [0]},
y={'val': 0.0, 'units': 'cm'}),
promotes=['x'])
G1.add_subsystem('C2', om.ExecComp('y=x*3.',
x={'val': 1.0, 'units': 'mm', 'src_indices': [1,2]},
y={'val': np.zeros(2), 'units': 'mm'}),
promotes=['x'])
G1.add_subsystem('C3', om.ExecComp('y=x*4.',
x={'val': np.ones(3), 'units': 'mm'},
y={'val': np.zeros(3), 'units': 'mm'}),
promotes=['x'])
# units and value to use for the _auto_ivc output are ambiguous. This fixes that.
G1.set_input_defaults('x', units='m', val=np.ones(3))
prob.setup()
# set G1.x to 2.0 m, based on the units we gave in the set_input_defaults call
prob['G1.x'] = np.ones(3) * 2.0
prob.run_model()
# we gave 'G1.x' units of 'm' in the set_input_defaults call
assert_near_equal(prob['G1.x'], np.ones(3) * 2.0, 1e-6)
# using the absolute name will give us the value of the input C1.x, in its units of 'cm'
assert_near_equal(prob['G1.C1.x'], 200.0, 1e-6)
assert_near_equal(prob['G1.C1.y'], 400.0, 1e-6)
# using the absolute name will give us the value of the input C2.x, in its units of 'mm'
assert_near_equal(prob['G1.C2.x'], np.ones(2) * 2000.0, 1e-6)
assert_near_equal(prob['G1.C2.y'], np.ones(2) * 6000.0, 1e-6)
def test_feature_get_set_with_units_prom_plus_explicit(self):
prob = om.Problem()
prob.model.add_subsystem('indeps', om.IndepVarComp('x', val=1.0, units='m'))
G1 = prob.model.add_subsystem('G1', om.Group())
G1.add_subsystem('C1', om.ExecComp('y=x*2.',
x={'val': 1.0, 'units': 'cm'},
y={'val': 0.0, 'units': 'cm'}),
promotes=['x'])
G1.add_subsystem('C2', om.ExecComp('y=x*3.',
x={'val': 1.0, 'units': 'mm'},
y={'val': 0.0, 'units': 'mm'}),
promotes=['x'])
# connect IVC to promoted inputs
prob.model.connect('indeps.x', 'G1.x')
# units and value to use for the _auto_ivc output are ambiguous. This fixes that.
G1.set_input_defaults('x', units='dm', val=1.0)
prob.setup()
prob['indeps.x'] = 2.0
prob.run_model()
assert_near_equal(prob['indeps.x'], 2.0, 1e-6)
# using the promoted name of the inputs will give the value
# in the units set in set_input_defaults, which is 'dm'
assert_near_equal(prob['G1.x'], 20.0, 1e-6)
# get value from lower level group
assert_near_equal(G1.get_val('x'), 20.0, 1e-6)
# using the absolute name will give us the value of the input C1.x, in its units of 'cm'
assert_near_equal(prob['G1.C1.x'], 200.0, 1e-6)
# using the absolute name will give us the value of the input C2.x, in its units of 'mm'
assert_near_equal(prob['G1.C2.x'], 2000.0, 1e-6)
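# Hedged addition (editor note): reading the promoted input back in the source's own
# units of 'm' recovers the value set on 'indeps.x'.
assert_near_equal(prob.get_val('G1.x', 'm'), 2.0, 1e-6)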
def test_feature_get_set_with_units_prom_plus_explicit_err(self):
prob = om.Problem()
prob.model.add_subsystem('indeps', om.IndepVarComp('x', val=1.0, units='m'))
G1 = prob.model.add_subsystem('G1', om.Group())
G1.add_subsystem('C1', om.ExecComp('y=x*2.',
x={'val': 1.0, 'units': 'cm'},
y={'val': 0.0, 'units': 'cm'}),
promotes=['x'])
G1.add_subsystem('C2', om.ExecComp('y=x*3.',
x={'val': 1.0, 'units': 'mm'},
y={'val': 0.0, 'units': 'mm'}),
promotes=['x'])
# connect IVC to promoted inputs
prob.model.connect('indeps.x', 'G1.x')
prob.setup()
prob['indeps.x'] = 2.0
prob.run_model()
assert_near_equal(prob['indeps.x'], 2.0, 1e-6)
# using the absolute name will give us the value of the input C1.x, in its units of 'cm'
assert_near_equal(prob['G1.C1.x'], 200.0, 1e-6)
# using the absolute name will give us the value of the input C2.x, in its units of 'mm'
assert_near_equal(prob['G1.C2.x'], 2000.0, 1e-6)
# using the promoted name of the inputs will raise an exception because the two promoted
# inputs have different units and set_input_defaults was not called to disambiguate.
with self.assertRaises(RuntimeError) as cm:
x = prob['G1.x']
msg = "<model> <class Group>: The following inputs, ['G1.C1.x', 'G1.C2.x'], promoted to 'G1.x', are connected but their metadata entries ['units'] differ. Call <group>.set_input_defaults('x', units=?), where <group> is the Group named 'G1' to remove the ambiguity."
self.assertEqual(cm.exception.args[0], msg)
def test_get_set_with_units_error_messages(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x+1.',
x={'val': 100.0, 'units': 'cm'},
y={'units': 'm'}))
prob.model.add_subsystem('no_unit', om.ExecComp('y=x+1.', x={'val': 100.0}))
prob.setup()
prob.run_model()
msg = "Can't express variable 'comp.x' with units of 'cm' in units of 'degK'."
with self.assertRaisesRegex(TypeError, msg):
prob.get_val('comp.x', 'degK')
with self.assertRaisesRegex(TypeError, msg):
prob.set_val('comp.x', 55.0, 'degK')
msg = "Can't express variable 'no_unit.x' with units of 'None' in units of 'degK'."
with self.assertRaisesRegex(TypeError, msg):
prob.get_val('no_unit.x', 'degK')
with self.assertRaisesRegex(TypeError, msg):
prob.set_val('no_unit.x', 55.0, 'degK')
def test_feature_get_set_with_units(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x+1.',
x={'val': 100.0, 'units': 'cm'},
y={'units': 'm'}))
prob.setup()
prob.run_model()
assert_near_equal(prob.get_val('comp.x'), 100, 1e-6)
assert_near_equal(prob.get_val('comp.x', 'm'), 1.0, 1e-6)
prob.set_val('comp.x', 10.0, 'mm')
assert_near_equal(prob.get_val('comp.x'), 1.0, 1e-6)
assert_near_equal(prob.get_val('comp.x', 'm'), 1.0e-2, 1e-6)
def test_feature_get_set_array_with_units(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x+1.',
x={'val': np.array([100.0, 33.3]), 'units': 'cm'},
y={'shape': (2, ), 'units': 'm'}))
prob.setup()
prob.run_model()
assert_near_equal(prob.get_val('comp.x'), np.array([100, 33.3]), 1e-6)
assert_near_equal(prob.get_val('comp.x', 'm'), np.array([1.0, 0.333]), 1e-6)
assert_near_equal(prob.get_val('comp.x', 'km', indices=[0]), 0.001, 1e-6)
prob.set_val('comp.x', 10.0, 'mm')
assert_near_equal(prob.get_val('comp.x'), np.array([1.0, 1.0]), 1e-6)
import unittest
import numpy as np
import scipy.misc
import scipy.stats
import tensorflow as tf
import autogp
from autogp import kernels
from autogp import likelihoods
SIG_FIGS = 5
class TestGaussianProcess(unittest.TestCase):
@classmethod
def setUpClass(cls):
# We expect the child class to instantiate `cls.model` for us.
cls.session = tf.Session()
@classmethod
def tearDownClass(cls):
cls.session.close()
@classmethod
def entropy(cls, weights, means, covars):
entropy = cls.model._build_entropy(weights=np.array(weights, dtype=np.float32),
means=np.array(means, dtype=np.float32),
covars=np.array(covars, dtype=np.float32))
return cls.session.run(entropy)
@classmethod
def cross_ent(cls, weights, means, covars, kernel_chol):
cross_ent = cls.model._build_cross_ent(weights=np.array(weights, dtype=np.float32),
means=np.array(means, dtype=np.float32),
covars=np.array(covars, dtype=np.float32),
kernel_chol=np.array(kernel_chol, dtype=np.float32))
return cls.session.run(cross_ent)
@classmethod
def interim_vals(cls, kernel_chol, inducing_inputs, train_inputs):
kern_prods, kern_sums = cls.model._build_interim_vals(
kernel_chol=np.array(kernel_chol, dtype=np.float32),
inducing_inputs=np.array(inducing_inputs, dtype=np.float32),
train_inputs=np.array(train_inputs, dtype=np.float32))
return cls.session.run([kern_prods, kern_sums])
@classmethod
def sample_info(cls, kern_prods, kern_sums, means, covars):
mean, var = cls.model._build_sample_info(kern_prods=np.array(kern_prods, dtype=np.float32),
kern_sums=np.array(kern_sums, dtype=np.float32),
means=np.array(means, dtype=np.float32),
covars=np.array(covars, dtype=np.float32))
return cls.session.run([mean, var])
class TestSimpleFull(TestGaussianProcess):
@classmethod
def setUpClass(cls):
super(TestSimpleFull, cls).setUpClass()
likelihood = likelihoods.Gaussian(1.0)
kernel = [kernels.RadialBasis(input_dim=1, lengthscale=1.0, std_dev=1.0, white=0.0)]
# In most of our unit tests, we will replace this value with something else.
inducing_inputs = np.array([[1.0]])
cls.model = autogp.GaussianProcess(likelihood_func=likelihood,
kernel_funcs=kernel,
inducing_inputs=inducing_inputs,
num_components=1,
diag_post=False,
num_samples=10)
cls.session.run(tf.initialize_all_variables())
def test_simple_entropy(self):
entropy = TestSimpleFull.entropy(weights=[1.0],
means=[[[1.0]]],
covars=[[[[1.0]]]])
self.assertAlmostEqual(entropy, 0.5 * (np.log(2 * np.pi) + np.log(2.0)), SIG_FIGS)
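# Hedged cross-check (editor addition): the closed form asserted above is numerically
# identical to -log N(1 | mean=1, variance=2), which scipy.stats can evaluate directly.
expected = -scipy.stats.norm(loc=1.0, scale=np.sqrt(2.0)).logpdf(1.0)
self.assertAlmostEqual(entropy, expected, SIG_FIGS)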
def test_small_covar_entropy(self):
entropy = TestSimpleFull.entropy(weights=[1.0],
means=[[[1.0]]],
covars=[[[[1e-10]]]])
self.assertAlmostEqual(entropy, 0.5 * (np.log(2 * np.pi) + np.log(2 * 1e-20)), SIG_FIGS)